/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>

	.code64

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_irq_enable_syscall_ret)
	movq	%gs:pda_oldrsp,%rsp
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp:at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	$-1,RCX(%rsp)
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
	.endm
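	/*
	 * Descriptive note: SYSCALL loads the return RIP into %rcx and the
	 * saved RFLAGS into %r11, and the entry code only stashes the user
	 * %rsp in %gs:pda_oldrsp. FIXUP_TOP_OF_STACK therefore synthesizes
	 * the RSP/SS/CS/RCX/EFLAGS slots of pt_regs before C code may look
	 * at them, and RESTORE_TOP_OF_STACK writes the (possibly modified)
	 * values back for the return path.
	 */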

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl	%eax, %eax
	pushq	%rax /* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq	%rax /* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq	$(1<<9) /* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq	$__KERNEL_CS /* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq	\child_rip /* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax /* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq	$8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm
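	/*
	 * Note: the $8*6 above undoes exactly the six quadwords pushed by
	 * FAKE_STACK_FRAME (ss, rsp, eflags, cs, rip and orig rax).
	 */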

	.macro	CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm

/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push	kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 4
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -4
	call	schedule_tail
	GET_THREAD_INFO(%rcx)
	testl	$(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
	jnz	rff_trace
rff_action:
	RESTORE_REST
	testl	$3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je	int_ret_from_sys_call
	testl	$_TIF_IA32,threadinfo_flags(%rcx)
	jnz	int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp	ret_from_sys_call
rff_trace:
	movq	%rsp,%rdi
	call	syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp	rff_action
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the
 *	stack frame and report it properly in ps. Unfortunately we don't
 *	have one.
 *
 * When the user can change the frame, always force IRET. That is because
 * IRET deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
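/*
 * Illustrative user-space view of this convention (example only, not part
 * of the kernel):
 *
 *	movl	$__NR_getpid,%eax	# system call number
 *	syscall				# CPU: %rcx <- return RIP, %r11 <- RFLAGS
 *					# result (or -errno) comes back in %rax
 */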

ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq	%rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq	%rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl	$(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
	jnz	tracesys
	cmpq	$__NR_syscall_max,%rax
	ja	badsys
	movq	%r10,%rcx
	call	*sys_call_table(,%rax,8)	# XXX: rip relative
	movq	%rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path).
 * Has an incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl	$_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl	threadinfo_flags(%rcx),%edx
	andl	%edi,%edx
	jnz	sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq	RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	ENABLE_INTERRUPTS_SYSCALL_RET
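	/*
	 * Note: on native hardware ENABLE_INTERRUPTS_SYSCALL_RET effectively
	 * performs the sequence shown in native_irq_enable_syscall_ret above:
	 * restore the user %rsp from pda_oldrsp, swapgs and sysretq.
	 */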

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt	$TIF_NEED_RESCHED,%edx
	jnc	sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq	%rdi
	CFI_ADJUST_CFA_OFFSET 8
	call	schedule
	popq	%rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp	sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	testl	$_TIF_DO_NOTIFY_MASK,%edx
	jz	1f

	/* Really a signal */
	/* edx:	work flags (arg3) */
	leaq	do_notify_resume(%rip),%rax
	leaq	-ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl	%esi,%esi		# oldset -> arg2
	call	ptregscall_common
1:	movl	$_TIF_NEED_RESCHED,%edi
	/* Use IRET because the user could have changed the frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	int_with_check

badsys:
	movq	$-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp	ret_from_sys_call

	/* Do syscall tracing */
tracesys:
	SAVE_REST
	movq	$-ENOSYS,RAX(%rsp)
	FIXUP_TOP_OF_STACK %rdi
	movq	%rsp,%rdi
	call	syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed them */
	RESTORE_REST
	cmpq	$__NR_syscall_max,%rax
	movq	$-ENOSYS,%rcx
	cmova	%rcx,%rax
	ja	1f
	movq	%r10,%rcx	/* fixup for C */
	call	*sys_call_table(,%rax,8)
1:	movq	%rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because the user could have changed the frame */

/*
 * Syscall return path ending with IRET.
 * Has a correct top of stack, but a partial stack frame.
 */
	.globl int_ret_from_sys_call
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	$3,CS-ARGOFFSET(%rsp)
	je	retint_restore_args
	movl	$_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl	threadinfo_flags(%rcx),%edx
	andl	%edi,%edx
	jnz	int_careful
	andl	$~TS_COMPAT,threadinfo_status(%rcx)
	jmp	retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt	$TIF_NEED_RESCHED,%edx
	jnc	int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq	%rdi
	CFI_ADJUST_CFA_OFFSET 8
	call	schedule
	popq	%rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl	$(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz	int_signal
	pushq	%rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq	8(%rsp),%rdi	# &ptregs -> arg1
	call	syscall_trace_leave
	popq	%rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl	$~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
	jmp	int_restore_rest

int_signal:
	testl	$_TIF_DO_NOTIFY_MASK,%edx
	jz	1f
	movq	%rsp,%rdi		# &ptregs -> arg1
	xorl	%esi,%esi		# oldset -> arg2
	call	do_notify_resume
1:	movl	$_TIF_NEED_RESCHED,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	int_with_check
	CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq	-ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
END(\label)
	.endm
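	/*
	 * Each PTREGSCALL stub loads the C handler into %rax and points the
	 * handler's extra pt_regs argument register at the frame, then shares
	 * ptregscall_common below to build and tear down the full frame.
	 */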

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq	%r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq	%r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call	*%rax
	RESTORE_TOP_OF_STACK %r11
	movq	%r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq	%r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq	%r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq	%rsp, %rcx
	call	sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq	%rax,RAX(%rsp)
	RESTORE_REST
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq	$8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq	%rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call	sys_rt_sigreturn
	movq	%rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq	-ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq	%rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq	%rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl	$3,CS(%rdi)
	je	1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	   much work) */
1:	incl	%gs:pda_irqcount
	cmoveq	%gs:pda_irqstackptr,%rsp
	push	%rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call	\func
	.endm

ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl	%gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl	$3,CS-ARGOFFSET(%rsp)
	je	retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame.
	 * %rcx: thread info. Interrupts are off.
	 */
retint_with_reschedule:
	movl	$_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl	threadinfo_flags(%rcx),%edx
	andl	%edi,%edx
	CFI_REMEMBER_STATE
	jnz	retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp	restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq	$0

	SWAPGS
	jmp	general_protection

	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt	$TIF_NEED_RESCHED,%edx
	jnc	retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq	%rdi
	CFI_ADJUST_CFA_OFFSET	8
	call	schedule
	popq	%rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	retint_check

retint_signal:
	testl	$_TIF_DO_NOTIFY_MASK,%edx
	jz	retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq	$-1,ORIG_RAX(%rsp)
	xorl	%esi,%esi		# oldset
	movq	%rsp,%rdi		# &pt_regs
	call	do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl	$_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)
	jmp	retint_check

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl	$0,threadinfo_preempt_count(%rcx)
	jnz	retint_restore_args
	bt	$TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc	retint_restore_args
	bt	$9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc	retint_restore_args
	call	preempt_schedule_irq
	jmp	exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq	$~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp	ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	pushq	$0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq	%rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq	\sym(%rip),%rax
	jmp	error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	pushq	%rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq	\sym(%rip),%rax
	jmp	error_entry
	CFI_ENDPROC
	.endm
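	/*
	 * Note: zeroentry pushes a fake 0 error code for exceptions that don't
	 * supply one, so both flavours hand error_entry the same stack layout;
	 * the handler address is passed in %rax in either case.
	 */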

	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl	$1,%ebx
	movl	$MSR_GS_BASE,%ecx
	rdmsr
	testl	%edx,%edx
	js	1f
	SWAPGS
	xorl	%ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	movq	%rsp,%rdi
	movq	ORIG_RAX(%rsp),%rsi
	movq	$-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call	\sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm

/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl	%ebx,%ebx			/* swapgs needed? */
	jnz	paranoid_restore\trace
	testl	$3,CS(%rsp)
	jnz	paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp	irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl	threadinfo_flags(%rcx),%ebx
	andl	$_TIF_WORK_MASK,%ebx
	jz	paranoid_swapgs\trace
	movq	%rsp,%rdi			/* &pt_regs */
	call	sync_regs
	movq	%rax,%rsp			/* switch stack for scheduling */
	testl	$_TIF_NEED_RESCHED,%ebx
	jnz	paranoid_schedule\trace
	movl	%ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl	%esi,%esi			/* arg2: oldset */
	movq	%rsp,%rdi			/* arg1: &pt_regs */
	call	do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp	paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call	schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp	paranoid_userspace\trace
	CFI_ENDPROC
	.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq	$14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq	%rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq	14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER	rax,rsi
	movq	%rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq	%rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq	%rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq	%r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq	%r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq	%r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq	%r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq	%rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq	%rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq	%r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq	%r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq	%r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq	%r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl	%ebx,%ebx
	testl	$3,CS(%rsp)
	je	error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	movq	%rdi,RDI(%rsp)
	CFI_REL_OFFSET	rdi,RDI
	movq	%rsp,%rdi
	movq	ORIG_RAX(%rsp),%rsi	/* get error code */
	movq	$-1,ORIG_RAX(%rsp)
	call	*%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl	%ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl	%eax,%eax
	jne	retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl	threadinfo_flags(%rcx),%edx
	movl	$_TIF_WORK_MASK,%edi
	andl	%edi,%edx
	jnz	retint_careful
	jmp	retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl	%ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq	irq_return(%rip),%rbp
	cmpq	%rbp,RIP(%rsp)
	je	error_swapgs
	movl	%ebp,%ebp	/* zero extend */
	cmpq	%rbp,RIP(%rsp)
	je	error_swapgs
	cmpq	$gs_change,RIP(%rsp)
	je	error_swapgs
	jmp	error_sti
KPROBE_END(error_entry)

	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl	%edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl	%eax,%eax
	movl	%eax,%gs
	jmp	2b
	.previous
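	/*
	 * Note: the __ex_table entry above makes the "movl %edi,%gs" at
	 * gs_change recoverable; if the new selector faults, the fixup at
	 * bad_gs loads a null selector instead and resumes at label 2.
	 */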

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq	%rdx,%rdi
	orq	kernel_thread_flags(%rip),%rdi
	movq	$-1, %rsi
	movq	%rsp, %rdx

	xorl	%r8d,%r8d
	xorl	%r9d,%r9d

	# clone now
	call	do_fork
	movq	%rax,RAX(%rsp)
	xorl	%edi,%edi

	/*
	 * It isn't worth checking for reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq	$0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq	%rdi, %rax
	movq	%rsi, %rdi
	call	*%rax
	# exit
	mov	%eax, %edi
	call	do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq	%rsp,%rcx
	call	sys_execve
	movq	%rax, RAX(%rsp)
	RESTORE_REST
	testq	%rax,%rax
	je	int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq	$0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	pushq	$-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp	paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	pushq	$0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp	paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
	zeroentry do_reserved
END(reserved)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	paranoidentry do_double_fault
	jmp	paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	paranoidentry do_stack_segment
	jmp	paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	pushq	$0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp	paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

	/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push	%rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov	%rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl	%gs:pda_irqcount
	cmove	%gs:pda_irqstackptr,%rsp
	push	%rbp			# backlink for old unwinder
	call	__do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
	decl	%gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov	$-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)