/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 *  $Id$
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like the partial stack frame, but with all registers saved.
 *
 * TODO:
 * - schedule it carefully for the final hardware.
 */
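
/*
 * Orientation: assuming the SAVE_ARGS/SAVE_REST layout from asm/calling.h,
 * the kernel stack then looks roughly like this (high addresses first; the
 * stack grows down):
 *
 *	SS RSP EFLAGS CS RIP	architectural iret frame ("top of stack")
 *	ORIG_RAX		error code or syscall number
 *	RDI ... R11		partial stack frame (SAVE_ARGS)
 *	RBX RBP R12 ... R15	remainder for a full stack frame (SAVE_REST)
 */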

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>

	.code64

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif


	.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
	.endm
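
/* Bit 9 of EFLAGS is IF, the interrupt-enable flag: TRACE_IRQS_ON is only
   recorded when the frame being returned to will run with interrupts on. */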

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL-based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp:at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	$-1,RCX(%rsp)
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
	.endm
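
/* SYSCALL itself clobbers RCX (with the return RIP) and R11 (with the saved
   RFLAGS), which is why FIXUP_TOP_OF_STACK recovers EFLAGS from the R11 slot
   and can only fill the RCX slot with -1: the user value of RCX is
   unrecoverable on this path. */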

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl	%eax, %eax
	pushq	%rax		/* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq	%rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq	$(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq	$__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq	\child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq	$8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm
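
/* The $8*6 above pops exactly what FAKE_STACK_FRAME pushed: the five fake
   iret-frame words (ss, rsp, eflags, cs, rip) plus orig rax. */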

	.macro CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm

/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,threadinfo_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack
 *	frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frame, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
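
/*
 * For illustration only (user-space side, not part of this file; buf and len
 * are hypothetical symbols): a write(2) call arriving here would have been
 * set up roughly like
 *
 *	movq	$1,%rax			# __NR_write
 *	movq	$1,%rdi			# arg0: fd = stdout
 *	leaq	buf(%rip),%rsi		# arg1: buffer
 *	movq	$len,%rdx		# arg2: length
 *	syscall				# rcx <- return rip, r11 <- rflags
 */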

ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	swapgs
	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	sti
	SAVE_ARGS 8,1
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
	CFI_REMEMBER_STATE
	jnz tracesys
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX:	rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
	.globl ret_from_sys_call
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	GET_THREAD_INFO(%rcx)
	cli
	TRACE_IRQS_OFF
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  sysret_careful
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp,%rsp
	swapgs
	sysretq
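
/* sysretq reloads RIP from %rcx and RFLAGS from %r11 on its way back to
   ring 3, which is why both registers were reloaded just above. */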

	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	CFI_RESTORE_STATE
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	sti
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	sti
	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
	jz    1f

	/* Really a signal */
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
1:	movl $_TIF_NEED_RESCHED,%edi
	/* Use IRET because the user could have changed the frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	cli
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

	/* Do syscall tracing */
tracesys:
	CFI_RESTORE_STATE
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed them */
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja   1f
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
1:	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because the user could have changed the frame */
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(system_call)

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
ENTRY(int_ret_from_sys_call)
	CFI_STARTPROC	simple
	CFI_DEF_CFA	rsp,SS+8-ARGOFFSET
	/*CFI_REL_OFFSET	ss,SS-ARGOFFSET*/
	CFI_REL_OFFSET	rsp,RSP-ARGOFFSET
	/*CFI_REL_OFFSET	rflags,EFLAGS-ARGOFFSET*/
	/*CFI_REL_OFFSET	cs,CS-ARGOFFSET*/
	CFI_REL_OFFSET	rip,RIP-ARGOFFSET
	CFI_REL_OFFSET	rdx,RDX-ARGOFFSET
	CFI_REL_OFFSET	rcx,RCX-ARGOFFSET
	CFI_REL_OFFSET	rax,RAX-ARGOFFSET
	CFI_REL_OFFSET	rdi,RDI-ARGOFFSET
	CFI_REL_OFFSET	rsi,RSI-ARGOFFSET
	CFI_REL_OFFSET	r8,R8-ARGOFFSET
	CFI_REL_OFFSET	r9,R9-ARGOFFSET
	CFI_REL_OFFSET	r10,R10-ARGOFFSET
	CFI_REL_OFFSET	r11,R11-ARGOFFSET
	cli
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz   int_careful
	andl    $~TS_COMPAT,threadinfo_status(%rcx)
	jmp   retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	TRACE_IRQS_ON
	sti
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	cli
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	sti
	SAVE_REST
	/* Check for syscall exit trace */
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
	cli
	TRACE_IRQS_OFF
	jmp int_restore_rest

int_signal:
	testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
	RESTORE_REST
	cli
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(int_ret_from_sys_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq    -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
END(\label)
	.endm
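
/*
 * For reference, the expansion of one instance below is simply:
 *
 *	.globl stub_clone
 * stub_clone:
 *	leaq	sys_clone(%rip),%rax
 *	leaq	-ARGOFFSET+8(%rsp),%r8
 *	jmp	ptregscall_common
 * END(stub_clone)
 */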

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)
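
/* The return address is parked in %r15 across the call: %r15 is callee-saved
   (see the register-setup comment above), so the C syscall body leaves it
   alone even though FIXUP/RESTORE_TOP_OF_STACK reuse %r11. */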

ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl $3,CS(%rdi)
	je 1f
	swapgs
1:	incl	%gs:pda_irqcount	# RED-PEN should check preempt count
	cmoveq %gs:pda_irqstackptr,%rsp
	push    %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm
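
/* pda_irqcount is initialized to -1 at CPU setup, so the incl sets ZF only
   on the outermost interrupt; cmoveq then switches to the per-CPU IRQ stack
   exactly once, and nested interrupts keep the stack they arrived on. */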

ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	cli
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  retint_careful
retint_swapgs:
	/*
	 * The iretq could re-enable interrupts:
	 */
	cli
	TRACE_IRQS_IRETQ
	swapgs
	jmp restore_args

retint_restore_args:
	cli
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0
iret_label:
	iretq

	.section __ex_table,"a"
	.quad iret_label,bad_iret
	.previous
	.section .fixup,"ax"
	/* force a signal here? this matches i386 behaviour */
	/* running with kernel gs */
bad_iret:
	movq $11,%rdi	/* SIGSEGV */
	TRACE_IRQS_ON
	sti
	jmp do_exit
	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	sti
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	cli
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	sti
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	cli
	TRACE_IRQS_OFF
	movl $_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)
	jmp retint_check

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	threadinfo. interrupts off. */
	.p2align
retint_kernel:
	cmpl $0,threadinfo_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
#endif

#ifdef CONFIG_X86_LOCAL_APIC
ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
#endif

/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	/* error code is on the stack already */
	/* handle NMI-like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl  $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	swapgs
	xorl  %ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	cli
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm
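
/* The rdmsr of MSR_GS_BASE tests the sign of the current GS base: kernel
   addresses have the top bit set, so a negative %edx means we already run
   with the kernel GS and no swapgs is needed. %ebx carries that verdict out
   (1: don't swapgs on exit, 0: do). */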

/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * the NMI case is common, and NMI does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz   paranoid_userspace\trace
paranoid_swapgs\trace:
	TRACE_IRQS_IRETQ 0
	swapgs
paranoid_restore\trace:
	RESTORE_ALL 8
	iretq
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	sti
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	cli
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	sti
	call schedule
	cli
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm
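
/* RESTORE_ALL 8: assuming the addskip argument of RESTORE_ALL from
   asm/calling.h, the trailing 8 skips the ORIG_RAX/error-code slot after
   popping the registers, leaving %rsp at the hardware iret frame. */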

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
ENTRY(error_entry)
	_frame RDI
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:
	swapgs
error_sti:
	movq %rdi,RDI(%rsp)
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	cli
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne  retint_kernel
	movl  threadinfo_flags(%rcx),%edx
	movl  $_TIF_WORK_MASK,%edi
	andl  %edi,%edx
	jnz  retint_careful
	/*
	 * The iret might restore flags:
	 */
	TRACE_IRQS_IRETQ
	swapgs
	RESTORE_ARGS 0,8,0
	jmp iret_label
	CFI_ENDPROC

error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B-stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq iret_label(%rip),%rbp
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	movl %ebp,%ebp	/* zero extend */
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je   error_swapgs
	jmp  error_sti
END(error_entry)

	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	cli
	swapgs
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	swapgs
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	swapgs			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp  2b
	.previous
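
/* The fixup above retries with a null selector: loading selector 0 into %gs
   never faults, so a bad descriptor index from the caller degrades to a
   zero GS instead of an unhandled exception. */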

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not rescheduling the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)
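
/*
 * Illustrative caller (C side, a sketch only -- names other than
 * kernel_thread and the CLONE_* flags are hypothetical):
 *
 *	static int worker(void *arg)
 *	{
 *		...
 *		return 0;
 *	}
 *	pid_t pid = kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES);
 */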

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	xorl %edi, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
ENTRY(execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(execve)
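
/* On success sys_execve returns 0 and the frame now describes the fresh user
   context, so the je above leaves through int_ret_from_sys_call's IRET;
   only the failure case returns to the in-kernel caller. */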

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
END(page_fault)
	.previous .text

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
END(debug)
	.previous .text

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
END(nmi)
	.previous .text

KPROBE_ENTRY(int3)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
END(int3)
	.previous .text

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
	zeroentry do_reserved
END(reserved)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
END(general_protection)
	.previous .text

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

	/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET   -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
	CFI_STARTPROC
	movq	%r15, R15(%rdi)
	movq	%r14, R14(%rdi)
	xchgq	%rsi, %rdx
	movq	%r13, R13(%rdi)
	movq	%r12, R12(%rdi)
	xorl	%eax, %eax
	movq	%rbp, RBP(%rdi)
	movq	%rbx, RBX(%rdi)
	movq	(%rsp), %rcx
	movq	%rax, R11(%rdi)
	movq	%rax, R10(%rdi)
	movq	%rax, R9(%rdi)
	movq	%rax, R8(%rdi)
	movq	%rax, RAX(%rdi)
	movq	%rax, RCX(%rdi)
	movq	%rax, RDX(%rdi)
	movq	%rax, RSI(%rdi)
	movq	%rax, RDI(%rdi)
	movq	%rax, ORIG_RAX(%rdi)
	movq	%rcx, RIP(%rdi)
	leaq	8(%rsp), %rcx
	movq	$__KERNEL_CS, CS(%rdi)
	movq	%rax, EFLAGS(%rdi)
	movq	%rcx, RSP(%rdi)
	movq	$__KERNEL_DS, SS(%rdi)
	jmpq	*%rdx
	CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif