/*
   Asm versions of Xen pv-ops, suitable for either direct use or inlining.
   The inline versions are the same as the direct-use versions, with the
   pre- and post-amble chopped off.

   This code is encoded for size rather than absolute efficiency,
   with a view to being able to inline as much as possible.

   We only bother with direct forms (ie, vcpu in pda) of the operations
   here; the indirect forms are better handled in C, since they're
   generally too large to inline anyway.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/percpu.h>

#include <xen/interface/xen.h>

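/*
   These *_direct stubs can be patched inline at their call sites by the
   paravirt patching machinery.  ENDPATCH marks the end of the code that
   may be copied inline, and RELOC records the location of a relocation
   within that code (e.g. 2b+1 is the rel32 displacement of a
   "call check_events" instruction), or 0 if there is none.
 */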
#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.

/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI	0x80000000

#if 1
/*
   FIXME: x86_64 now can support direct access to percpu variables
   via a segment override.  Update xen accordingly.
 */
#define BUG	ud2a
#endif
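
/*
   Until that happens, each of the *_direct stubs below begins with BUG
   (ud2a), so any call into them traps immediately.
 */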

/*
   Enable events.  This clears the event mask and tests the pending
   event status with one 'and' operation.  If there are pending
   events, then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	BUG

	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/* Preempt here doesn't matter because that will deal with
	   any pending interrupts.  The pending check may end up being
	   run on the wrong CPU, but that doesn't hurt. */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)

/*
   Disabling events is simply a matter of making the event mask
   non-zero.
 */
ENTRY(xen_irq_disable_direct)
	BUG

	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
   (xen_)save_fl is used to get the current interrupt enable status.
   Callers expect the status to be in X86_EFLAGS_IF, and other bits
   may be set in the return value.  We take advantage of this by
   making sure that X86_EFLAGS_IF has the right value (and other bits
   in that byte are 0), but other bits in the return value are
   undefined.  We need to toggle the state of the bit, because
   Xen and x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
	BUG

	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah		/* %ah = 1 if events are unmasked (IF should be set) */
	addb %ah,%ah		/* %ah = 2 = X86_EFLAGS_IF >> 8, i.e. IF in the return value */
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)

/*
   In principle the caller should be passing us a value returned
   from xen_save_fl_direct, but for robustness' sake we test only
   the X86_EFLAGS_IF flag rather than the whole byte.  After
   setting the interrupt mask state, it checks for unmasked
   pending events and enters the hypervisor to get them delivered
   if so.
 */
ENTRY(xen_restore_fl_direct)
	BUG

	testb $X86_EFLAGS_IF>>8, %ah
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/* Preempt here doesn't matter because that will deal with
	   any pending interrupts.  The pending check may end up being
	   run on the wrong CPU, but that doesn't hurt. */

	/* check for unmasked and pending */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f			/* skip the hypercall unless an event is pending and unmasked */
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)


/*
   Force an event check by making a hypercall,
   but preserve regs before making the call.
 */
check_events:
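	/* xen_force_evtchn_callback is a C function, so save every
	   register the C calling convention allows it to clobber */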
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	ret

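/*
   Xen pushes an extra %rcx/%r11 pair on top of the normal exception
   frame.  Restore them and pop the two extra words on return (ret $16)
   so the frame looks like a native exception frame again.
 */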
ENTRY(xen_adjust_exception_frame)
	mov 8+0(%rsp),%rcx
	mov 8+8(%rsp),%r11
	ret $16

hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
   Xen64 iret frame:

	ss
	rsp
	rflags
	cs
	rip		<-- standard iret frame

	flags

	rcx		}
	r11		}<-- pushed by hypercall page
rsp ->	rax		}
 */
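/*
   The "flags" word is consumed by the iret hypercall along with the rest
   of the frame: xen_iret pushes 0, while the syscall/sysexit return paths
   below push VGCF_in_syscall to tell Xen they are returning from a syscall.
 */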
ENTRY(xen_iret)
	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)

/*
   sysexit is not used for 64-bit processes, so it's
   only ever used to return to 32-bit compat userspace.
 */
cdacc127 182ENTRY(xen_sysexit)
6fcac6d3
JF
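	/* Build the iret frame: user %ss, user %rsp (sysexit takes it in
	   %rcx), rflags with IF set, user %cs, and user %rip (in %rdx). */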
	pushq $__USER32_DS
	pushq %rcx
	pushq $X86_EFLAGS_IF
	pushq $__USER32_CS
	pushq %rdx

	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_sysexit)
RELOC(xen_sysexit, 1b+1)

ENTRY(xen_sysret64)
	/* We're already on the usermode stack at this point, but still
	   with the kernel gs, so we can easily switch back */
	movq %rsp, %gs:pda_oldrsp
	movq PER_CPU_VAR(kernel_stack),%rsp

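	/* Build an iret frame on the kernel stack: syscall left the user
	   rflags in %r11 and the return %rip in %rcx. */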
	pushq $__USER_DS
	pushq %gs:pda_oldrsp
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)

ENTRY(xen_sysret32)
	/* We're already on the usermode stack at this point, but still
	   with the kernel gs, so we can easily switch back */
	movq %rsp, %gs:pda_oldrsp
	movq PER_CPU_VAR(kernel_stack), %rsp

	pushq $__USER32_DS
	pushq %gs:pda_oldrsp
	pushq %r11
	pushq $__USER32_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret32)
RELOC(xen_sysret32, 1b+1)

/*
   Xen handles syscall callbacks much like ordinary exceptions,
   which means we have:
    - kernel gs
    - kernel rsp
    - an iret-like stack frame on the stack (including rcx and r11):
	ss
	rsp
	rflags
	cs
	rip
	r11
   rsp->	rcx

   In all the entrypoints, we undo all that to make it look
   like a CPU-generated syscall/sysenter and jump to the normal
   entrypoint.
 */

.macro undo_xen_syscall
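	/* Restore %rcx and %r11 from the Xen-provided frame, then reload
	   %rsp from the frame's saved user stack pointer (slot 5), which
	   also discards the frame. */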
	mov 0*8(%rsp),%rcx
	mov 1*8(%rsp),%r11
	mov 5*8(%rsp),%rsp
.endm

/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
	undo_xen_syscall
	jmp system_call_after_swapgs
ENDPROC(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
	undo_xen_syscall
	jmp ia32_cstar_target
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
	undo_xen_syscall
	jmp ia32_sysenter_target
ENDPROC(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
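	/* Without compat support there is nothing to forward to: drop the
	   extra %rcx/%r11 words and fail the call with -ENOSYS, returning
	   to the caller via the iret hypercall. */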
	lea 16(%rsp), %rsp	/* strip %rcx,%r11 */
	mov $-ENOSYS, %rax
	pushq $VGCF_in_syscall
	jmp hypercall_iret
ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif	/* CONFIG_IA32_EMULATION */