/*
	Asm versions of Xen pv-ops, suitable for either direct use or
	inlining.  The inline versions are the same as the direct-use
	versions, with the pre- and post-amble chopped off.

	This code is encoded for size rather than absolute efficiency,
	with a view to being able to inline as much as possible.

	We only bother with direct forms (i.e., vcpu in pda) of the
	operations here; the indirect forms are better handled in C,
	since they're generally too large to inline anyway.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/errno.h>
#include <asm/segment.h>

#include <xen/interface/xen.h>

#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.
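
/*
	RELOC() and ENDPATCH() serve the patching/inlining scheme described
	in the header comment: for each op, x_reloc records the location of
	a relocation that must be fixed up when the body is copied inline
	at a call site, and x_end marks the end of the copyable body (the
	ret that follows each ENDPATCH is the "post-amble" and is not
	copied).
 */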

/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI	0x80000000

#if 1
/*
	x86-64 does not yet support direct access to percpu variables
	via a segment override, so we just need to make sure this code
	never gets used
 */
#define BUG			ud2a
#define PER_CPU_VAR(var, off)	0xdeadbeef
#endif

/*
	Enable events.  This clears the event mask and then tests the
	pending event status.  If there are pending events, enter the
	hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	BUG

	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)

	/* Getting preempted here doesn't matter, because preemption
	   will itself deal with any pending interrupts.  The pending
	   check may end up being run on the wrong CPU, but that
	   doesn't hurt. */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)
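	/* The 2b+1 above points at the rel32 operand of "call check_events",
	   i.e. the only thing that needs relocating if this body is copied
	   inline elsewhere. */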

/*
	Disabling events is simply a matter of making the event mask
	non-zero.
 */
ENTRY(xen_irq_disable_direct)
	BUG

	movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
	(xen_)save_fl is used to get the current interrupt enable status.
	Callers expect the status to be in X86_EFLAGS_IF, and other bits
	may be set in the return value.  We take advantage of this by
	making sure that X86_EFLAGS_IF has the right value (and other
	bits in that byte are 0), but other bits in the return value are
	undefined.  We need to toggle the state of the bit, because
	Xen and x86 use opposite senses (mask vs enable).
 */
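/*
	Concretely: X86_EFLAGS_IF is 0x200, i.e. bit 1 of the second byte
	of the return value.  Below, testb sets ZF when the mask byte is 0
	(events enabled), setz %ah turns that into 0 or 1, and addb %ah,%ah
	doubles it, so %ah ends up as 0 or 2, which is exactly the
	X86_EFLAGS_IF bit with the other bits of that byte clear.
 */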
ENTRY(xen_save_fl_direct)
	BUG

	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
	setz %ah
	addb %ah,%ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)

/*
	In principle the caller should be passing us a value returned
	from xen_save_fl_direct, but for robustness' sake we test only
	the X86_EFLAGS_IF flag rather than the whole byte.  After
	setting the interrupt mask state, the code checks for unmasked
	pending events and enters the hypervisor to get them delivered
	if so.
 */
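/*
	The pending check below leans on struct vcpu_info's layout:
	evtchn_upcall_pending and evtchn_upcall_mask are adjacent bytes,
	so comparing the 16-bit word at the pending offset against 0x0001
	is true exactly when an event is pending (low byte == 1) and
	events are unmasked (mask byte == 0).
 */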
ENTRY(xen_restore_fl_direct)
	BUG

	testb $X86_EFLAGS_IF>>8, %ah
	setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
	/* Getting preempted here doesn't matter, because preemption
	   will itself deal with any pending interrupts.  The pending
	   check may end up being run on the wrong CPU, but that
	   doesn't hurt. */

	/* check for unmasked and pending */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
	jnz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)

/*
	Force an event check by making a hypercall, but preserve
	regs before making the call.
 */
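/*
	check_events is reached from patched-in call sites that don't
	expect any registers to be clobbered, so everything the 64-bit
	SysV ABI lets a called C function clobber (rax, rcx, rdx, rsi,
	rdi, r8-r11) has to be saved around the call.
 */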
check_events:
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	ret

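/*
	Xen delivers exceptions to a 64-bit PV guest with two extra words,
	holding the interrupted context's %rcx and %r11, on top of the
	usual hardware frame.  xen_adjust_exception_frame reloads those
	registers and then discards the two words ("ret $16" pops the
	return address plus 16 bytes), leaving a native-looking frame.
 */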
ENTRY(xen_adjust_exception_frame)
	mov 8+0(%rsp),%rcx
	mov 8+8(%rsp),%r11
	ret $16

hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
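/* Each stub in the Xen hypercall page is 32 bytes, so this is the
   address of the HYPERVISOR_iret stub. */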
/*
	Xen64 iret frame:

	ss
	rsp
	rflags
	cs
	rip		<-- standard iret frame

	flags

	rcx		}
	r11		}<-- pushed by hypercall page
rsp ->	rax		}
 */
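/* The "flags" word is supplied by the guest before jumping to
   hypercall_iret: xen_iret below pushes 0, while the sysret paths
   further down push VGCF_in_syscall instead. */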
ENTRY(xen_iret)
	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)

/*
	sysexit is not used for 64-bit processes, so it's only ever
	used to return to 32-bit compat userspace.
 */
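/*
	By the sysexit convention the user return %rip arrives in %rdx and
	the user %rsp in %rcx; the pushes below turn those, plus the 32-bit
	user segments and an IF-enabled rflags, into the Xen iret frame
	shown above.
 */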
ENTRY(xen_sysexit)
	pushq $__USER32_DS
	pushq %rcx
	pushq $X86_EFLAGS_IF
	pushq $__USER32_CS
	pushq %rdx

	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_sysexit)
RELOC(xen_sysexit, 1b+1)

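/*
	For syscall/sysret the CPU leaves the user return %rip in %rcx and
	the user rflags in %r11; the two sysret paths below push those into
	the rip and rflags slots of the iret frame, and pass VGCF_in_syscall
	as the hypercall's flags word.
 */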
ENTRY(xen_sysret64)
	/* We're already on the usermode stack at this point, but still
	   with the kernel gs, so we can easily switch back */
	movq %rsp, %gs:pda_oldrsp
	movq %gs:pda_kernelstack, %rsp

	pushq $__USER_DS
	pushq %gs:pda_oldrsp
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)

ENTRY(xen_sysret32)
	/* We're already on the usermode stack at this point, but still
	   with the kernel gs, so we can easily switch back */
	movq %rsp, %gs:pda_oldrsp
	movq %gs:pda_kernelstack, %rsp

	pushq $__USER32_DS
	pushq %gs:pda_oldrsp
	pushq %r11
	pushq $__USER32_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret32)
RELOC(xen_sysret32, 1b+1)

/*
	Xen handles syscall callbacks much like ordinary exceptions,
	which means we have:
	 - kernel gs
	 - kernel rsp
	 - an iret-like stack frame on the stack (including rcx and r11):
		ss
		rsp
		rflags
		cs
		rip
		r11
	rsp ->	rcx

	In all the entrypoints, we undo all that to make it look
	like a CPU-generated syscall/sysenter and jump to the normal
	entrypoint.
 */
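/*
	The undo_xen_syscall macro below works straight off that frame:
	%rcx sits at 0*8(%rsp), %r11 at 1*8(%rsp), and the saved user rsp
	at 5*8(%rsp) (past rip, cs and rflags), so loading the latter into
	%rsp switches back to the user stack, just as a native syscall
	would have left it.
 */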

.macro undo_xen_syscall
	mov 0*8(%rsp), %rcx
	mov 1*8(%rsp), %r11
	mov 5*8(%rsp), %rsp
.endm

/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
	undo_xen_syscall
	jmp system_call_after_swapgs
ENDPROC(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
	undo_xen_syscall
	jmp ia32_cstar_target
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
	undo_xen_syscall
	jmp ia32_sysenter_target
ENDPROC(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

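/*
	Without compat support these entrypoints can only fail the call:
	strip the %rcx/%r11 words Xen pushed, set -ENOSYS as the return
	value, and go straight back to usermode via the iret hypercall.
 */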
ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $VGCF_in_syscall
	jmp hypercall_iret
ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif /* CONFIG_IA32_EMULATION */