/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

#if defined(CONFIG_PPC_BOOK3S_64)

#define ULONG_SIZE              8
#define FUNC(name)              GLUE(.,name)

#define GET_SHADOW_VCPU(reg)    \
        addi    reg, r13, PACA_KVM_SVCPU

#define DISABLE_INTERRUPTS      \
        mfmsr   r0;             \
        rldicl  r0,r0,48,1;     \
        rotldi  r0,r0,16;       \
        mtmsrd  r0,1;           \

#elif defined(CONFIG_PPC_BOOK3S_32)

#define ULONG_SIZE              4
#define FUNC(name)              name

#define GET_SHADOW_VCPU(reg)    \
        lwz     reg, (THREAD + THREAD_KVM_SVCPU)(r2)

#define DISABLE_INTERRUPTS      \
        mfmsr   r0;             \
        rlwinm  r0,r0,0,17,15;  \
        mtmsr   r0;             \

#endif /* CONFIG_PPC_BOOK3S_XX */
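
/*
 * Both DISABLE_INTERRUPTS variants above clear only MSR[EE] (0x8000):
 * on 64-bit, rldicl r0,r0,48,1 rotates the MSR so that EE sits in the
 * most significant bit, where the MB=1 mask clears it, rotldi r0,r0,16
 * rotates everything back into place, and mtmsrd r0,1 (L=1) updates
 * just the EE/RI bits; on 32-bit, rlwinm r0,r0,0,17,15 is a wrap-around
 * mask that clears bit 16 (again MSR_EE) before mtmsr writes it back.
 */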


#define VCPU_GPR(n)             (VCPU_GPRS + (n * ULONG_SIZE))
#define VCPU_LOAD_NVGPRS(vcpu) \
        PPC_LL  r14, VCPU_GPR(r14)(vcpu); \
        PPC_LL  r15, VCPU_GPR(r15)(vcpu); \
        PPC_LL  r16, VCPU_GPR(r16)(vcpu); \
        PPC_LL  r17, VCPU_GPR(r17)(vcpu); \
        PPC_LL  r18, VCPU_GPR(r18)(vcpu); \
        PPC_LL  r19, VCPU_GPR(r19)(vcpu); \
        PPC_LL  r20, VCPU_GPR(r20)(vcpu); \
        PPC_LL  r21, VCPU_GPR(r21)(vcpu); \
        PPC_LL  r22, VCPU_GPR(r22)(vcpu); \
        PPC_LL  r23, VCPU_GPR(r23)(vcpu); \
        PPC_LL  r24, VCPU_GPR(r24)(vcpu); \
        PPC_LL  r25, VCPU_GPR(r25)(vcpu); \
        PPC_LL  r26, VCPU_GPR(r26)(vcpu); \
        PPC_LL  r27, VCPU_GPR(r27)(vcpu); \
        PPC_LL  r28, VCPU_GPR(r28)(vcpu); \
        PPC_LL  r29, VCPU_GPR(r29)(vcpu); \
        PPC_LL  r30, VCPU_GPR(r30)(vcpu); \
        PPC_LL  r31, VCPU_GPR(r31)(vcpu); \

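/*
 * PPC_LL/PPC_STL (asm/asm-compat.h) expand to ld/std on 64-bit and to
 * lwz/stw on 32-bit, which is why ULONG_SIZE above is 8 or 4 to match.
 * FUNC() adds the '.' prefix needed to reach a function's text entry
 * point on the 64-bit ABI and is just the plain name on 32-bit.
 */
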
/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)    *
 *                                                                           *
 ****************************************************************************/

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)

kvm_start_entry:
        /* Write correct stack frame */
        mflr    r0
        PPC_STL r0,PPC_LR_STKOFF(r1)

        /* Save host state to the stack */
        PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)

        /* Save r3 (kvm_run) and r4 (vcpu) */
        SAVE_2GPRS(3, r1)

        /* Save non-volatile registers (r14 - r31) */
        SAVE_NVGPRS(r1)

        /* Save LR */
        PPC_STL r0, _LINK(r1)

        /* Load non-volatile guest state from the vcpu */
        VCPU_LOAD_NVGPRS(r4)

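        /*
         * At this point the host LR, r14-r31 and both arguments
         * (kvm_run in r3, vcpu in r4) live in the SWITCH_FRAME_SIZE
         * stack frame built above; the GPR3/GPR4 slots written by
         * SAVE_2GPRS(3, r1) are what the exit path reads back to
         * recover the vcpu pointer after the guest has run.
         */
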
        GET_SHADOW_VCPU(r5)

        /* Save R1/R2 in the PACA */
        PPC_STL r1, SVCPU_HOST_R1(r5)
        PPC_STL r2, SVCPU_HOST_R2(r5)

        /* XXX swap in/out on load? */
        PPC_LL  r3, VCPU_HIGHMEM_HANDLER(r4)
        PPC_STL r3, SVCPU_VMHANDLER(r5)

kvm_start_lightweight:

        PPC_LL  r10, VCPU_SHADOW_MSR(r4)        /* r10 = vcpu->arch.shadow_msr */

        DISABLE_INTERRUPTS

#ifdef CONFIG_PPC_BOOK3S_64
        /* Some guests may need to have dcbz set to 32 byte length.
         *
         * Usually we ensure that by patching the guest's instructions
         * to trap on dcbz and emulate it in the hypervisor.
         *
         * If we can, we should tell the CPU to use 32 byte dcbz though,
         * because that's a lot faster.
         */

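        /*
         * Bit 0 of vcpu->arch.hflags marks a core whose HID5 has a
         * dcbz32 switch; rldicl. below keeps only that bit and sets
         * CR0, so the beq skips the HID5 update when the bit is clear.
         * Otherwise HID5[dcbz32] (0x80) is set and guest dcbz clears
         * 32 bytes without having to trap into the hypervisor.
         */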
        PPC_LL  r3, VCPU_HFLAGS(r4)
        rldicl. r3, r3, 0, 63           /* CR = ((r3 & 1) == 0) */
        beq     no_dcbz32_on

        mfspr   r3,SPRN_HID5
        ori     r3, r3, 0x80            /* XXX HID5_dcbz32 = 0x80 */
        mtspr   SPRN_HID5,r3

no_dcbz32_on:

#endif /* CONFIG_PPC_BOOK3S_64 */

        PPC_LL  r6, VCPU_RMCALL(r4)
        mtctr   r6

        PPC_LL  r3, VCPU_TRAMPOLINE_ENTER(r4)
        LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))

        /* Jump to segment patching handler and into our guest */
        bctr
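        /*
         * The bctr goes through vcpu->arch.rmcall, which is expected to
         * switch to the real-mode MSR loaded into r4 (IR/DR cleared) and
         * hand control to the trampoline entry point in r3; the actual
         * world switch into the guest happens there.
         */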

/*
 * This is the handler in module memory. It is reached from the
 * lowmem trampoline code, so it is basically the guest exit code.
 */

.global kvmppc_handler_highmem
kvmppc_handler_highmem:

        /*
         * Register usage at this point:
         *
         * R1       = host R1
         * R2       = host R2
         * R12      = exit handler id
         * R13      = PACA
         * SVCPU.*  = guest *
         *
         */

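        /* The vcpu pointer comes back from the GPR4 stack slot that
         * SAVE_2GPRS(3, r1) filled in on the way into the guest. */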
        /* R7 = vcpu */
        PPC_LL  r7, GPR4(r1)

#ifdef CONFIG_PPC_BOOK3S_64

        PPC_LL  r5, VCPU_HFLAGS(r7)
        rldicl. r5, r5, 0, 63           /* CR = ((r5 & 1) == 0) */
        beq     no_dcbz32_off

        li      r4, 0
        mfspr   r5,SPRN_HID5
        rldimi  r5,r4,6,56
        mtspr   SPRN_HID5,r5

no_dcbz32_off:

#endif /* CONFIG_PPC_BOOK3S_64 */

        PPC_STL r14, VCPU_GPR(r14)(r7)
        PPC_STL r15, VCPU_GPR(r15)(r7)
        PPC_STL r16, VCPU_GPR(r16)(r7)
        PPC_STL r17, VCPU_GPR(r17)(r7)
        PPC_STL r18, VCPU_GPR(r18)(r7)
        PPC_STL r19, VCPU_GPR(r19)(r7)
        PPC_STL r20, VCPU_GPR(r20)(r7)
        PPC_STL r21, VCPU_GPR(r21)(r7)
        PPC_STL r22, VCPU_GPR(r22)(r7)
        PPC_STL r23, VCPU_GPR(r23)(r7)
        PPC_STL r24, VCPU_GPR(r24)(r7)
        PPC_STL r25, VCPU_GPR(r25)(r7)
        PPC_STL r26, VCPU_GPR(r26)(r7)
        PPC_STL r27, VCPU_GPR(r27)(r7)
        PPC_STL r28, VCPU_GPR(r28)(r7)
        PPC_STL r29, VCPU_GPR(r29)(r7)
        PPC_STL r30, VCPU_GPR(r30)(r7)
        PPC_STL r31, VCPU_GPR(r31)(r7)

        /* Restore host msr -> SRR1 */
        PPC_LL  r6, VCPU_HOST_MSR(r7)

        /*
         * For some interrupts, we need to call the real Linux
         * handler, so it can do work for us. This has to happen
         * as if the interrupt arrived from the kernel though,
         * so let's fake it here where most state is restored.
         *
         * Call Linux for hardware interrupts/decrementer
         * r12 = exit handler id (exit reason)
         */

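        /*
         * Only external interrupts, the decrementer and performance
         * monitor exits are routed through the lowmem trampoline to
         * the host handler; every other exit reason just restores the
         * host MSR (interrupts back on) and falls through to
         * kvm_return_point.
         */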
        cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
        beq     call_linux_handler
        cmpwi   r12, BOOK3S_INTERRUPT_DECREMENTER
        beq     call_linux_handler
        cmpwi   r12, BOOK3S_INTERRUPT_PERFMON
        beq     call_linux_handler

        /* Back to EE=1 */
        mtmsr   r6
        sync
        b       kvm_return_point

call_linux_handler:

        /*
         * If we land here we need to jump back to the handler we
         * came from.
         *
         * We have a page that we can access from real mode, so let's
         * jump back to that and use it as a trampoline to get back into
         * the interrupt handler!
         *
         * R12 still contains the exit code,
         * R5 VCPU_HOST_RETIP and
         * R6 VCPU_HOST_MSR
         */

        /* Restore host IP -> SRR0 */
        PPC_LL  r5, VCPU_HOST_RETIP(r7)

        /* XXX Better move to a safe function?
         *     What if we get an HTAB flush in between mtsrr0 and mtsrr1? */

        mtlr    r12

        PPC_LL  r4, VCPU_TRAMPOLINE_LOWMEM(r7)
        mtsrr0  r4
        LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
        mtsrr1  r3

        RFI

.global kvm_return_point
kvm_return_point:

        /* Jump back to lightweight entry if we're supposed to go back
         * into the guest. */

        /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
        mr      r5, r12

        /* Restore r3 (kvm_run) and r4 (vcpu) */
        REST_2GPRS(3, r1)
        bl      FUNC(kvmppc_handle_exit)

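        /*
         * Per the PowerPC calling convention this calls
         * kvmppc_handle_exit(run, vcpu, exit_nr) with run in r3, vcpu
         * in r4 and the exit number in r5; its return value in r3
         * decides below whether we re-enter the guest or bail out.
         */
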
        /* If RESUME_GUEST, get back in the loop */
        cmpwi   r3, RESUME_GUEST
        beq     kvm_loop_lightweight

        cmpwi   r3, RESUME_GUEST_NV
        beq     kvm_loop_heavyweight

kvm_exit_loop:

        PPC_LL  r4, _LINK(r1)
        mtlr    r4

        /* Restore non-volatile host registers (r14 - r31) */
        REST_NVGPRS(r1)

        addi    r1, r1, SWITCH_FRAME_SIZE
        blr

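        /*
         * RESUME_GUEST_NV means the exit handler may have changed the
         * guest's non-volatile registers, so the heavyweight path below
         * reloads r14-r31 from the vcpu before re-entering; the
         * lightweight path only needs the vcpu pointer back in r4.
         */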
kvm_loop_heavyweight:

        PPC_LL  r4, _LINK(r1)
        PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)

        /* Load vcpu and kvm_run */
        REST_2GPRS(3, r1)

        /* Load non-volatile guest state from the vcpu */
        VCPU_LOAD_NVGPRS(r4)

        /* Jump back into the beginning of this function */
        b       kvm_start_lightweight

kvm_loop_lightweight:

        /* We'll need the vcpu pointer */
        REST_GPR(4, r1)

        /* Jump back into the beginning of this function */
        b       kvm_start_lightweight