/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"

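/*
 * Translate a guest virtual address to a guest physical address for the
 * unmapped segments: host CKSEG0/CKSEG1 addresses map directly via
 * CPHYSADDR(), guest KSeg0 addresses via KVM_GUEST_CPHYSADDR(). Anything
 * else is an error here, since mapped segments go through the TLB instead.
 */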
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	gva_t kseg = KSEGX(gva);
	gva_t gkseg = KVM_GUEST_KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else if (gkseg == KVM_GUEST_KSEG0)
		gpa = KVM_GUEST_CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}

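/*
 * Coprocessor Unusable exception: either deliver a COP1 Unusable exception
 * to the guest (no FPU, or guest CU1 clear), restore the hardware FPU state
 * with kvm_own_fpu(), or emulate the trapping instruction for other
 * coprocessors.
 */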
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/* FPU Unusable */
		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
			/*
			 * Unusable/no FPU in guest:
			 * deliver guest COP1 Unusable Exception
			 */
			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
		} else {
			/* Restore FPU state */
			kvm_own_fpu(vcpu);
			er = EMULATE_DONE;
		}
	} else {
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	case EMULATE_HYPERCALL:
		ret = kvm_mips_handle_hypcall(vcpu);
		break;

	default:
		BUG();
	}
	return ret;
}

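/*
 * Helpers for accesses that cannot be satisfied by a TLB fill: fetch the
 * faulting instruction and emulate it as an MMIO load/store, completing the
 * access in the host via KVM_EXIT_MMIO.
 */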
static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
			     struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	int err;

	/* A code fetch fault doesn't count as an MMIO */
	if (kvm_is_ifetch_fault(&vcpu->arch)) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Emulate the load */
	er = kvm_mips_emulate_load(inst, cause, run, vcpu);
	if (er == EMULATE_FAIL) {
		kvm_err("Emulate load from MMIO space failed\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		run->exit_reason = KVM_EXIT_MMIO;
	}
	return RESUME_HOST;
}

static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
			      struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	int err;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Emulate the store */
	er = kvm_mips_emulate_store(inst, cause, run, vcpu);
	if (er == EMULATE_FAIL) {
		kvm_err("Emulate store to MMIO space failed\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		run->exit_reason = KVM_EXIT_MMIO;
	}
	return RESUME_HOST;
}

static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
			       struct kvm_vcpu *vcpu, bool store)
{
	if (store)
		return kvm_mips_bad_store(cause, opc, run, vcpu);
	else
		return kvm_mips_bad_load(cause, opc, run, vcpu);
}

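/*
 * TLB Modified exception: a write hit a TLB entry that isn't dirty. Relay it
 * to the guest if its own TLB entry isn't dirty either, otherwise refill the
 * host TLB, falling back to MMIO emulation for non-writable pages.
 */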
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	struct kvm_mips_tlb *tlb;
	unsigned long entryhi;
	int index;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		/*
		 * First find the mapping in the guest TLB. If the failure to
		 * write was due to the guest TLB, it should be up to the guest
		 * to handle it.
		 */
		entryhi = (badvaddr & VPN2_MASK) |
			  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
		index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

		/*
		 * These should never happen.
		 * They would indicate stale host TLB entries.
		 */
		if (unlikely(index < 0)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		tlb = vcpu->arch.guest_tlb + index;
		if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/*
		 * Guest entry not dirty? That would explain the TLB modified
		 * exception. Relay that on to the guest so it can handle it.
		 */
		if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
			kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
			return RESUME_GUEST;
		}

		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
							 true))
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, run, vcpu);
		return RESUME_GUEST;
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, run, vcpu);
		return RESUME_GUEST;
	} else {
		/* host kernel addresses are all handled as MMIO */
		return kvm_mips_bad_store(cause, opc, run, vcpu);
	}
}

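/*
 * Common TLB Load/Store miss handling: fault in the comm page for guest
 * kernel mode, pass user-segment misses on to the guest or refill the host
 * TLB from the guest TLB, handle KSeg0 misses in KVM, and treat the
 * remaining legal cases as MMIO.
 */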
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  store ? "ST" : "LD", cause, opc, badvaddr);

		/*
		 * User Address (UA) fault, this could happen if
		 * (1) TLB entry not present/valid in both Guest and shadow host
		 *     TLBs, in this case we pass on the fault to the guest
		 *     kernel and let it handle it.
		 * (2) TLB entry is present in the Guest TLB but not in the
		 *     shadow, in this case we inject the TLB from the Guest TLB
		 *     into the shadow host TLB
		 */

		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
			ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		/*
		 * With EVA we may get a TLB exception instead of an address
		 * error when the guest performs MMIO to KSeg1 addresses.
		 */
		ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
	} else {
		kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
			store ? "ST" : "LD", cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}

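/*
 * Address Error exceptions: KSeg0/KSeg1 accesses (from guest kernel mode for
 * stores) are handled as MMIO; anything else is fatal. The handlers that
 * follow relay their exception (syscall, reserved instruction, breakpoint,
 * trap, MSA FPE, FPE) straight back into the guest via the corresponding
 * kvm_mips_emulate_*() helper.
 */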
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		ret = kvm_mips_bad_store(cause, opc, run, vcpu);
	} else {
		kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		ret = kvm_mips_bad_load(cause, opc, run, vcpu);
	} else {
		kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
		/*
		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
		 * guest reserved instruction exception
		 */
		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
		/* MSA disabled by guest, guest MSA disabled exception */
		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
	} else {
		/* Restore MSA/FPU state */
		kvm_own_msa(vcpu);
		er = EMULATE_DONE;
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

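/*
 * Report which KVM capabilities this implementation supports: only
 * KVM_CAP_MIPS_TE (trap & emulate) here.
 */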
static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_TE:
		r = 1;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

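/*
 * Allocate the GVA page tables for guest kernel and guest user mode on VCPU
 * creation. They are freed again by kvm_trap_emul_vcpu_uninit().
 */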
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

	/*
	 * Allocate GVA -> HPA page tables.
	 * MIPS doesn't use the mm_struct pointer argument.
	 */
	kern_mm->pgd = pgd_alloc(kern_mm);
	if (!kern_mm->pgd)
		return -ENOMEM;

	user_mm->pgd = pgd_alloc(user_mm);
	if (!user_mm->pgd) {
		pgd_free(kern_mm, kern_mm->pgd);
		return -ENOMEM;
	}

	return 0;
}

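/*
 * Walk and free a GVA page table, stopping at the 0x80000000 boundary so the
 * host kernel page tables copied from init_mm.pgd are left alone.
 */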
static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
	/* Don't free host kernel page tables copied from init_mm.pgd */
	const unsigned long end = 0x80000000;
	unsigned long pgd_va, pud_va, pmd_va;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;

	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
		if (pgd_none(pgd[i]))
			continue;

		pgd_va = (unsigned long)i << PGDIR_SHIFT;
		if (pgd_va >= end)
			break;
		pud = pud_offset(pgd + i, 0);
		for (j = 0; j < PTRS_PER_PUD; j++) {
			if (pud_none(pud[j]))
				continue;

			pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
			if (pud_va >= end)
				break;
			pmd = pmd_offset(pud + j, 0);
			for (k = 0; k < PTRS_PER_PMD; k++) {
				if (pmd_none(pmd[k]))
					continue;

				pmd_va = pud_va | (k << PMD_SHIFT);
				if (pmd_va >= end)
					break;
				pte = pte_offset(pmd + k, 0);
				pte_free_kernel(NULL, pte);
			}
			pmd_free(NULL, pmd);
		}
		pud_free(NULL, pud);
	}
	pgd_free(NULL, pgd);
}

static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}

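/*
 * Initialise the guest's view of CP0: emulate a MIPS 24kc-like core (or a
 * generic QEMU machine on R6), derive the Config registers from the host
 * where they must match, and start the VCPU at the guest reset vector.
 */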
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/* Start off the timer at 100 MHz */
	kvm_mips_init_count(vcpu, 100*1000*1000);

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected
	 */
#ifndef CONFIG_CPU_MIPSR6
	/* r2-r5, simulate a MIPS 24kc */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
	/* r6+, simulate a generic QEMU machine */
	kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
	/*
	 * Have config1, Cacheable, noncoherent, write-back, write allocate.
	 * Endianness, arch revision & virtually tagged icache should match
	 * host.
	 */
	config = read_c0_config() & MIPS_CONF_AR;
	config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
	config |= CONF_BE;
#endif
	if (cpu_has_vtag_icache)
		config |= MIPS_CONF_VI;
	kvm_write_c0_guest_config(cop0, config);

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
		     MIPS_CONF1_WR | MIPS_CONF1_CA);
	kvm_write_c0_guest_config1(cop0, config1);

	/* Have config3, no tertiary/secondary caches implemented */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Have config4, UserLocal */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Have config5 */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* No config6 */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/* Status */
	kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
				       (vcpu_id & MIPS_EBASE_CPUNUM));

	/* Put PC at guest reset vector */
	vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);

	return 0;
}

static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
	/* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
					       const struct kvm_memory_slot *slot)
{
	kvm_trap_emul_flush_shadow_all(kvm);
}

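/*
 * Registers exposed by this implementation through the KVM_GET_ONE_REG /
 * KVM_SET_ONE_REG ioctls.
 */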
static u64 kvm_trap_emul_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
					  u64 __user *indices)
{
	if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
			 sizeof(kvm_trap_emul_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);

	return 0;
}

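/*
 * Read/write a single register for KVM_GET_ONE_REG/KVM_SET_ONE_REG. Count,
 * Compare and Cause are routed through the timer emulation so the guest's
 * view of CP0_Count stays consistent; writes to the Config registers are
 * masked to their writable bits or ignored entirely.
 */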
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = kvm_read_c0_guest_entrylo0(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = kvm_read_c0_guest_entrylo1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		*v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = (long)kvm_read_c0_guest_intctl(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		*v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = (long)kvm_read_c0_guest_ebase(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		*v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		*v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		*v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		*v = (long)kvm_read_c0_guest_config4(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		*v = (long)kvm_read_c0_guest_config5(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		*v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		*v = (long)kvm_read_c0_guest_kscratch1(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		*v = (long)kvm_read_c0_guest_kscratch2(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		*v = (long)kvm_read_c0_guest_kscratch3(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		*v = (long)kvm_read_c0_guest_kscratch4(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		*v = (long)kvm_read_c0_guest_kscratch5(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		*v = (long)kvm_read_c0_guest_kscratch6(cop0);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		kvm_write_c0_guest_entrylo0(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		kvm_write_c0_guest_entrylo1(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		/* No VInt, so no VS, read-only for now */
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		/*
		 * Allow core number to be written, but the exception base must
		 * remain in guest KSeg0.
		 */
		kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
					  v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		/* writes ignored */
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		kvm_write_c0_guest_kscratch1(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		kvm_write_c0_guest_kscratch2(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		kvm_write_c0_guest_kscratch3(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		kvm_write_c0_guest_kscratch4(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		kvm_write_c0_guest_kscratch5(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		kvm_write_c0_guest_kscratch6(cop0, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

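/*
 * Context switching: on vcpu_load, if we were preempted while in guest
 * context (PF_VCPU), restore the GVA address space for the current guest
 * mode; on vcpu_put, restore the normal Linux process address space.
 */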
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;

	/*
	 * Were we in guest context? If so, restore the appropriate ASID based
	 * on the mode of the Guest (Kernel/User).
	 */
	if (current->flags & PF_VCPU) {
		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
		if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
		    asid_version_mask(cpu))
			get_new_mmu_context(mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, mm));
		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
		kvm_mips_suspend_mm(cpu);
		ehb();
	}

	return 0;
}

static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_lose_fpu(vcpu);

	if (current->flags & PF_VCPU) {
		/* Restore normal Linux process memory map */
		if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
		     asid_version_mask(cpu)))
			get_new_mmu_context(current->mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, current->mm));
		TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
		kvm_mips_resume_mm(cpu);
		ehb();
	}

	return 0;
}

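/**
 * kvm_trap_emul_check_requests() - Handle pending VCPU requests.
 * @vcpu:	VCPU pointer.
 * @cpu:	Current CPU number.
 * @reload_asid: Whether to reload the ASID for the current guest mode.
 *
 * Handle outstanding VCPU requests, in particular KVM_REQ_TLB_FLUSH, which
 * flushes both GVA page tables and invalidates their ASIDs on all CPUs.
 */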
static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
					 bool reload_asid)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	int i;

	if (likely(!vcpu->requests))
		return;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/*
		 * Both kernel & user GVA mappings must be invalidated. The
		 * caller is just about to check whether the ASID is stale
		 * anyway so no need to reload it here.
		 */
		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
		kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
		for_each_possible_cpu(i) {
			cpu_context(i, kern_mm) = 0;
			cpu_context(i, user_mm) = 0;
		}

		/* Generate new ASID for current mode */
		if (reload_asid) {
			mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
			get_new_mmu_context(mm, cpu);
			htw_stop();
			write_c0_entryhi(cpu_asid(cpu, mm));
			TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
			htw_start();
		}
	}
}

/**
 * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Call before a GVA space access outside of guest mode, to ensure that
 * asynchronous TLB flush requests are handled or delayed until completion of
 * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
 *
 * Should be called with IRQs already enabled.
 */
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
{
	/* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
	WARN_ON_ONCE(irqs_disabled());

	/*
	 * The caller is about to access the GVA space, so we set the mode to
	 * force TLB flush requests to send an IPI, and also disable IRQs to
	 * delay IPI handling until kvm_trap_emul_gva_lockless_end().
	 */
	local_irq_disable();

	/*
	 * Make sure the read of VCPU requests is not reordered ahead of the
	 * write to vcpu->mode, or we could miss a TLB flush request while
	 * the requester sees the VCPU as outside of guest mode and not needing
	 * an IPI.
	 */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);

	/*
	 * If a TLB flush has been requested (potentially while
	 * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
	 * before accessing the GVA space, and be sure to reload the ASID if
	 * necessary as it'll be immediately used.
	 *
	 * TLB flush requests after this check will trigger an IPI due to the
	 * mode change above, which will be delayed due to IRQs disabled.
	 */
	kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
}

/**
 * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Called after a GVA space access outside of guest mode. Should have a matching
 * call to kvm_trap_emul_gva_lockless_begin().
 */
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of GVA
	 * accesses, or a TLB flush requester may not think it necessary to send
	 * an IPI.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);

	/*
	 * Now that the access to GVA space is complete, it's safe for pending
	 * TLB flush request IPIs to be handled (which indicates completion).
	 */
	local_irq_enable();
}

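/*
 * Preparation on guest (re)entry, called with IRQs disabled: handle pending
 * requests, lazily regenerate the user-mode ASID and flush the GVA page
 * table when the guest ASID has changed, and pick up a fresh host ASID if
 * ours is stale.
 */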
static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
				       struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int i, cpu = smp_processor_id();
	unsigned int gasid;

	/*
	 * No need to reload ASID, IRQs are disabled already so there's no rush,
	 * and we'll check if we need to regenerate below anyway before
	 * re-entering the guest.
	 */
	kvm_trap_emul_check_requests(vcpu, cpu, false);

	if (KVM_GUEST_KERNEL_MODE(vcpu)) {
		mm = kern_mm;
	} else {
		mm = user_mm;

		/*
		 * Lazy host ASID regeneration / PT flush for guest user mode.
		 * If the guest ASID has changed since the last guest usermode
		 * execution, invalidate the stale TLB entries and flush GVA PT
		 * entries too.
		 */
		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
		if (gasid != vcpu->arch.last_user_gasid) {
			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
			for_each_possible_cpu(i)
				cpu_context(i, user_mm) = 0;
			vcpu->arch.last_user_gasid = gasid;
		}
	}

	/*
	 * Check if ASID is stale. This may happen due to a TLB flush request or
	 * a lazy user MM invalidation.
	 */
	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
	    asid_version_mask(cpu))
		get_new_mmu_context(mm, cpu);
}

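/*
 * Main run entry point: deliver pending interrupts, then run the guest with
 * page faulting and the hardware page table walker disabled and the host
 * address space suspended, restoring everything on return.
 */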
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_trap_emul_vcpu_reenter(run, vcpu);

	/*
	 * We use user accessors to access guest memory, but we don't want to
	 * invoke Linux page faulting.
	 */
	pagefault_disable();

	/* Disable hardware page table walking while in guest */
	htw_stop();

	/*
	 * While in guest context we're in the guest's address space, not the
	 * host process address space, so we need to be careful not to confuse
	 * e.g. cache management IPIs.
	 */
	kvm_mips_suspend_mm(cpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	/* We may have migrated while handling guest exits */
	cpu = smp_processor_id();

	/* Restore normal Linux process memory map */
	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu)))
		get_new_mmu_context(current->mm, cpu);
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
	kvm_mips_resume_mm(cpu);

	htw_start();

	pagefault_enable();

	return r;
}

static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

	.check_extension = kvm_trap_emul_check_extension,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.flush_shadow_all = kvm_trap_emul_flush_shadow_all,
	.flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.num_regs = kvm_trap_emul_num_regs,
	.copy_reg_indices = kvm_trap_emul_copy_reg_indices,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
	.vcpu_load = kvm_trap_emul_vcpu_load,
	.vcpu_put = kvm_trap_emul_vcpu_put,
	.vcpu_run = kvm_trap_emul_vcpu_run,
	.vcpu_reenter = kvm_trap_emul_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}