// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/asm-prototypes.h>
#include <asm/tm.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
#ifdef CONFIG_PPC_BOOK3S_64
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
#endif

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define HPTE_R_M _PAGE_COHERENT
#endif

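/*
 * A guest is in "split real mode" when MSR_DR is set but MSR_IR is
 * clear: instruction fetches use real addresses while data accesses
 * are translated.  PR KVM always runs the guest with translation on,
 * so such guests are handled by offsetting the guest PC into a
 * specially mapped region (SPLIT_HACK_OFFS) and flagging the vcpu
 * with BOOK3S_HFLAG_SPLIT_HACK so the offset can be undone later.
 */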
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	ulong pc = kvmppc_get_pc(vcpu);

	/* We are in DR only split real mode */
	if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
		return;

	/* We have not fixed up the guest already */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
		return;

	/* The code is in fixupable address space */
	if (pc & SPLIT_HACK_MASK)
		return;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);

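/*
 * Scheduled-in path: repopulate the shadow SLB from the vcpu state and
 * make sure the Alternate Interrupt Location is disabled, since PR KVM
 * relies on interrupts being delivered to the real-mode handlers while
 * the guest is running.
 */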
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu->in_use = 0;
	svcpu_put(svcpu);
#endif

	/* Disable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);

	kvmppc_restore_tm_pr(vcpu);
}

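/*
 * Scheduled-out path: sync state back from the shadow vcpu, give up
 * any lazily-owned facilities and FP/VMX/VSX register state, and
 * re-enable the Alternate Interrupt Location for the host.
 */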
static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use) {
		kvmppc_copy_from_svcpu(vcpu);
	}
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_unfixup_split_real(vcpu);

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_save_tm_pr(vcpu);

	/* Enable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
	svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
	svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
	svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
	svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
	svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
	svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
	svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
	svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
	svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
	svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
	svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
	svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
	svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
	svcpu->cr  = vcpu->arch.regs.ccr;
	svcpu->xer = vcpu->arch.regs.xer;
	svcpu->ctr = vcpu->arch.regs.ctr;
	svcpu->lr  = vcpu->arch.regs.link;
	svcpu->pc  = vcpu->arch.regs.nip;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;

	svcpu_put(svcpu);
}

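/*
 * Derive the MSR the guest actually runs with ("shadow MSR") from the
 * guest-visible MSR: keep only the bits the guest may control, then
 * force the bits required to run it safely in problem state.  For
 * example, a guest in "kernel mode" (MSR_PR clear) still executes with
 * MSR_PR set in the shadow MSR; the privilege level is purely emulated.
 */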
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE |
		MSR_TM | MSR_TS_MASK;
#else
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
#endif
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest has reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * In guest privileged state we want every TM transaction to
	 * fail, so clear the MSR TM bit; each tbegin. then traps into
	 * the host.
	 */
	if (!(guest_msr & MSR_PR))
		smsr &= ~MSR_TM;
#endif
	vcpu->arch.shadow_msr = smsr;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ulong old_msr;
#endif

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
	vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
	vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
	vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
	vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
	vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
	vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
	vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
	vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
	vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
	vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
	vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
	vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
	vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
	vcpu->arch.regs.ccr  = svcpu->cr;
	vcpu->arch.regs.xer = svcpu->xer;
	vcpu->arch.regs.ctr = svcpu->ctr;
	vcpu->arch.regs.link  = svcpu->lr;
	vcpu->arch.regs.nip  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Unlike other MSR bits, the MSR[TS] bits can be changed by the
	 * guest without notifying the host: they are modified by
	 * unprivileged instructions like "tbegin"/"tend"/"tresume"/
	 * "tsuspend" in a PR KVM guest.
	 *
	 * It is necessary to sync here to calculate a correct shadow_msr.
	 *
	 * A privileged guest's tbegin always fails at present, so we
	 * only need to take care of problem-state guests.
	 */
	old_msr = kvmppc_get_msr(vcpu);
	if (unlikely((old_msr & MSR_PR) &&
		     (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
		     (old_msr & (MSR_TS_MASK)))) {
		old_msr &= ~(MSR_TS_MASK);
		old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
		kvmppc_set_msr_fast(vcpu, old_msr);
		kvmppc_recalc_shadow_msr(vcpu);
	}
#endif

	svcpu->in_use = false;

out:
	svcpu_put(svcpu);
}

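/*
 * Transactional memory support: the TFHAR/TEXASR/TFIAR SPRs are only
 * accessible while TM is enabled in the host MSR, hence the
 * tm_enable()/tm_disable() brackets around the SPR accesses below.
 */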
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
	tm_disable();
}

void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
	mtspr(SPRN_TEXASR, vcpu->arch.texasr);
	mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	tm_disable();
}

/*
 * Load up the "math" facilities (FP/VEC/VSX) that are enabled in the
 * guest MSR but not yet enabled in hardware.
 */
static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
{
	ulong exit_nr;
	ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
		(MSR_FP | MSR_VEC | MSR_VSX);

	if (!ext_diff)
		return;

	if (ext_diff == MSR_FP)
		exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
	else if (ext_diff == MSR_VEC)
		exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
	else
		exit_nr = BOOK3S_INTERRUPT_VSX;

	kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
}

void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
		kvmppc_save_tm_sprs(vcpu);
		return;
	}

	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	preempt_disable();
	_kvmppc_save_tm_pr(vcpu, mfmsr());
	preempt_enable();
}

void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
		kvmppc_restore_tm_sprs(vcpu);
		if (kvmppc_get_msr(vcpu) & MSR_TM) {
			kvmppc_handle_lost_math_exts(vcpu);
			if (vcpu->arch.fscr & FSCR_TAR)
				kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		}
		return;
	}

	preempt_disable();
	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
	preempt_enable();

	if (kvmppc_get_msr(vcpu) & MSR_TM) {
		kvmppc_handle_lost_math_exts(vcpu);
		if (vcpu->arch.fscr & FSCR_TAR)
			kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
	}
}
#endif

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/
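/*
 * Flush the shadow PTEs for every guest page backed by the given HVA
 * range: walk all memslots intersecting the range, convert it to guest
 * physical frame numbers and flush those on each vcpu.
 */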
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
			  unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

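/*
 * Guest MSR write.  Besides updating the guest-visible MSR and the
 * shadow MSR, this handles MSR_POW-based idling, split real mode
 * fixups, and remapping of segments (including the magic page) when
 * the translation or privilege bits change.
 */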
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr;

	/* For PAPR guest, make sure MSR reflects guest mode */
	if (vcpu->arch.papr_enabled)
		msr = (msr & ~MSR_HV) | MSR_ME;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* We should never target a guest MSR with TS=10 && PR=0,
	 * since we always fail transactions in guest privileged
	 * state.
	 */
	if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr))
		kvmppc_emulate_tabort(vcpu,
			TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT);
#endif

	old_msr = kvmppc_get_msr(vcpu);
	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
	else
		kvmppc_unfixup_split_real(vcpu);

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around that we need to flush. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note:
	 * we assume that such a transition only happens while in kernel
	 * mode, i.e. we never transition from user 32-bit to kernel
	 * 64-bit with a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (kvmppc_get_msr(vcpu) & MSR_TM)
		kvmppc_handle_lost_math_exts(vcpu);
#endif
}

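/*
 * Select the MMU model and feature flags implied by the guest PVR:
 * 64-bit Book3S PVRs get the 64-bit MMU and a 64-bit MSR mask,
 * everything else falls back to the 32-bit MMU.  Host-specific quirks
 * (970 dcbz32, Cell FEx, paired singles on Gekko/Broadway) are applied
 * afterwards.
 */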
void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
	case PVR_POWER9:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr	%0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have 32-byte cache lines, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz
 * and emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}

static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
		return true;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

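/*
 * Central page-fault path.  Translate the faulting address through the
 * guest MMU (or fabricate a real-mode mapping), then either reflect
 * the fault into the guest, map the page on the host, or hand the
 * access to the MMIO emulation path.
 */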
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte = { 0 };
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
		pte.wimg = HPTE_R_M;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
		if (!data &&
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		/* fall through */
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT || page_found == -EPERM) {
		/* Page not found in guest PTE entries, or protection fault */
		u64 flags;

		if (page_found == -EPERM)
			flags = DSISR_PROTFAULT;
		else
			flags = DSISR_NOHPTE;
		if (data) {
			flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
			kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
		} else {
			kvmppc_core_queue_inst_storage(vcpu, flags);
		}
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

	return r;
}

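/*
 * FP, Altivec and VSX are switched lazily: guest_owned_ext tracks
 * which register sets currently hold guest values in hardware.
 * kvmppc_giveup_ext() hands them back to the host, and
 * kvmppc_handle_ext() further below loads them up in response to a
 * facility-unavailable interrupt from the guest.
 */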
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request */
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX?  Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}

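/*
 * FSCR-controlled facility handling for Book3S-64.  A facility
 * unavailable interrupt is either reflected into the guest (when the
 * guest has the facility disabled), emulated, or, in the case of the
 * TAR, resolved by switching the real register over to the guest value.
 */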
#ifdef CONFIG_PPC_BOOK3S_64

void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
	/* Inject the Interrupt Cause field and trigger a guest interrupt */
	vcpu->arch.fscr &= ~(0xffULL << 56);
	vcpu->arch.fscr |= (fac << 56);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	enum emulation_result er = EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
		er = kvmppc_emulate_instruction(vcpu->run, vcpu);

	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
	}
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	bool guest_fac_enabled;
	BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

	/*
	 * Not every facility is enabled by FSCR bits, check whether the
	 * guest has this facility enabled at all.
	 */
	switch (fac) {
	case FSCR_TAR_LG:
	case FSCR_EBB_LG:
		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
		break;
	case FSCR_TM_LG:
		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
		break;
	default:
		guest_fac_enabled = false;
		break;
	}

	if (!guest_fac_enabled) {
		/* Facility not enabled by the guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
		return RESUME_GUEST;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		/* TAR switching isn't lazy in Linux yet */
		current->thread.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, vcpu->arch.tar);
		vcpu->arch.shadow_fscr |= FSCR_TAR;
		break;
	default:
		kvmppc_emulate_fac(vcpu, fac);
		break;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Since we disabled MSR_TM in privileged state, an mfspr of a TM
	 * SPR can trigger a TM facility-unavailable interrupt. In that
	 * case the emulation is handled by kvmppc_emulate_fac(), which
	 * finally invokes kvmppc_emulate_mfspr(). But note the mfspr's
	 * RT may be a non-volatile register, so we have to restore the
	 * non-volatile registers to reflect the update.
	 */
	if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
		return RESUME_GUEST_NV;
#endif

	return RESUME_GUEST;
}

void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
		/* TAR got dropped, drop it in shadow too */
		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	} else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
		vcpu->arch.fscr = fscr;
		kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		return;
	}

	vcpu->arch.fscr = fscr;
}
#endif

static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr | MSR_SE);
	}
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
	}
}

static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int exit_nr)
{
	enum emulation_result er;
	ulong flags;
	u32 last_inst;
	int emul, r;

	/*
	 * shadow_srr1 only contains valid flags if we came here via a program
	 * exception. The other exceptions (emulation assist, FP unavailable,
	 * etc.) do not provide flags in SRR1, so use an illegal-instruction
	 * exception when injecting a program interrupt into the guest.
	 */
	if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
	else
		flags = SRR1_PROGILL;

	emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
	if (emul != EMULATE_DONE)
		return RESUME_GUEST;

	if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
		pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
			kvmppc_get_pc(vcpu), last_inst);
#endif
		if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
			kvmppc_core_queue_program(vcpu, flags);
			return RESUME_GUEST;
		}
	}

	vcpu->stat.emulated_inst_exits++;
	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_FAIL:
		pr_crit("%s: emulation at %lx failed (%08x)\n",
			__func__, kvmppc_get_pc(vcpu), last_inst);
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST_NV;
		break;
	case EMULATE_EXIT_USER:
		r = RESUME_HOST_NV;
		break;
	default:
		BUG();
	}

	return r;
}

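/*
 * Main exit dispatcher for PR KVM.  Runs with interrupts enabled and
 * decides, per interrupt vector, whether to fix things up and resume
 * the guest or to exit to userspace.
 */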
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

		if (kvmppc_is_split_real(vcpu))
			kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			kvmppc_core_queue_inst_storage(vcpu,
						shadow_srr1 & 0x58000000);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
	case BOOK3S_INTERRUPT_DOORBELL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
	case BOOK3S_INTERRUPT_H_VIRT:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
	case BOOK3S_INTERRUPT_SYSTEM_RESET:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
		break;
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		u32 last_sc;
		int emul;

		/* Get last sc for papr */
		if (vcpu->arch.papr_enabled) {
			/* The sc instruction points SRR0 to the next inst */
			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
			if (emul != EMULATE_DONE) {
				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
				r = RESUME_GUEST;
				break;
			}
		}

		if (vcpu->arch.papr_enabled &&
		    (last_sc == 0x44000022) &&
		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_PPC_BOOK3S_64
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;
		int emul;
		u32 last_inst;

		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
			/* Do paired single instruction emulation */
			emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
						    &last_inst);
			if (emul == EMULATE_DONE)
				r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
			else
				r = RESUME_GUEST;

			break;
		}

		/* Enable external provider */
		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL:
			ext_msr = MSR_FP;
			break;

		case BOOK3S_INTERRUPT_ALTIVEC:
			ext_msr = MSR_VEC;
			break;

		case BOOK3S_INTERRUPT_VSX:
			ext_msr = MSR_VSX;
			break;
		}

		r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
	{
		u32 last_inst;
		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

		if (emul == EMULATE_DONE) {
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
		r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
		break;
#endif
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_TRACE:
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			run->exit_reason = KVM_EXIT_DEBUG;
			r = RESUME_HOST;
		} else {
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

3a167bea AK |
1466 | static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu, |
1467 | struct kvm_sregs *sregs) | |
f05ed4d5 PM |
1468 | { |
1469 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | |
1470 | int i; | |
1471 | ||
3a167bea | 1472 | kvmppc_set_pvr_pr(vcpu, sregs->pvr); |
f05ed4d5 PM |
1473 | |
1474 | vcpu3s->sdr1 = sregs->u.s.sdr1; | |
f4093ee9 | 1475 | #ifdef CONFIG_PPC_BOOK3S_64 |
f05ed4d5 | 1476 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { |
f4093ee9 GK |
1477 | /* Flush all SLB entries */ |
1478 | vcpu->arch.mmu.slbmte(vcpu, 0, 0); | |
1479 | vcpu->arch.mmu.slbia(vcpu); | |
1480 | ||
f05ed4d5 | 1481 | for (i = 0; i < 64; i++) { |
f4093ee9 GK |
1482 | u64 rb = sregs->u.s.ppc64.slb[i].slbe; |
1483 | u64 rs = sregs->u.s.ppc64.slb[i].slbv; | |
1484 | ||
1485 | if (rb & SLB_ESID_V) | |
1486 | vcpu->arch.mmu.slbmte(vcpu, rs, rb); | |
f05ed4d5 | 1487 | } |
f4093ee9 GK |
1488 | } else |
1489 | #endif | |
1490 | { | |
f05ed4d5 PM |
1491 | for (i = 0; i < 16; i++) { |
1492 | vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); | |
1493 | } | |
1494 | for (i = 0; i < 8; i++) { | |
1495 | kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false, | |
1496 | (u32)sregs->u.s.ppc32.ibat[i]); | |
1497 | kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true, | |
1498 | (u32)(sregs->u.s.ppc32.ibat[i] >> 32)); | |
1499 | kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false, | |
1500 | (u32)sregs->u.s.ppc32.dbat[i]); | |
1501 | kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true, | |
1502 | (u32)(sregs->u.s.ppc32.dbat[i] >> 32)); | |
1503 | } | |
1504 | } | |
1505 | ||
1506 | /* Flush the MMU after messing with the segments */ | |
1507 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | |
1508 | ||
1509 | return 0; | |
1510 | } | |
1511 | ||
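/*
 * Illustrative userspace sketch (not part of this file or the kernel
 * build): the GET/SET_SREGS round trip that lands in the two handlers
 * above. KVM_GET_SREGS, KVM_SET_SREGS and struct kvm_sregs are standard
 * KVM uAPI; the vcpu_fd descriptor and the helper name are assumptions.
 */
#if 0	/* example only, never compiled here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int tweak_sdr1(int vcpu_fd, __u64 new_sdr1)
{
	struct kvm_sregs sregs;

	/* Read the current PVR, SDR1, segment registers and BATs. */
	if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		return -1;
	/* Point the guest hashed page table somewhere else. */
	sregs.u.s.sdr1 = new_sdr1;
	/* Triggers the segment/MMU flush seen in the setter above. */
	return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
}
#endif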
3a167bea AK |
1512 | static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, |
1513 | union kvmppc_one_reg *val) | |
31f3438e | 1514 | { |
a136a8bd | 1515 | int r = 0; |
31f3438e | 1516 | |
a136a8bd | 1517 | switch (id) { |
a59c1d9e MS |
1518 | case KVM_REG_PPC_DEBUG_INST: |
1519 | *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT); | |
1520 | break; | |
31f3438e | 1521 | case KVM_REG_PPC_HIOR: |
a136a8bd | 1522 | *val = get_reg_val(id, to_book3s(vcpu)->hior); |
31f3438e | 1523 | break; |
88b02cf9 PM |
1524 | case KVM_REG_PPC_VTB: |
1525 | *val = get_reg_val(id, to_book3s(vcpu)->vtb); | |
1526 | break; | |
e5ee5422 | 1527 | case KVM_REG_PPC_LPCR: |
a0840240 | 1528 | case KVM_REG_PPC_LPCR_64: |
e5ee5422 AK |
1529 | /* |
1530 | * We are only interested in the LPCR_ILE bit | |
1531 | */ | |
1532 | if (vcpu->arch.intr_msr & MSR_LE) | |
1533 | *val = get_reg_val(id, LPCR_ILE); | |
1534 | else | |
1535 | *val = get_reg_val(id, 0); | |
1536 | break; | |
deeb879d SG |
1537 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1538 | case KVM_REG_PPC_TFHAR: | |
1539 | *val = get_reg_val(id, vcpu->arch.tfhar); | |
1540 | break; | |
1541 | case KVM_REG_PPC_TFIAR: | |
1542 | *val = get_reg_val(id, vcpu->arch.tfiar); | |
1543 | break; | |
1544 | case KVM_REG_PPC_TEXASR: | |
1545 | *val = get_reg_val(id, vcpu->arch.texasr); | |
1546 | break; | |
1547 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: | |
1548 | *val = get_reg_val(id, | |
1549 | vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]); | |
1550 | break; | |
1551 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: | |
1552 | { | |
1553 | int i, j; | |
1554 | ||
1555 | i = id - KVM_REG_PPC_TM_VSR0; | |
1556 | if (i < 32) | |
1557 | for (j = 0; j < TS_FPRWIDTH; j++) | |
1558 | val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; | |
1559 | else { | |
1560 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | |
1561 | val->vval = vcpu->arch.vr_tm.vr[i-32]; | |
1562 | else | |
1563 | r = -ENXIO; | |
1564 | } | |
1565 | break; | |
1566 | } | |
1567 | case KVM_REG_PPC_TM_CR: | |
1568 | *val = get_reg_val(id, vcpu->arch.cr_tm); | |
1569 | break; | |
1570 | case KVM_REG_PPC_TM_XER: | |
1571 | *val = get_reg_val(id, vcpu->arch.xer_tm); | |
1572 | break; | |
1573 | case KVM_REG_PPC_TM_LR: | |
1574 | *val = get_reg_val(id, vcpu->arch.lr_tm); | |
1575 | break; | |
1576 | case KVM_REG_PPC_TM_CTR: | |
1577 | *val = get_reg_val(id, vcpu->arch.ctr_tm); | |
1578 | break; | |
1579 | case KVM_REG_PPC_TM_FPSCR: | |
1580 | *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); | |
1581 | break; | |
1582 | case KVM_REG_PPC_TM_AMR: | |
1583 | *val = get_reg_val(id, vcpu->arch.amr_tm); | |
1584 | break; | |
1585 | case KVM_REG_PPC_TM_PPR: | |
1586 | *val = get_reg_val(id, vcpu->arch.ppr_tm); | |
1587 | break; | |
1588 | case KVM_REG_PPC_TM_VRSAVE: | |
1589 | *val = get_reg_val(id, vcpu->arch.vrsave_tm); | |
1590 | break; | |
1591 | case KVM_REG_PPC_TM_VSCR: | |
1592 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | |
1593 | *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); | |
1594 | else | |
1595 | r = -ENXIO; | |
1596 | break; | |
1597 | case KVM_REG_PPC_TM_DSCR: | |
1598 | *val = get_reg_val(id, vcpu->arch.dscr_tm); | |
1599 | break; | |
1600 | case KVM_REG_PPC_TM_TAR: | |
1601 | *val = get_reg_val(id, vcpu->arch.tar_tm); | |
1602 | break; | |
1603 | #endif | |
31f3438e | 1604 | default: |
a136a8bd | 1605 | r = -EINVAL; |
31f3438e PM |
1606 | break; |
1607 | } | |
1608 | ||
1609 | return r; | |
1610 | } | |
1611 | ||
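/*
 * Illustrative userspace sketch (not part of this file): reading a
 * register through the generic ONE_REG interface, which is what routes
 * into kvmppc_get_one_reg_pr() above. KVM_GET_ONE_REG, struct kvm_one_reg
 * and KVM_REG_PPC_HIOR are standard uAPI; vcpu_fd is an assumption.
 */
#if 0	/* example only, never compiled here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_hior(int vcpu_fd, __u64 *hior)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_HIOR,
		/* The kernel copies the register value to this address. */
		.addr = (__u64)(unsigned long)hior,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
#endif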
e5ee5422 AK |
1612 | static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr) |
1613 | { | |
1614 | if (new_lpcr & LPCR_ILE) | |
1615 | vcpu->arch.intr_msr |= MSR_LE; | |
1616 | else | |
1617 | vcpu->arch.intr_msr &= ~MSR_LE; | |
1618 | } | |
1619 | ||
3a167bea AK |
1620 | static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, |
1621 | union kvmppc_one_reg *val) | |
31f3438e | 1622 | { |
a136a8bd | 1623 | int r = 0; |
31f3438e | 1624 | |
a136a8bd | 1625 | switch (id) { |
31f3438e | 1626 | case KVM_REG_PPC_HIOR: |
a136a8bd PM |
1627 | to_book3s(vcpu)->hior = set_reg_val(id, *val); |
1628 | to_book3s(vcpu)->hior_explicit = true; | |
31f3438e | 1629 | break; |
88b02cf9 PM |
1630 | case KVM_REG_PPC_VTB: |
1631 | to_book3s(vcpu)->vtb = set_reg_val(id, *val); | |
1632 | break; | |
e5ee5422 | 1633 | case KVM_REG_PPC_LPCR: |
a0840240 | 1634 | case KVM_REG_PPC_LPCR_64: |
e5ee5422 AK |
1635 | kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); |
1636 | break; | |
deeb879d SG |
1637 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1638 | case KVM_REG_PPC_TFHAR: | |
1639 | vcpu->arch.tfhar = set_reg_val(id, *val); | |
1640 | break; | |
1641 | case KVM_REG_PPC_TFIAR: | |
1642 | vcpu->arch.tfiar = set_reg_val(id, *val); | |
1643 | break; | |
1644 | case KVM_REG_PPC_TEXASR: | |
1645 | vcpu->arch.texasr = set_reg_val(id, *val); | |
1646 | break; | |
1647 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: | |
1648 | vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] = | |
1649 | set_reg_val(id, *val); | |
1650 | break; | |
1651 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: | |
1652 | { | |
1653 | int i, j; | |
1654 | ||
1655 | i = id - KVM_REG_PPC_TM_VSR0; | |
1656 | if (i < 32) | |
1657 | for (j = 0; j < TS_FPRWIDTH; j++) | |
1658 | vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; | |
1659 | else | |
1660 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | |
1661 | vcpu->arch.vr_tm.vr[i-32] = val->vval; | |
1662 | else | |
1663 | r = -ENXIO; | |
1664 | break; | |
1665 | } | |
1666 | case KVM_REG_PPC_TM_CR: | |
1667 | vcpu->arch.cr_tm = set_reg_val(id, *val); | |
1668 | break; | |
1669 | case KVM_REG_PPC_TM_XER: | |
1670 | vcpu->arch.xer_tm = set_reg_val(id, *val); | |
1671 | break; | |
1672 | case KVM_REG_PPC_TM_LR: | |
1673 | vcpu->arch.lr_tm = set_reg_val(id, *val); | |
1674 | break; | |
1675 | case KVM_REG_PPC_TM_CTR: | |
1676 | vcpu->arch.ctr_tm = set_reg_val(id, *val); | |
1677 | break; | |
1678 | case KVM_REG_PPC_TM_FPSCR: | |
1679 | vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); | |
1680 | break; | |
1681 | case KVM_REG_PPC_TM_AMR: | |
1682 | vcpu->arch.amr_tm = set_reg_val(id, *val); | |
1683 | break; | |
1684 | case KVM_REG_PPC_TM_PPR: | |
1685 | vcpu->arch.ppr_tm = set_reg_val(id, *val); | |
1686 | break; | |
1687 | case KVM_REG_PPC_TM_VRSAVE: | |
1688 | vcpu->arch.vrsave_tm = set_reg_val(id, *val); | |
1689 | break; | |
1690 | case KVM_REG_PPC_TM_VSCR: | |
1691 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | |
1692 | vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); | |
1693 | else | |
1694 | r = -ENXIO; | |
1695 | break; | |
1696 | case KVM_REG_PPC_TM_DSCR: | |
1697 | vcpu->arch.dscr_tm = set_reg_val(id, *val); | |
1698 | break; | |
1699 | case KVM_REG_PPC_TM_TAR: | |
1700 | vcpu->arch.tar_tm = set_reg_val(id, *val); | |
1701 | break; | |
1702 | #endif | |
31f3438e | 1703 | default: |
a136a8bd | 1704 | r = -EINVAL; |
31f3438e PM |
1705 | break; |
1706 | } | |
1707 | ||
1708 | return r; | |
1709 | } | |
1710 | ||
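/*
 * Illustrative userspace sketch (not part of this file): the write side,
 * driving kvmppc_set_lpcr_pr() above to pick the guest interrupt
 * endianness. KVM_SET_ONE_REG and KVM_REG_PPC_LPCR_64 are standard uAPI;
 * vcpu_fd and the local LPCR_ILE definition (assumed to mirror the
 * kernel's asm/reg.h value) are assumptions for illustration.
 */
#if 0	/* example only, never compiled here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define LPCR_ILE	0x02000000ull	/* assumed to match asm/reg.h */

static int set_guest_ile(int vcpu_fd, int little_endian)
{
	__u64 lpcr = little_endian ? LPCR_ILE : 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_LPCR_64,
		.addr = (__u64)(unsigned long)&lpcr,
	};

	/* Only the ILE bit is honoured by the PR setter above. */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
#endif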
3a167bea AK |
1711 | static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, |
1712 | unsigned int id) | |
f05ed4d5 PM |
1713 | { |
1714 | struct kvmppc_vcpu_book3s *vcpu_book3s; | |
1715 | struct kvm_vcpu *vcpu; | |
1716 | int err = -ENOMEM; | |
1717 | unsigned long p; | |
1718 | ||
3ff95502 PM |
1719 | vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); |
1720 | if (!vcpu) | |
f05ed4d5 PM |
1721 | goto out; |
1722 | ||
f05ed4d5 PM |
1723 | vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); |
1724 | if (!vcpu_book3s) | |
f05ed4d5 | 1725 | goto free_vcpu; |
3ff95502 | 1726 | vcpu->arch.book3s = vcpu_book3s; |
f05ed4d5 | 1727 | |
ab78475c | 1728 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
3ff95502 PM |
1729 | vcpu->arch.shadow_vcpu = |
1730 | kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); | |
1731 | if (!vcpu->arch.shadow_vcpu) | |
1732 | goto free_vcpu3s; | |
a2d56020 | 1733 | #endif |
f05ed4d5 | 1734 | |
f05ed4d5 PM |
1735 | err = kvm_vcpu_init(vcpu, kvm, id); |
1736 | if (err) | |
1737 | goto free_shadow_vcpu; | |
1738 | ||
7c7b406e | 1739 | err = -ENOMEM; |
f05ed4d5 | 1740 | p = __get_free_page(GFP_KERNEL|__GFP_ZERO); |
f05ed4d5 PM |
1741 | if (!p) |
1742 | goto uninit_vcpu; | |
89b68c96 | 1743 | vcpu->arch.shared = (void *)p; |
f05ed4d5 | 1744 | #ifdef CONFIG_PPC_BOOK3S_64 |
5deb8e7a AG |
1745 | /* Always start the shared struct in native endian mode */ |
1746 | #ifdef __BIG_ENDIAN__ | |
1747 | vcpu->arch.shared_big_endian = true; | |
1748 | #else | |
1749 | vcpu->arch.shared_big_endian = false; | |
1750 | #endif | |
1751 | ||
a4a0f252 PM |
1752 | /* |
1753 | * Default to the same PVR as the host if we're on a machine | |
1754 | * recent enough to have 1TB segments; | |
1755 | * otherwise default to PPC970FX. | |
1756 | */ | |
f05ed4d5 | 1757 | vcpu->arch.pvr = 0x3C0301; |
a4a0f252 PM |
1758 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) |
1759 | vcpu->arch.pvr = mfspr(SPRN_PVR); | |
e5ee5422 | 1760 | vcpu->arch.intr_msr = MSR_SF; |
f05ed4d5 PM |
1761 | #else |
1762 | /* default to book3s_32 (750) */ | |
1763 | vcpu->arch.pvr = 0x84202; | |
1764 | #endif | |
3a167bea | 1765 | kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); |
f05ed4d5 PM |
1766 | vcpu->arch.slb_nr = 64; |
1767 | ||
94810ba4 | 1768 | vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; |
f05ed4d5 PM |
1769 | |
1770 | err = kvmppc_mmu_init(vcpu); | |
1771 | if (err < 0) | |
1772 | goto uninit_vcpu; | |
1773 | ||
1774 | return vcpu; | |
1775 | ||
1776 | uninit_vcpu: | |
1777 | kvm_vcpu_uninit(vcpu); | |
1778 | free_shadow_vcpu: | |
ab78475c | 1779 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
3ff95502 PM |
1780 | kfree(vcpu->arch.shadow_vcpu); |
1781 | free_vcpu3s: | |
a2d56020 | 1782 | #endif |
f05ed4d5 | 1783 | vfree(vcpu_book3s); |
3ff95502 PM |
1784 | free_vcpu: |
1785 | kmem_cache_free(kvm_vcpu_cache, vcpu); | |
f05ed4d5 PM |
1786 | out: |
1787 | return ERR_PTR(err); | |
1788 | } | |
1789 | ||
3a167bea | 1790 | static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) |
f05ed4d5 PM |
1791 | { |
1792 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | |
1793 | ||
1794 | free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); | |
1795 | kvm_vcpu_uninit(vcpu); | |
ab78475c | 1796 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
3ff95502 PM |
1797 | kfree(vcpu->arch.shadow_vcpu); |
1798 | #endif | |
f05ed4d5 | 1799 | vfree(vcpu_book3s); |
3ff95502 | 1800 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
f05ed4d5 PM |
1801 | } |
1802 | ||
3a167bea | 1803 | static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
f05ed4d5 PM |
1804 | { |
1805 | int ret; | |
f05ed4d5 | 1806 | #ifdef CONFIG_ALTIVEC |
f05ed4d5 | 1807 | unsigned long uninitialized_var(vrsave); |
f05ed4d5 | 1808 | #endif |
f05ed4d5 | 1809 | |
af8f38b3 AG |
1810 | /* Check if we can run the vcpu at all */ |
1811 | if (!vcpu->arch.sane) { | |
1812 | kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
7d82714d AG |
1813 | ret = -EINVAL; |
1814 | goto out; | |
af8f38b3 AG |
1815 | } |
1816 | ||
11dd6ac0 LV |
1817 | kvmppc_setup_debug(vcpu); |
1818 | ||
e371f713 AG |
1819 | /* |
1820 | * Interrupts could be timers for the guest that we have to inject | |
1821 | * again, so postpone them until we're back in the guest. If we | |
1822 | * really did time things that badly, we just exit again due to | |
1823 | * a host external interrupt. | |
1824 | */ | |
7ee78855 | 1825 | ret = kvmppc_prepare_to_enter(vcpu); |
6c85f52b | 1826 | if (ret <= 0) |
7d82714d | 1827 | goto out; |
6c85f52b | 1828 | /* interrupts now hard-disabled */ |
f05ed4d5 | 1829 | |
c2085059 AB |
1830 | /* Save FPU, Altivec and VSX state */ |
1831 | giveup_all(current); | |
f05ed4d5 | 1832 | |
f05ed4d5 | 1833 | /* Preload FPU if it's enabled */ |
5deb8e7a | 1834 | if (kvmppc_get_msr(vcpu) & MSR_FP) |
f05ed4d5 PM |
1835 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
1836 | ||
5f1c248f | 1837 | kvmppc_fix_ee_before_entry(); |
df6909e5 PM |
1838 | |
1839 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); | |
1840 | ||
11dd6ac0 LV |
1841 | kvmppc_clear_debug(vcpu); |
1842 | ||
6edaa530 | 1843 | /* No need for guest_exit; it's done in handle_exit. |
24afa37b | 1844 | * We also get here with interrupts enabled. */ |
f05ed4d5 | 1845 | |
f05ed4d5 | 1846 | /* Make sure we save the guest FPU/Altivec/VSX state */ |
28c483b6 PM |
1847 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); |
1848 | ||
e14e7a1e AG |
1849 | /* Make sure we save the guest TAR/EBB/DSCR state */ |
1850 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | |
1851 | ||
7d82714d | 1852 | out: |
0652eaae | 1853 | vcpu->mode = OUTSIDE_GUEST_MODE; |
f05ed4d5 PM |
1854 | return ret; |
1855 | } | |
1856 | ||
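/*
 * Illustrative userspace sketch (not part of this file): the canonical
 * run loop that ends up in kvmppc_vcpu_run_pr() above. Mapping struct
 * kvm_run and issuing KVM_RUN are standard uAPI; kvm_fd/vcpu_fd and the
 * exit handling are assumptions for illustration.
 */
#if 0	/* example only, never compiled here */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int run_vcpu(int kvm_fd, int vcpu_fd)
{
	long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run;

	if (size < 0)
		return -1;
	run = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   vcpu_fd, 0);
	if (run == MAP_FAILED)
		return -1;

	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;
		/* e.g. the single-step exit queued by the debug path */
		if (run->exit_reason == KVM_EXIT_DEBUG)
			break;
	}
	return 0;
}
#endif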
82ed3616 PM |
1857 | /* |
1858 | * Get (and clear) the dirty memory log for a memory slot. | |
1859 | */ | |
3a167bea AK |
1860 | static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm, |
1861 | struct kvm_dirty_log *log) | |
82ed3616 | 1862 | { |
9f6b8029 | 1863 | struct kvm_memslots *slots; |
82ed3616 PM |
1864 | struct kvm_memory_slot *memslot; |
1865 | struct kvm_vcpu *vcpu; | |
1866 | ulong ga, ga_end; | |
1867 | int is_dirty = 0; | |
1868 | int r; | |
1869 | unsigned long n; | |
1870 | ||
1871 | mutex_lock(&kvm->slots_lock); | |
1872 | ||
1873 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | |
1874 | if (r) | |
1875 | goto out; | |
1876 | ||
1877 | /* If nothing is dirty, don't bother messing with page tables. */ | |
1878 | if (is_dirty) { | |
9f6b8029 PB |
1879 | slots = kvm_memslots(kvm); |
1880 | memslot = id_to_memslot(slots, log->slot); | |
82ed3616 PM |
1881 | |
1882 | ga = memslot->base_gfn << PAGE_SHIFT; | |
1883 | ga_end = ga + (memslot->npages << PAGE_SHIFT); | |
1884 | ||
1885 | kvm_for_each_vcpu(n, vcpu, kvm) | |
1886 | kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); | |
1887 | ||
1888 | n = kvm_dirty_bitmap_bytes(memslot); | |
1889 | memset(memslot->dirty_bitmap, 0, n); | |
1890 | } | |
1891 | ||
1892 | r = 0; | |
1893 | out: | |
1894 | mutex_unlock(&kvm->slots_lock); | |
1895 | return r; | |
1896 | } | |
1897 | ||
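/*
 * Illustrative userspace sketch (not part of this file): retrieving the
 * dirty bitmap that the handler above fills and clears. KVM_GET_DIRTY_LOG
 * and struct kvm_dirty_log are standard uAPI; vm_fd and the caller-sized
 * bitmap ((npages + 63) / 64 u64 words) are assumptions.
 */
#if 0	/* example only, never compiled here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int fetch_dirty_log(int vm_fd, __u32 slot, void *bitmap)
{
	struct kvm_dirty_log log = { .slot = slot };

	log.dirty_bitmap = bitmap;	/* one bit per page in the slot */
	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}
#endif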
3a167bea AK |
1898 | static void kvmppc_core_flush_memslot_pr(struct kvm *kvm, |
1899 | struct kvm_memory_slot *memslot) | |
5b74716e | 1900 | { |
3a167bea AK |
1901 | return; |
1902 | } | |
5b74716e | 1903 | |
3a167bea AK |
1904 | static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm, |
1905 | struct kvm_memory_slot *memslot, | |
09170a49 | 1906 | const struct kvm_userspace_memory_region *mem) |
3a167bea | 1907 | { |
5b74716e BH |
1908 | return 0; |
1909 | } | |
5b74716e | 1910 | |
3a167bea | 1911 | static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm, |
09170a49 | 1912 | const struct kvm_userspace_memory_region *mem, |
f36f3f28 | 1913 | const struct kvm_memory_slot *old, |
f032b734 BR |
1914 | const struct kvm_memory_slot *new, |
1915 | enum kvm_mr_change change) | |
a66b48c3 | 1916 | { |
3a167bea | 1917 | return; |
a66b48c3 PM |
1918 | } |
1919 | ||
3a167bea AK |
1920 | static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free, |
1921 | struct kvm_memory_slot *dont) | |
a66b48c3 | 1922 | { |
3a167bea | 1923 | return; |
a66b48c3 PM |
1924 | } |
1925 | ||
3a167bea AK |
1926 | static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot, |
1927 | unsigned long npages) | |
f9e0554d PM |
1928 | { |
1929 | return 0; | |
1930 | } | |
1931 | ||
3a167bea | 1932 | |
5b74716e | 1933 | #ifdef CONFIG_PPC64 |
3a167bea AK |
1934 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, |
1935 | struct kvm_ppc_smmu_info *info) | |
dfe49dbd | 1936 | { |
a4a0f252 PM |
1937 | long int i; |
1938 | struct kvm_vcpu *vcpu; | |
1939 | ||
1940 | info->flags = 0; | |
5b74716e BH |
1941 | |
1942 | /* SLB is always 64 entries */ | |
1943 | info->slb_size = 64; | |
1944 | ||
1945 | /* Standard 4k base page size segment */ | |
1946 | info->sps[0].page_shift = 12; | |
1947 | info->sps[0].slb_enc = 0; | |
1948 | info->sps[0].enc[0].page_shift = 12; | |
1949 | info->sps[0].enc[0].pte_enc = 0; | |
1950 | ||
a4a0f252 PM |
1951 | /* |
1952 | * 64k large page size. | |
1953 | * We only want to put this in if the CPUs we're emulating | |
1954 | * support it, but unfortunately we don't have a vcpu at hand | |
1955 | * here to test with. Just pick the first vcpu, and if | |
1956 | * that doesn't exist yet, report the minimum capability, | |
1957 | * i.e., no 64k pages. | |
1958 | * 1T segment support goes along with 64k pages. | |
1959 | */ | |
1960 | i = 1; | |
1961 | vcpu = kvm_get_vcpu(kvm, 0); | |
1962 | if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { | |
1963 | info->flags = KVM_PPC_1T_SEGMENTS; | |
1964 | info->sps[i].page_shift = 16; | |
1965 | info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01; | |
1966 | info->sps[i].enc[0].page_shift = 16; | |
1967 | info->sps[i].enc[0].pte_enc = 1; | |
1968 | ++i; | |
1969 | } | |
1970 | ||
5b74716e | 1971 | /* Standard 16M large page size segment */ |
a4a0f252 PM |
1972 | info->sps[i].page_shift = 24; |
1973 | info->sps[i].slb_enc = SLB_VSID_L; | |
1974 | info->sps[i].enc[0].page_shift = 24; | |
1975 | info->sps[i].enc[0].pte_enc = 0; | |
dfe49dbd | 1976 | |
5b74716e BH |
1977 | return 0; |
1978 | } | |
9617a0b3 PM |
1979 | |
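/*
 * Illustrative userspace sketch (not part of this file): querying the
 * segment/page-size capabilities reported by the handler above.
 * KVM_PPC_GET_SMMU_INFO, struct kvm_ppc_smmu_info and KVM_PPC_1T_SEGMENTS
 * are standard powerpc uAPI; vm_fd is an assumption.
 */
#if 0	/* example only, never compiled here */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int guest_has_1t_segments(int vm_fd)
{
	struct kvm_ppc_smmu_info info;

	if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0)
		return -1;
	return !!(info.flags & KVM_PPC_1T_SEGMENTS);
}
#endif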
1980 | static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) | |
1981 | { | |
1982 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) | |
1983 | return -ENODEV; | |
1984 | /* Require flags and process table base and size to all be zero. */ | |
1985 | if (cfg->flags || cfg->process_table) | |
1986 | return -EINVAL; | |
1987 | return 0; | |
1988 | } | |
1989 | ||
3a167bea AK |
1990 | #else |
1991 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, | |
1992 | struct kvm_ppc_smmu_info *info) | |
f9e0554d | 1993 | { |
3a167bea AK |
1994 | /* We should not get called */ |
1995 | BUG(); | |
f9e0554d | 1996 | } |
3a167bea | 1997 | #endif /* CONFIG_PPC64 */ |
f9e0554d | 1998 | |
a413f474 IM |
1999 | static unsigned int kvm_global_user_count = 0; |
2000 | static DEFINE_SPINLOCK(kvm_global_user_count_lock); | |
2001 | ||
3a167bea | 2002 | static int kvmppc_core_init_vm_pr(struct kvm *kvm) |
f9e0554d | 2003 | { |
9308ab8e | 2004 | mutex_init(&kvm->arch.hpt_mutex); |
f31e65e1 | 2005 | |
699a0ea0 PM |
2006 | #ifdef CONFIG_PPC_BOOK3S_64 |
2007 | /* Start out with the default set of hcalls enabled */ | |
2008 | kvmppc_pr_init_default_hcalls(kvm); | |
2009 | #endif | |
2010 | ||
a413f474 IM |
2011 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { |
2012 | spin_lock(&kvm_global_user_count_lock); | |
2013 | if (++kvm_global_user_count == 1) | |
d3cbff1b | 2014 | pseries_disable_reloc_on_exc(); |
a413f474 IM |
2015 | spin_unlock(&kvm_global_user_count_lock); |
2016 | } | |
f9e0554d PM |
2017 | return 0; |
2018 | } | |
2019 | ||
3a167bea | 2020 | static void kvmppc_core_destroy_vm_pr(struct kvm *kvm) |
f9e0554d | 2021 | { |
f31e65e1 BH |
2022 | #ifdef CONFIG_PPC64 |
2023 | WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); | |
2024 | #endif | |
a413f474 IM |
2025 | |
2026 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { | |
2027 | spin_lock(&kvm_global_user_count_lock); | |
2028 | BUG_ON(kvm_global_user_count == 0); | |
2029 | if (--kvm_global_user_count == 0) | |
d3cbff1b | 2030 | pseries_enable_reloc_on_exc(); |
a413f474 IM |
2031 | spin_unlock(&kvm_global_user_count_lock); |
2032 | } | |
f9e0554d PM |
2033 | } |
2034 | ||
3a167bea | 2035 | static int kvmppc_core_check_processor_compat_pr(void) |
f05ed4d5 | 2036 | { |
50de596d | 2037 | /* |
ec531d02 PM |
2038 | * PR KVM can work on POWER9 inside a guest partition |
2039 | * running in HPT mode. It can't work if we are using | |
2040 | * radix translation (because radix provides no way for | |
db96a04a | 2041 | * a process to have unique translations in quadrant 3). |
50de596d | 2042 | */ |
db96a04a | 2043 | if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled()) |
50de596d | 2044 | return -EIO; |
3a167bea AK |
2045 | return 0; |
2046 | } | |
f05ed4d5 | 2047 | |
3a167bea AK |
2048 | static long kvm_arch_vm_ioctl_pr(struct file *filp, |
2049 | unsigned int ioctl, unsigned long arg) | |
2050 | { | |
2051 | return -ENOTTY; | |
2052 | } | |
f05ed4d5 | 2053 | |
cbbc58d4 | 2054 | static struct kvmppc_ops kvm_ops_pr = { |
3a167bea AK |
2055 | .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr, |
2056 | .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr, | |
2057 | .get_one_reg = kvmppc_get_one_reg_pr, | |
2058 | .set_one_reg = kvmppc_set_one_reg_pr, | |
2059 | .vcpu_load = kvmppc_core_vcpu_load_pr, | |
2060 | .vcpu_put = kvmppc_core_vcpu_put_pr, | |
2061 | .set_msr = kvmppc_set_msr_pr, | |
2062 | .vcpu_run = kvmppc_vcpu_run_pr, | |
2063 | .vcpu_create = kvmppc_core_vcpu_create_pr, | |
2064 | .vcpu_free = kvmppc_core_vcpu_free_pr, | |
2065 | .check_requests = kvmppc_core_check_requests_pr, | |
2066 | .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr, | |
2067 | .flush_memslot = kvmppc_core_flush_memslot_pr, | |
2068 | .prepare_memory_region = kvmppc_core_prepare_memory_region_pr, | |
2069 | .commit_memory_region = kvmppc_core_commit_memory_region_pr, | |
3a167bea AK |
2070 | .unmap_hva_range = kvm_unmap_hva_range_pr, |
2071 | .age_hva = kvm_age_hva_pr, | |
2072 | .test_age_hva = kvm_test_age_hva_pr, | |
2073 | .set_spte_hva = kvm_set_spte_hva_pr, | |
2074 | .mmu_destroy = kvmppc_mmu_destroy_pr, | |
2075 | .free_memslot = kvmppc_core_free_memslot_pr, | |
2076 | .create_memslot = kvmppc_core_create_memslot_pr, | |
2077 | .init_vm = kvmppc_core_init_vm_pr, | |
2078 | .destroy_vm = kvmppc_core_destroy_vm_pr, | |
3a167bea AK |
2079 | .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr, |
2080 | .emulate_op = kvmppc_core_emulate_op_pr, | |
2081 | .emulate_mtspr = kvmppc_core_emulate_mtspr_pr, | |
2082 | .emulate_mfspr = kvmppc_core_emulate_mfspr_pr, | |
2083 | .fast_vcpu_kick = kvm_vcpu_kick, | |
2084 | .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, | |
ae2113a4 PM |
2085 | #ifdef CONFIG_PPC_BOOK3S_64 |
2086 | .hcall_implemented = kvmppc_hcall_impl_pr, | |
9617a0b3 | 2087 | .configure_mmu = kvm_configure_mmu_pr, |
ae2113a4 | 2088 | #endif |
2e6baa46 | 2089 | .giveup_ext = kvmppc_giveup_ext, |
3a167bea AK |
2090 | }; |
2091 | ||
cbbc58d4 AK |
2092 | |
2093 | int kvmppc_book3s_init_pr(void) | |
f05ed4d5 PM |
2094 | { |
2095 | int r; | |
2096 | ||
cbbc58d4 AK |
2097 | r = kvmppc_core_check_processor_compat_pr(); |
2098 | if (r < 0) | |
f05ed4d5 PM |
2099 | return r; |
2100 | ||
cbbc58d4 AK |
2101 | kvm_ops_pr.owner = THIS_MODULE; |
2102 | kvmppc_pr_ops = &kvm_ops_pr; | |
f05ed4d5 | 2103 | |
cbbc58d4 | 2104 | r = kvmppc_mmu_hpte_sysinit(); |
f05ed4d5 PM |
2105 | return r; |
2106 | } | |
2107 | ||
cbbc58d4 | 2108 | void kvmppc_book3s_exit_pr(void) |
f05ed4d5 | 2109 | { |
cbbc58d4 | 2110 | kvmppc_pr_ops = NULL; |
f05ed4d5 | 2111 | kvmppc_mmu_hpte_sysexit(); |
f05ed4d5 PM |
2112 | } |
2113 | ||
cbbc58d4 AK |
2114 | /* |
2115 | * We only support separate modules for book3s 64 | |
2116 | */ | |
2117 | #ifdef CONFIG_PPC_BOOK3S_64 | |
2118 | ||
3a167bea AK |
2119 | module_init(kvmppc_book3s_init_pr); |
2120 | module_exit(kvmppc_book3s_exit_pr); | |
2ba9f0d8 AK |
2121 | |
2122 | MODULE_LICENSE("GPL"); | |
398a76c6 AG |
2123 | MODULE_ALIAS_MISCDEV(KVM_MINOR); |
2124 | MODULE_ALIAS("devname:kvm"); | |
cbbc58d4 | 2125 | #endif |