// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/asm-prototypes.h>
#include <asm/tm.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
#ifdef CONFIG_PPC_BOOK3S_64
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
#endif

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define HPTE_R_M   _PAGE_COHERENT
#endif

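/*
 * Split real mode: the guest runs with MSR_DR set but MSR_IR clear,
 * i.e. instruction fetches happen in real mode while data accesses are
 * translated. The "split hack" below relocates the guest PC into a high
 * alias of the fixupable address space so the guest can still run in
 * problem state.
 */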
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	ulong pc = kvmppc_get_pc(vcpu);

	/* Do nothing unless we are in DR-only split real mode */
	if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
		return;

	/* Do nothing if we have already fixed up the guest */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
		return;

	/* Do nothing if the code is outside the fixupable address space */
	if (pc & SPLIT_HACK_MASK)
		return;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		ulong lr = kvmppc_get_lr(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}

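/*
 * Deliver an interrupt to the guest: stash the interrupted PC/MSR in
 * the guest's SRR0/SRR1, then redirect execution to the guest handler
 * at HIOR + vec, moving a transactional guest into suspend state first.
 */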
static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	kvmppc_unfixup_split_real(vcpu);

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = to_book3s(vcpu)->hior + vec;

#ifdef CONFIG_PPC_BOOK3S_64
	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;
#endif

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	kvmppc_set_msr(vcpu, new_msr);
}

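/*
 * vcpu_load/vcpu_put: called when the vcpu is scheduled onto or off a
 * host CPU. On load we install the shadow SLB and disable AIL (PR KVM
 * relies on exceptions being taken at the real-mode vectors); on put we
 * sync state back and give up any borrowed facilities.
 */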
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu->in_use = 0;
	svcpu_put(svcpu);
#endif

	/* Disable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);

	kvmppc_restore_tm_pr(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use) {
		kvmppc_copy_from_svcpu(vcpu);
	}
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_unfixup_split_real(vcpu);

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_save_tm_pr(vcpu);

	/* Enable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
	svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
	svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
	svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
	svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
	svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
	svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
	svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
	svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
	svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
	svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
	svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
	svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
	svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
	svcpu->cr  = vcpu->arch.regs.ccr;
	svcpu->xer = vcpu->arch.regs.xer;
	svcpu->ctr = vcpu->arch.regs.ctr;
	svcpu->lr  = vcpu->arch.regs.link;
	svcpu->pc  = vcpu->arch.regs.nip;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;

	svcpu_put(svcpu);
}

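/*
 * Recompute the MSR value actually used in hardware while the guest
 * runs: the guest-visible bits the guest may control, plus the bits the
 * host mandates (translation on, problem state, interrupts enabled).
 */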
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE |
		MSR_TM | MSR_TS_MASK;
#else
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
#endif
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_HV;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * In guest privileged state we want all TM transactions to fail,
	 * so clear the MSR TM bit so that every tbegin. traps into the
	 * host.
	 */
	if (!(guest_msr & MSR_PR))
		smsr &= ~MSR_TM;
#endif
	vcpu->arch.shadow_msr = smsr;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ulong old_msr;
#endif

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
	vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
	vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
	vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
	vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
	vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
	vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
	vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
	vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
	vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
	vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
	vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
	vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
	vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
	vcpu->arch.regs.ccr  = svcpu->cr;
	vcpu->arch.regs.xer = svcpu->xer;
	vcpu->arch.regs.ctr = svcpu->ctr;
	vcpu->arch.regs.link  = svcpu->lr;
	vcpu->arch.regs.nip  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Unlike other MSR bits, the MSR[TS] bits can be changed by the
	 * guest without notifying the host: they are modified by
	 * unprivileged instructions such as "tbegin"/"tend"/
	 * "tresume"/"tsuspend" in a PR KVM guest.
	 *
	 * It is necessary to sync them here to calculate a correct
	 * shadow_msr.
	 *
	 * A privileged guest's tbegin always fails at present, so we
	 * only need to take care of the problem-state guest.
	 */
	old_msr = kvmppc_get_msr(vcpu);
	if (unlikely((old_msr & MSR_PR) &&
		     (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
		     (old_msr & (MSR_TS_MASK)))) {
		old_msr &= ~(MSR_TS_MASK);
		old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
		kvmppc_set_msr_fast(vcpu, old_msr);
		kvmppc_recalc_shadow_msr(vcpu);
	}
#endif

	svcpu->in_use = false;

out:
	svcpu_put(svcpu);
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
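/*
 * The TM SPRs (TFHAR, TEXASR, TFIAR) are switched eagerly around guest
 * entry/exit; tm_enable()/tm_disable() gate host access to them.
 */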
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
	tm_disable();
}

void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
	mtspr(SPRN_TEXASR, vcpu->arch.texasr);
	mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	tm_disable();
}

/*
 * Load up the math (FP/VEC/VSX) facilities that are enabled in the
 * guest MSR but not currently enabled in hardware.
 */
static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
{
	ulong exit_nr;
	ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
		(MSR_FP | MSR_VEC | MSR_VSX);

	if (!ext_diff)
		return;

	if (ext_diff == MSR_FP)
		exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
	else if (ext_diff == MSR_VEC)
		exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
	else
		exit_nr = BOOK3S_INTERRUPT_VSX;

	kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
}

void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
		kvmppc_save_tm_sprs(vcpu);
		return;
	}

	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	preempt_disable();
	_kvmppc_save_tm_pr(vcpu, mfmsr());
	preempt_enable();
}

void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
		kvmppc_restore_tm_sprs(vcpu);
		if (kvmppc_get_msr(vcpu) & MSR_TM) {
			kvmppc_handle_lost_math_exts(vcpu);
			if (vcpu->arch.fscr & FSCR_TAR)
				kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		}
		return;
	}

	preempt_disable();
	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
	preempt_enable();

	if (kvmppc_get_msr(vcpu) & MSR_TM) {
		kvmppc_handle_lost_math_exts(vcpu);
		if (vcpu->arch.fscr & FSCR_TAR)
			kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
	}
}
#endif

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/
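/*
 * PR KVM keeps no hardware referenced/changed state for guest pages,
 * so the notifiers below simply flush the shadow PTEs covering the
 * affected HVA range and report pages as not referenced.
 */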
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
			  unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

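/*
 * Updating the guest MSR has several side effects beyond recomputing
 * the shadow MSR: MSR_POW parks the vcpu until an interrupt is pending,
 * and a change of translation mode (PR/IR/DR) flushes and remaps the
 * shadow segments, preloading the magic page segment where needed.
 */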
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr;

	/* For PAPR guest, make sure MSR reflects guest mode */
	if (vcpu->arch.papr_enabled)
		msr = (msr & ~MSR_HV) | MSR_ME;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* We should never target a guest MSR with TS=10 && PR=0,
	 * since we always fail transactions in guest privileged
	 * state.
	 */
	if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr))
		kvmppc_emulate_tabort(vcpu,
			TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT);
#endif

	old_msr = kvmppc_get_msr(vcpu);
	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
	else
		kvmppc_unfixup_split_real(vcpu);

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, we need to flush it. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note:
	 * We assume that such a transition only happens while in kernel
	 * mode, ie, we never transition from user 32-bit to kernel 64-bit
	 * with a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (kvmppc_get_msr(vcpu) & MSR_TM)
		kvmppc_handle_lost_math_exts(vcpu);
#endif
}

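/*
 * Apply a guest PVR: select the 32- or 64-bit MMU model, set the
 * default HIOR, and enable per-CPU quirks such as the 32-byte dcbz
 * hack and native paired singles.
 */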
static void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as 32 bytes store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
	case PVR_POWER9:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz
 * to emulate the 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}

static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
		return true;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

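/*
 * Core of the shadow MMU: translate the faulting address through the
 * guest MMU (or build a 1:1 real-mode mapping), reflect the fault into
 * the guest if its own translation fails, map the page on the host if
 * it hits visible RAM, and fall back to MMIO emulation otherwise.
 */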
static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
				   ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte = { 0 };
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
		pte.wimg = HPTE_R_M;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
		if (!data &&
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		fallthrough;
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT || page_found == -EPERM) {
		/* Page not found in guest PTE entries, or protection fault */
		u64 flags;

		if (page_found == -EPERM)
			flags = DSISR_PROTFAULT;
		else
			flags = DSISR_NOHPTE;
		if (data) {
			flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
			kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
		} else {
			kvmppc_core_queue_inst_storage(vcpu, flags);
		}
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

	return r;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request*/
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX?  Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}

#ifdef CONFIG_PPC_BOOK3S_64

void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
	/* Inject the Interrupt Cause field and trigger a guest interrupt */
	vcpu->arch.fscr &= ~(0xffULL << 56);
	vcpu->arch.fscr |= (fac << 56);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	enum emulation_result er = EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
		er = kvmppc_emulate_instruction(vcpu);

	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
	}
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	bool guest_fac_enabled;
	BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

	/*
	 * Not every facility is enabled by FSCR bits, check whether the
	 * guest has this facility enabled at all.
	 */
	switch (fac) {
	case FSCR_TAR_LG:
	case FSCR_EBB_LG:
		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
		break;
	case FSCR_TM_LG:
		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
		break;
	default:
		guest_fac_enabled = false;
		break;
	}

	if (!guest_fac_enabled) {
		/* Facility not enabled by the guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
		return RESUME_GUEST;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		/* TAR switching isn't lazy in Linux yet */
		current->thread.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, vcpu->arch.tar);
		vcpu->arch.shadow_fscr |= FSCR_TAR;
		break;
	default:
		kvmppc_emulate_fac(vcpu, fac);
		break;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Since we disabled MSR_TM in privileged state, an mfspr of a TM
	 * SPR can trigger a TM facility-unavailable interrupt. In that
	 * case the emulation is handled by kvmppc_emulate_fac(), which
	 * eventually invokes kvmppc_emulate_mfspr(). But note that the
	 * mfspr may name a non-volatile GPR as RT, so we need to return
	 * RESUME_GUEST_NV to make that register update visible.
	 */
	if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
		return RESUME_GUEST_NV;
#endif

	return RESUME_GUEST;
}

void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
		/* TAR got dropped, drop it in shadow too */
		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	} else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
		vcpu->arch.fscr = fscr;
		kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		return;
	}

	vcpu->arch.fscr = fscr;
}
#endif

static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr | MSR_SE);
	}
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
	}
}

static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	enum emulation_result er;
	ulong flags;
	u32 last_inst;
	int emul, r;

	/*
	 * shadow_srr1 only contains valid flags if we came here via a program
	 * exception. The other exceptions (emulation assist, FP unavailable,
	 * etc.) do not provide flags in SRR1, so use an illegal-instruction
	 * exception when injecting a program interrupt into the guest.
	 */
	if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
	else
		flags = SRR1_PROGILL;

	emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
	if (emul != EMULATE_DONE)
		return RESUME_GUEST;

	if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
		pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
			kvmppc_get_pc(vcpu), last_inst);
#endif
		if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
			kvmppc_core_queue_program(vcpu, flags);
			return RESUME_GUEST;
		}
	}

	vcpu->stat.emulated_inst_exits++;
	er = kvmppc_emulate_instruction(vcpu);
	switch (er) {
	case EMULATE_DONE:
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_FAIL:
		pr_crit("%s: emulation at %lx failed (%08x)\n",
			__func__, kvmppc_get_pc(vcpu), last_inst);
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST_NV;
		break;
	case EMULATE_EXIT_USER:
		r = RESUME_HOST_NV;
		break;
	default:
		BUG();
	}

	return r;
}

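/*
 * Main exit dispatcher, entered with interrupts enabled after a guest
 * exit: each interrupt vector is either handled in the host, reflected
 * back into the guest, or passed up to userspace.
 */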
int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	struct kvm_run *run = vcpu->run;
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

		if (kvmppc_is_split_real(vcpu))
			kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			kvmppc_core_queue_inst_storage(vcpu,
						shadow_srr1 & 0x58000000);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
	case BOOK3S_INTERRUPT_DOORBELL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
	case BOOK3S_INTERRUPT_H_VIRT:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
	case BOOK3S_INTERRUPT_SYSTEM_RESET:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		r = kvmppc_exit_pr_progint(vcpu, exit_nr);
		break;
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		u32 last_sc;
		int emul;

		/* Get last sc for papr */
		if (vcpu->arch.papr_enabled) {
			/* The sc instruction points SRR0 to the next inst */
			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
			if (emul != EMULATE_DONE) {
				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
				r = RESUME_GUEST;
				break;
			}
		}

		if (vcpu->arch.papr_enabled &&
		    (last_sc == 0x44000022) &&
		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_PPC_BOOK3S_64
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;
		int emul;
		u32 last_inst;

		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
			/* Do paired single instruction emulation */
			emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
						    &last_inst);
			if (emul == EMULATE_DONE)
				r = kvmppc_exit_pr_progint(vcpu, exit_nr);
			else
				r = RESUME_GUEST;

			break;
		}

		/* Enable external provider */
		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL:
			ext_msr = MSR_FP;
			break;

		case BOOK3S_INTERRUPT_ALTIVEC:
			ext_msr = MSR_VEC;
			break;

		case BOOK3S_INTERRUPT_VSX:
			ext_msr = MSR_VSX;
			break;
		}

		r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
	{
		u32 last_inst;
		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

		if (emul == EMULATE_DONE) {
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
		r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
		break;
#endif
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_TRACE:
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			run->exit_reason = KVM_EXIT_DEBUG;
			r = RESUME_HOST;
		} else {
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

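/*
 * The sregs interface exposes either the SLB (64-bit guests) or the
 * segment registers and BATs (32-bit guests), keyed off
 * BOOK3S_HFLAG_SLB.
 */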
3a167bea AK |
1474 | static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu, |
1475 | struct kvm_sregs *sregs) | |
f05ed4d5 PM |
1476 | { |
1477 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | |
1478 | int i; | |
1479 | ||
1480 | sregs->pvr = vcpu->arch.pvr; | |
1481 | ||
1482 | sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; | |
1483 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { | |
1484 | for (i = 0; i < 64; i++) { | |
1485 | sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i; | |
1486 | sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; | |
1487 | } | |
1488 | } else { | |
1489 | for (i = 0; i < 16; i++) | |
5deb8e7a | 1490 | sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i); |
f05ed4d5 PM |
1491 | |
1492 | for (i = 0; i < 8; i++) { | |
1493 | sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; | |
1494 | sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; | |
1495 | } | |
1496 | } | |
1497 | ||
1498 | return 0; | |
1499 | } | |
1500 | ||
3a167bea AK |
1501 | static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu, |
1502 | struct kvm_sregs *sregs) | |
f05ed4d5 PM |
1503 | { |
1504 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | |
1505 | int i; | |
1506 | ||
3a167bea | 1507 | kvmppc_set_pvr_pr(vcpu, sregs->pvr); |
f05ed4d5 PM |
1508 | |
1509 | vcpu3s->sdr1 = sregs->u.s.sdr1; | |
f4093ee9 | 1510 | #ifdef CONFIG_PPC_BOOK3S_64 |
f05ed4d5 | 1511 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { |
f4093ee9 GK |
1512 | /* Flush all SLB entries */ |
1513 | vcpu->arch.mmu.slbmte(vcpu, 0, 0); | |
1514 | vcpu->arch.mmu.slbia(vcpu); | |
1515 | ||
f05ed4d5 | 1516 | for (i = 0; i < 64; i++) { |
f4093ee9 GK |
1517 | u64 rb = sregs->u.s.ppc64.slb[i].slbe; |
1518 | u64 rs = sregs->u.s.ppc64.slb[i].slbv; | |
1519 | ||
1520 | if (rb & SLB_ESID_V) | |
1521 | vcpu->arch.mmu.slbmte(vcpu, rs, rb); | |
f05ed4d5 | 1522 | } |
f4093ee9 GK |
1523 | } else |
1524 | #endif | |
1525 | { | |
f05ed4d5 PM |
1526 | for (i = 0; i < 16; i++) { |
1527 | vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); | |
1528 | } | |
1529 | for (i = 0; i < 8; i++) { | |
1530 | kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false, | |
1531 | (u32)sregs->u.s.ppc32.ibat[i]); | |
1532 | kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true, | |
1533 | (u32)(sregs->u.s.ppc32.ibat[i] >> 32)); | |
1534 | kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false, | |
1535 | (u32)sregs->u.s.ppc32.dbat[i]); | |
1536 | kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true, | |
1537 | (u32)(sregs->u.s.ppc32.dbat[i] >> 32)); | |
1538 | } | |
1539 | } | |
1540 | ||
1541 | /* Flush the MMU after messing with the segments */ | |
1542 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | |
1543 | ||
1544 | return 0; | |
1545 | } | |
1546 | ||
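/*
 * Illustrative userspace sketch (not part of this file; assumes
 * <linux/kvm.h>, <sys/ioctl.h> and a vcpu fd): the usual
 * read-modify-write pattern over the sregs handlers above.
 *
 *	struct kvm_sregs sregs;
 *	ioctl(vcpu_fd, KVM_GET_SREGS, &sregs);
 *	sregs.u.s.sdr1 = new_sdr1;	// hypothetical new HTAB origin/size
 *	ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
 */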
3a167bea AK |
1547 | static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, |
1548 | union kvmppc_one_reg *val) | |
31f3438e | 1549 | { |
a136a8bd | 1550 | int r = 0; |
31f3438e | 1551 | |
a136a8bd | 1552 | switch (id) { |
a59c1d9e MS |
1553 | case KVM_REG_PPC_DEBUG_INST: |
1554 | *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT); | |
1555 | break; | |
31f3438e | 1556 | case KVM_REG_PPC_HIOR: |
a136a8bd | 1557 | *val = get_reg_val(id, to_book3s(vcpu)->hior); |
31f3438e | 1558 | break; |
88b02cf9 PM |
1559 | case KVM_REG_PPC_VTB: |
1560 | *val = get_reg_val(id, to_book3s(vcpu)->vtb); | |
1561 | break; | |
e5ee5422 | 1562 | case KVM_REG_PPC_LPCR: |
a0840240 | 1563 | case KVM_REG_PPC_LPCR_64: |
e5ee5422 AK |
1564 | /* |
1565 | * We are only interested in the LPCR_ILE bit | |
1566 | */ | |
1567 | if (vcpu->arch.intr_msr & MSR_LE) | |
1568 | *val = get_reg_val(id, LPCR_ILE); | |
1569 | else | |
1570 | *val = get_reg_val(id, 0); | |
1571 | break; | |
deeb879d SG |
1572 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1573 | case KVM_REG_PPC_TFHAR: | |
1574 | *val = get_reg_val(id, vcpu->arch.tfhar); | |
1575 | break; | |
1576 | case KVM_REG_PPC_TFIAR: | |
1577 | *val = get_reg_val(id, vcpu->arch.tfiar); | |
1578 | break; | |
1579 | case KVM_REG_PPC_TEXASR: | |
1580 | *val = get_reg_val(id, vcpu->arch.texasr); | |
1581 | break; | |
1582 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: | |
1583 | *val = get_reg_val(id, | |
1584 | vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]); | |
1585 | break; | |
1586 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: | |
1587 | { | |
1588 | int i, j; | |
1589 | ||
1590 | i = id - KVM_REG_PPC_TM_VSR0; | |
1591 | if (i < 32) | |
1592 | for (j = 0; j < TS_FPRWIDTH; j++) | |
1593 | val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; | |
1594 | else { | |
1595 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | |
1596 | val->vval = vcpu->arch.vr_tm.vr[i-32]; | |
1597 | else | |
1598 | r = -ENXIO; | |
1599 | } | |
1600 | break; | |
1601 | } | |
1602 | case KVM_REG_PPC_TM_CR: | |
1603 | *val = get_reg_val(id, vcpu->arch.cr_tm); | |
1604 | break; | |
1605 | case KVM_REG_PPC_TM_XER: | |
1606 | *val = get_reg_val(id, vcpu->arch.xer_tm); | |
1607 | break; | |
1608 | case KVM_REG_PPC_TM_LR: | |
1609 | *val = get_reg_val(id, vcpu->arch.lr_tm); | |
1610 | break; | |
1611 | case KVM_REG_PPC_TM_CTR: | |
1612 | *val = get_reg_val(id, vcpu->arch.ctr_tm); | |
1613 | break; | |
1614 | case KVM_REG_PPC_TM_FPSCR: | |
1615 | *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); | |
1616 | break; | |
1617 | case KVM_REG_PPC_TM_AMR: | |
1618 | *val = get_reg_val(id, vcpu->arch.amr_tm); | |
1619 | break; | |
1620 | case KVM_REG_PPC_TM_PPR: | |
1621 | *val = get_reg_val(id, vcpu->arch.ppr_tm); | |
1622 | break; | |
1623 | case KVM_REG_PPC_TM_VRSAVE: | |
1624 | *val = get_reg_val(id, vcpu->arch.vrsave_tm); | |
1625 | break; | |
1626 | case KVM_REG_PPC_TM_VSCR: | |
1627 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | |
1628 | *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); | |
1629 | else | |
1630 | r = -ENXIO; | |
1631 | break; | |
1632 | case KVM_REG_PPC_TM_DSCR: | |
1633 | *val = get_reg_val(id, vcpu->arch.dscr_tm); | |
1634 | break; | |
1635 | case KVM_REG_PPC_TM_TAR: | |
1636 | *val = get_reg_val(id, vcpu->arch.tar_tm); | |
1637 | break; | |
1638 | #endif | |
31f3438e | 1639 | default: |
a136a8bd | 1640 | r = -EINVAL; |
31f3438e PM |
1641 | break; |
1642 | } | |
1643 | ||
1644 | return r; | |
1645 | } | |
1646 | ||
e5ee5422 AK |
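/*
 * PR KVM models LPCR only for its ILE bit, which selects the
 * endianness (MSR_LE) that interrupts are delivered with to the guest.
 */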
1647 | static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr) |
1648 | { | |
1649 | if (new_lpcr & LPCR_ILE) | |
1650 | vcpu->arch.intr_msr |= MSR_LE; | |
1651 | else | |
1652 | vcpu->arch.intr_msr &= ~MSR_LE; | |
1653 | } | |
1654 | ||
3a167bea AK |
1655 | static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, |
1656 | union kvmppc_one_reg *val) | |
31f3438e | 1657 | { |
a136a8bd | 1658 | int r = 0; |
31f3438e | 1659 | |
a136a8bd | 1660 | switch (id) { |
31f3438e | 1661 | case KVM_REG_PPC_HIOR: |
a136a8bd PM |
1662 | to_book3s(vcpu)->hior = set_reg_val(id, *val); |
1663 | to_book3s(vcpu)->hior_explicit = true; | |
31f3438e | 1664 | break; |
88b02cf9 PM |
1665 | case KVM_REG_PPC_VTB: |
1666 | to_book3s(vcpu)->vtb = set_reg_val(id, *val); | |
1667 | break; | |
e5ee5422 | 1668 | case KVM_REG_PPC_LPCR: |
a0840240 | 1669 | case KVM_REG_PPC_LPCR_64: |
e5ee5422 AK |
1670 | kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); |
1671 | break; | |
deeb879d SG |
1672 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1673 | case KVM_REG_PPC_TFHAR: | |
1674 | vcpu->arch.tfhar = set_reg_val(id, *val); | |
1675 | break; | |
1676 | case KVM_REG_PPC_TFIAR: | |
1677 | vcpu->arch.tfiar = set_reg_val(id, *val); | |
1678 | break; | |
1679 | case KVM_REG_PPC_TEXASR: | |
1680 | vcpu->arch.texasr = set_reg_val(id, *val); | |
1681 | break; | |
1682 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: | |
1683 | vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] = | |
1684 | set_reg_val(id, *val); | |
1685 | break; | |
1686 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: | |
1687 | { | |
1688 | int i, j; | |
1689 | ||
1690 | i = id - KVM_REG_PPC_TM_VSR0; | |
1691 | if (i < 32) | |
1692 | for (j = 0; j < TS_FPRWIDTH; j++) | |
1693 | vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; | |
1694 | else | |
1695 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | |
1696 | vcpu->arch.vr_tm.vr[i-32] = val->vval; | |
1697 | else | |
1698 | r = -ENXIO; | |
1699 | break; | |
1700 | } | |
1701 | case KVM_REG_PPC_TM_CR: | |
1702 | vcpu->arch.cr_tm = set_reg_val(id, *val); | |
1703 | break; | |
1704 | case KVM_REG_PPC_TM_XER: | |
1705 | vcpu->arch.xer_tm = set_reg_val(id, *val); | |
1706 | break; | |
1707 | case KVM_REG_PPC_TM_LR: | |
1708 | vcpu->arch.lr_tm = set_reg_val(id, *val); | |
1709 | break; | |
1710 | case KVM_REG_PPC_TM_CTR: | |
1711 | vcpu->arch.ctr_tm = set_reg_val(id, *val); | |
1712 | break; | |
1713 | case KVM_REG_PPC_TM_FPSCR: | |
1714 | vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); | |
1715 | break; | |
1716 | case KVM_REG_PPC_TM_AMR: | |
1717 | vcpu->arch.amr_tm = set_reg_val(id, *val); | |
1718 | break; | |
1719 | case KVM_REG_PPC_TM_PPR: | |
1720 | vcpu->arch.ppr_tm = set_reg_val(id, *val); | |
1721 | break; | |
1722 | case KVM_REG_PPC_TM_VRSAVE: | |
1723 | vcpu->arch.vrsave_tm = set_reg_val(id, *val); | |
1724 | break; | |
1725 | case KVM_REG_PPC_TM_VSCR: | |
1726 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | |
1727 | vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val); |
1728 | else | |
1729 | r = -ENXIO; | |
1730 | break; | |
1731 | case KVM_REG_PPC_TM_DSCR: | |
1732 | vcpu->arch.dscr_tm = set_reg_val(id, *val); | |
1733 | break; | |
1734 | case KVM_REG_PPC_TM_TAR: | |
1735 | vcpu->arch.tar_tm = set_reg_val(id, *val); | |
1736 | break; | |
1737 | #endif | |
31f3438e | 1738 | default: |
a136a8bd | 1739 | r = -EINVAL; |
31f3438e PM |
1740 | break; |
1741 | } | |
1742 | ||
1743 | return r; | |
1744 | } | |
1745 | ||
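/*
 * Illustrative userspace sketch (not part of this file; assumes
 * <linux/kvm.h> and a vcpu fd): accessing a single register through
 * the ONE_REG handlers above.
 *
 *	__u64 hior = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_HIOR,
 *		.addr = (__u64)&hior,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	// also marks hior_explicit
 */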
ff030fdf | 1746 | static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu) |
f05ed4d5 PM |
1747 | { |
1748 | struct kvmppc_vcpu_book3s *vcpu_book3s; | |
f05ed4d5 | 1749 | unsigned long p; |
d3076952 SC |
1750 | int err; |
1751 | ||
d3076952 | 1752 | err = -ENOMEM; |
f05ed4d5 PM |
1753 | |
1754 | vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); | |
1755 | if (!vcpu_book3s) | |
ff030fdf | 1756 | goto out; |
3ff95502 | 1757 | vcpu->arch.book3s = vcpu_book3s; |
f05ed4d5 | 1758 | |
ab78475c | 1759 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
3ff95502 PM |
1760 | vcpu->arch.shadow_vcpu = |
1761 | kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); | |
1762 | if (!vcpu->arch.shadow_vcpu) | |
1763 | goto free_vcpu3s; | |
a2d56020 | 1764 | #endif |
f05ed4d5 | 1765 | |
f05ed4d5 | 1766 | p = __get_free_page(GFP_KERNEL|__GFP_ZERO); |
f05ed4d5 | 1767 | if (!p) |
d3076952 | 1768 | goto free_shadow_vcpu; |
89b68c96 | 1769 | vcpu->arch.shared = (void *)p; |
f05ed4d5 | 1770 | #ifdef CONFIG_PPC_BOOK3S_64 |
5deb8e7a AG |
1771 | /* Always start the shared struct in native endian mode */ |
1772 | #ifdef __BIG_ENDIAN__ | |
1773 | vcpu->arch.shared_big_endian = true; | |
1774 | #else | |
1775 | vcpu->arch.shared_big_endian = false; | |
1776 | #endif | |
1777 | ||
a4a0f252 PM |
1778 | /* |
1779 | * Default to the same as the host if we're on sufficiently | |
1780 | * recent machine that we have 1TB segments; | |
1781 | * otherwise default to PPC970FX. | |
1782 | */ | |
f05ed4d5 | 1783 | vcpu->arch.pvr = 0x3C0301; |
a4a0f252 PM |
1784 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) |
1785 | vcpu->arch.pvr = mfspr(SPRN_PVR); | |
e5ee5422 | 1786 | vcpu->arch.intr_msr = MSR_SF; |
f05ed4d5 PM |
1787 | #else |
1788 | /* default to book3s_32 (750) */ | |
1789 | vcpu->arch.pvr = 0x84202; | |
87a45e07 | 1790 | vcpu->arch.intr_msr = 0; |
f05ed4d5 | 1791 | #endif |
3a167bea | 1792 | kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); |
f05ed4d5 PM |
1793 | vcpu->arch.slb_nr = 64; |
1794 | ||
94810ba4 | 1795 | vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; |
f05ed4d5 | 1796 | |
3f1268dd | 1797 | err = kvmppc_mmu_init_pr(vcpu); |
f05ed4d5 | 1798 | if (err < 0) |
cb10bf91 | 1799 | goto free_shared_page; |
f05ed4d5 | 1800 | |
c50bfbdc | 1801 | return 0; |
f05ed4d5 | 1802 | |
cb10bf91 SC |
1803 | free_shared_page: |
1804 | free_page((unsigned long)vcpu->arch.shared); | |
f05ed4d5 | 1805 | free_shadow_vcpu: |
ab78475c | 1806 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
3ff95502 PM |
1807 | kfree(vcpu->arch.shadow_vcpu); |
1808 | free_vcpu3s: | |
a2d56020 | 1809 | #endif |
f05ed4d5 | 1810 | vfree(vcpu_book3s); |
ff030fdf | 1811 | out: |
c50bfbdc | 1812 | return err; |
f05ed4d5 PM |
1813 | } |
1814 | ||
3a167bea | 1815 | static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) |
f05ed4d5 PM |
1816 | { |
1817 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | |
1818 | ||
b2fa4f90 | 1819 | kvmppc_mmu_destroy_pr(vcpu); |
f05ed4d5 | 1820 | free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); |
ab78475c | 1821 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
3ff95502 PM |
1822 | kfree(vcpu->arch.shadow_vcpu); |
1823 | #endif | |
f05ed4d5 PM |
1824 | vfree(vcpu_book3s); |
1825 | } | |
1826 | ||
8c99d345 | 1827 | static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu) |
f05ed4d5 PM |
1828 | { |
1829 | int ret; | |
f05ed4d5 | 1830 | |
af8f38b3 AG |
1831 | /* Check if we can run the vcpu at all */ |
1832 | if (!vcpu->arch.sane) { | |
7ec21d9d | 1833 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
7d82714d AG |
1834 | ret = -EINVAL; |
1835 | goto out; | |
af8f38b3 AG |
1836 | } |
1837 | ||
11dd6ac0 LV |
1838 | kvmppc_setup_debug(vcpu); |
1839 | ||
e371f713 AG |
1840 | /* |
1841 | * Interrupts could be timers for the guest that we have to inject |
1842 | * again, so postpone them until we're in the guest; if we really |
1843 | * did time things that badly, we just exit again due to a host |
1844 | * external interrupt. |
1845 | */ | |
7ee78855 | 1846 | ret = kvmppc_prepare_to_enter(vcpu); |
6c85f52b | 1847 | if (ret <= 0) |
7d82714d | 1848 | goto out; |
6c85f52b | 1849 | /* interrupts now hard-disabled */ |
f05ed4d5 | 1850 | |
c2085059 AB |
1851 | /* Save FPU, Altivec and VSX state */ |
1852 | giveup_all(current); | |
f05ed4d5 | 1853 | |
f05ed4d5 | 1854 | /* Preload FPU if it's enabled */ |
5deb8e7a | 1855 | if (kvmppc_get_msr(vcpu) & MSR_FP) |
f05ed4d5 PM |
1856 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
1857 | ||
5f1c248f | 1858 | kvmppc_fix_ee_before_entry(); |
df6909e5 | 1859 | |
7ec21d9d | 1860 | ret = __kvmppc_vcpu_run(vcpu); |
df6909e5 | 1861 | |
11dd6ac0 LV |
1862 | kvmppc_clear_debug(vcpu); |
1863 | ||
6edaa530 | 1864 | /* No need for guest_exit. It's done in handle_exit. |
24afa37b | 1865 |  * We also get here with interrupts enabled. */ |
f05ed4d5 | 1866 | |
f05ed4d5 | 1867 | /* Make sure we save the guest FPU/Altivec/VSX state */ |
28c483b6 PM |
1868 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); |
1869 | ||
e14e7a1e AG |
1870 | /* Make sure we save the guest TAR/EBB/DSCR state */ |
1871 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | |
1872 | ||
7d82714d | 1873 | out: |
0652eaae | 1874 | vcpu->mode = OUTSIDE_GUEST_MODE; |
f05ed4d5 PM |
1875 | return ret; |
1876 | } | |
1877 | ||
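/*
 * Illustrative userspace sketch of driving this run loop (not part of
 * this file; assumes a vcpu fd and the mmap'ed struct kvm_run *run):
 *
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *		if (run->exit_reason == KVM_EXIT_DEBUG)
 *			break;	// single-step exit, as reported above
 *		// handle other exit reasons, then re-enter
 *	}
 */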
82ed3616 PM |
1878 | /* |
1879 | * Get (and clear) the dirty memory log for a memory slot. | |
1880 | */ | |
3a167bea AK |
1881 | static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm, |
1882 | struct kvm_dirty_log *log) | |
82ed3616 PM |
1883 | { |
1884 | struct kvm_memory_slot *memslot; | |
1885 | struct kvm_vcpu *vcpu; | |
1886 | ulong ga, ga_end; | |
1887 | int is_dirty = 0; | |
1888 | int r; | |
1889 | unsigned long n; | |
1890 | ||
1891 | mutex_lock(&kvm->slots_lock); | |
1892 | ||
2a49f61d | 1893 | r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot); |
82ed3616 PM |
1894 | if (r) |
1895 | goto out; | |
1896 | ||
1897 | /* If nothing is dirty, don't bother messing with page tables. */ | |
1898 | if (is_dirty) { | |
82ed3616 PM |
1899 | ga = memslot->base_gfn << PAGE_SHIFT; |
1900 | ga_end = ga + (memslot->npages << PAGE_SHIFT); | |
1901 | ||
1902 | kvm_for_each_vcpu(n, vcpu, kvm) | |
1903 | kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); | |
1904 | ||
1905 | n = kvm_dirty_bitmap_bytes(memslot); | |
1906 | memset(memslot->dirty_bitmap, 0, n); | |
1907 | } | |
1908 | ||
1909 | r = 0; | |
1910 | out: | |
1911 | mutex_unlock(&kvm->slots_lock); | |
1912 | return r; | |
1913 | } | |
1914 | ||
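/*
 * Illustrative userspace sketch (not part of this file; assumes a VM
 * fd and a bitmap with one bit per page of slot 0). Note the log is
 * get-and-clear: the kernel zeroes its copy after reporting.
 *
 *	struct kvm_dirty_log log = {
 *		.slot = 0,
 *		.dirty_bitmap = bitmap,
 *	};
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) == 0)
 *		scan_bitmap(bitmap);	// hypothetical consumer of dirty bits
 */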
3a167bea AK |
1915 | static void kvmppc_core_flush_memslot_pr(struct kvm *kvm, |
1916 | struct kvm_memory_slot *memslot) | |
5b74716e | 1917 | { |
3a167bea AK |
1918 | return; |
1919 | } | |
5b74716e | 1920 | |
3a167bea AK |
1921 | static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm, |
1922 | struct kvm_memory_slot *memslot, | |
82307e67 SC |
1923 | const struct kvm_userspace_memory_region *mem, |
1924 | enum kvm_mr_change change) | |
3a167bea | 1925 | { |
5b74716e BH |
1926 | return 0; |
1927 | } | |
5b74716e | 1928 | |
3a167bea | 1929 | static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm, |
09170a49 | 1930 | const struct kvm_userspace_memory_region *mem, |
f36f3f28 | 1931 | const struct kvm_memory_slot *old, |
f032b734 BR |
1932 | const struct kvm_memory_slot *new, |
1933 | enum kvm_mr_change change) | |
a66b48c3 | 1934 | { |
3a167bea | 1935 | return; |
a66b48c3 PM |
1936 | } |
1937 | ||
e96c81ee | 1938 | static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *slot) |
a66b48c3 | 1939 | { |
3a167bea | 1940 | return; |
a66b48c3 PM |
1941 | } |
1942 | ||
5b74716e | 1943 | #ifdef CONFIG_PPC64 |
3a167bea AK |
1944 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, |
1945 | struct kvm_ppc_smmu_info *info) | |
dfe49dbd | 1946 | { |
a4a0f252 PM |
1947 | long int i; |
1948 | struct kvm_vcpu *vcpu; | |
1949 | ||
1950 | info->flags = 0; | |
5b74716e BH |
1951 | |
1952 | /* SLB is always 64 entries */ | |
1953 | info->slb_size = 64; | |
1954 | ||
1955 | /* Standard 4k base page size segment */ | |
1956 | info->sps[0].page_shift = 12; | |
1957 | info->sps[0].slb_enc = 0; | |
1958 | info->sps[0].enc[0].page_shift = 12; | |
1959 | info->sps[0].enc[0].pte_enc = 0; | |
1960 | ||
a4a0f252 PM |
1961 | /* |
1962 | * 64k large page size. | |
1963 | * We only want to put this in if the CPUs we're emulating | |
1964 | * support it, but unfortunately we don't have a vcpu readily |
1965 | * to hand here to test with. Just pick the first vcpu, and if |
1966 | * that doesn't exist yet, report the minimum capability, | |
1967 | * i.e., no 64k pages. | |
1968 | * 1T segment support goes along with 64k pages. | |
1969 | */ | |
1970 | i = 1; | |
1971 | vcpu = kvm_get_vcpu(kvm, 0); | |
1972 | if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { | |
1973 | info->flags = KVM_PPC_1T_SEGMENTS; | |
1974 | info->sps[i].page_shift = 16; | |
1975 | info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01; | |
1976 | info->sps[i].enc[0].page_shift = 16; | |
1977 | info->sps[i].enc[0].pte_enc = 1; | |
1978 | ++i; | |
1979 | } | |
1980 | ||
5b74716e | 1981 | /* Standard 16M large page size segment */ |
a4a0f252 PM |
1982 | info->sps[i].page_shift = 24; |
1983 | info->sps[i].slb_enc = SLB_VSID_L; | |
1984 | info->sps[i].enc[0].page_shift = 24; | |
1985 | info->sps[i].enc[0].pte_enc = 0; | |
dfe49dbd | 1986 | |
5b74716e BH |
1987 | return 0; |
1988 | } | |
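/*
 * Illustrative userspace sketch (not part of this file; assumes a VM
 * fd and <stdio.h>): querying the segment/page-size info assembled above.
 *
 *	struct kvm_ppc_smmu_info info;
 *	if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) == 0)
 *		printf("SLB entries: %u\n", info.slb_size);
 */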
9617a0b3 PM |
1989 | |
1990 | static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) | |
1991 | { | |
1992 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) | |
1993 | return -ENODEV; | |
1994 | /* Require flags and process table base and size to all be zero. */ | |
1995 | if (cfg->flags || cfg->process_table) | |
1996 | return -EINVAL; | |
1997 | return 0; | |
1998 | } | |
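/*
 * Illustrative userspace sketch (not part of this file): on POWER9,
 * PR KVM only accepts an all-zero (HPT, no process table) configuration
 * here; requesting radix fails with -EINVAL per the check above.
 *
 *	struct kvm_ppc_mmuv3_cfg cfg = { 0 };
 *	ioctl(vm_fd, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
 */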
1999 | ||
3a167bea AK |
2000 | #else |
2001 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, | |
2002 | struct kvm_ppc_smmu_info *info) | |
f9e0554d | 2003 | { |
3a167bea AK |
2004 | /* We should not get called */ |
2005 | BUG(); | |
fd24a862 | 2006 | return 0; |
f9e0554d | 2007 | } |
3a167bea | 2008 | #endif /* CONFIG_PPC64 */ |
f9e0554d | 2009 | |
a413f474 IM |
2010 | static unsigned int kvm_global_user_count = 0; |
2011 | static DEFINE_SPINLOCK(kvm_global_user_count_lock); | |
2012 | ||
3a167bea | 2013 | static int kvmppc_core_init_vm_pr(struct kvm *kvm) |
f9e0554d | 2014 | { |
9308ab8e | 2015 | mutex_init(&kvm->arch.hpt_mutex); |
f31e65e1 | 2016 | |
699a0ea0 PM |
2017 | #ifdef CONFIG_PPC_BOOK3S_64 |
2018 | /* Start out with the default set of hcalls enabled */ | |
2019 | kvmppc_pr_init_default_hcalls(kvm); | |
2020 | #endif | |
2021 | ||
a413f474 IM |
2022 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { |
2023 | spin_lock(&kvm_global_user_count_lock); | |
2024 | if (++kvm_global_user_count == 1) | |
d3cbff1b | 2025 | pseries_disable_reloc_on_exc(); |
a413f474 IM |
2026 | spin_unlock(&kvm_global_user_count_lock); |
2027 | } | |
f9e0554d PM |
2028 | return 0; |
2029 | } | |
2030 | ||
3a167bea | 2031 | static void kvmppc_core_destroy_vm_pr(struct kvm *kvm) |
f9e0554d | 2032 | { |
f31e65e1 BH |
2033 | #ifdef CONFIG_PPC64 |
2034 | WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); | |
2035 | #endif | |
a413f474 IM |
2036 | |
2037 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { | |
2038 | spin_lock(&kvm_global_user_count_lock); | |
2039 | BUG_ON(kvm_global_user_count == 0); | |
2040 | if (--kvm_global_user_count == 0) | |
d3cbff1b | 2041 | pseries_enable_reloc_on_exc(); |
a413f474 IM |
2042 | spin_unlock(&kvm_global_user_count_lock); |
2043 | } | |
f9e0554d PM |
2044 | } |
2045 | ||
3a167bea | 2046 | static int kvmppc_core_check_processor_compat_pr(void) |
f05ed4d5 | 2047 | { |
50de596d | 2048 | /* |
ec531d02 PM |
2049 | * PR KVM can work on POWER9 inside a guest partition |
2050 | * running in HPT mode. It can't work if we are using | |
2051 | * radix translation (because radix provides no way for | |
db96a04a | 2052 | * a process to have unique translations in quadrant 3). |
50de596d | 2053 | */ |
db96a04a | 2054 | if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled()) |
50de596d | 2055 | return -EIO; |
3a167bea AK |
2056 | return 0; |
2057 | } | |
f05ed4d5 | 2058 | |
3a167bea AK |
2059 | static long kvm_arch_vm_ioctl_pr(struct file *filp, |
2060 | unsigned int ioctl, unsigned long arg) | |
2061 | { | |
2062 | return -ENOTTY; | |
2063 | } | |
f05ed4d5 | 2064 | |
cbbc58d4 | 2065 | static struct kvmppc_ops kvm_ops_pr = { |
3a167bea AK |
2066 | .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr, |
2067 | .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr, | |
2068 | .get_one_reg = kvmppc_get_one_reg_pr, | |
2069 | .set_one_reg = kvmppc_set_one_reg_pr, | |
2070 | .vcpu_load = kvmppc_core_vcpu_load_pr, | |
2071 | .vcpu_put = kvmppc_core_vcpu_put_pr, | |
87a45e07 | 2072 | .inject_interrupt = kvmppc_inject_interrupt_pr, |
3a167bea AK |
2073 | .set_msr = kvmppc_set_msr_pr, |
2074 | .vcpu_run = kvmppc_vcpu_run_pr, | |
2075 | .vcpu_create = kvmppc_core_vcpu_create_pr, | |
2076 | .vcpu_free = kvmppc_core_vcpu_free_pr, | |
2077 | .check_requests = kvmppc_core_check_requests_pr, | |
2078 | .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr, | |
2079 | .flush_memslot = kvmppc_core_flush_memslot_pr, | |
2080 | .prepare_memory_region = kvmppc_core_prepare_memory_region_pr, | |
2081 | .commit_memory_region = kvmppc_core_commit_memory_region_pr, | |
3a167bea AK |
2082 | .unmap_hva_range = kvm_unmap_hva_range_pr, |
2083 | .age_hva = kvm_age_hva_pr, | |
2084 | .test_age_hva = kvm_test_age_hva_pr, | |
2085 | .set_spte_hva = kvm_set_spte_hva_pr, | |
3a167bea | 2086 | .free_memslot = kvmppc_core_free_memslot_pr, |
3a167bea AK |
2087 | .init_vm = kvmppc_core_init_vm_pr, |
2088 | .destroy_vm = kvmppc_core_destroy_vm_pr, | |
3a167bea AK |
2089 | .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr, |
2090 | .emulate_op = kvmppc_core_emulate_op_pr, | |
2091 | .emulate_mtspr = kvmppc_core_emulate_mtspr_pr, | |
2092 | .emulate_mfspr = kvmppc_core_emulate_mfspr_pr, | |
2093 | .fast_vcpu_kick = kvm_vcpu_kick, | |
2094 | .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, | |
ae2113a4 PM |
2095 | #ifdef CONFIG_PPC_BOOK3S_64 |
2096 | .hcall_implemented = kvmppc_hcall_impl_pr, | |
9617a0b3 | 2097 | .configure_mmu = kvm_configure_mmu_pr, |
ae2113a4 | 2098 | #endif |
2e6baa46 | 2099 | .giveup_ext = kvmppc_giveup_ext, |
3a167bea AK |
2100 | }; |
2101 | ||
cbbc58d4 AK |
2102 | |
2103 | int kvmppc_book3s_init_pr(void) | |
f05ed4d5 PM |
2104 | { |
2105 | int r; | |
2106 | ||
cbbc58d4 AK |
2107 | r = kvmppc_core_check_processor_compat_pr(); |
2108 | if (r < 0) | |
f05ed4d5 PM |
2109 | return r; |
2110 | ||
cbbc58d4 AK |
2111 | kvm_ops_pr.owner = THIS_MODULE; |
2112 | kvmppc_pr_ops = &kvm_ops_pr; | |
f05ed4d5 | 2113 | |
cbbc58d4 | 2114 | r = kvmppc_mmu_hpte_sysinit(); |
f05ed4d5 PM |
2115 | return r; |
2116 | } | |
2117 | ||
cbbc58d4 | 2118 | void kvmppc_book3s_exit_pr(void) |
f05ed4d5 | 2119 | { |
cbbc58d4 | 2120 | kvmppc_pr_ops = NULL; |
f05ed4d5 | 2121 | kvmppc_mmu_hpte_sysexit(); |
f05ed4d5 PM |
2122 | } |
2123 | ||
cbbc58d4 AK |
2124 | /* |
2125 | * We only support separate modules for book3s 64 | |
2126 | */ | |
2127 | #ifdef CONFIG_PPC_BOOK3S_64 | |
2128 | ||
3a167bea AK |
2129 | module_init(kvmppc_book3s_init_pr); |
2130 | module_exit(kvmppc_book3s_exit_pr); | |
2ba9f0d8 AK |
2131 | |
2132 | MODULE_LICENSE("GPL"); | |
398a76c6 AG |
2133 | MODULE_ALIAS_MISCDEV(KVM_MINOR); |
2134 | MODULE_ALIAS("devname:kvm"); | |
cbbc58d4 | 2135 | #endif |