/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr);
/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu_put(svcpu);
#endif
        vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
        current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif
}
static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        if (svcpu->in_use)
                kvmppc_copy_from_svcpu(vcpu, svcpu);
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
#endif

        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
        vcpu->cpu = -1;
}
/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
                          struct kvm_vcpu *vcpu)
{
        svcpu->gpr[0] = vcpu->arch.gpr[0];
        svcpu->gpr[1] = vcpu->arch.gpr[1];
        svcpu->gpr[2] = vcpu->arch.gpr[2];
        svcpu->gpr[3] = vcpu->arch.gpr[3];
        svcpu->gpr[4] = vcpu->arch.gpr[4];
        svcpu->gpr[5] = vcpu->arch.gpr[5];
        svcpu->gpr[6] = vcpu->arch.gpr[6];
        svcpu->gpr[7] = vcpu->arch.gpr[7];
        svcpu->gpr[8] = vcpu->arch.gpr[8];
        svcpu->gpr[9] = vcpu->arch.gpr[9];
        svcpu->gpr[10] = vcpu->arch.gpr[10];
        svcpu->gpr[11] = vcpu->arch.gpr[11];
        svcpu->gpr[12] = vcpu->arch.gpr[12];
        svcpu->gpr[13] = vcpu->arch.gpr[13];
        svcpu->cr  = vcpu->arch.cr;
        svcpu->xer = vcpu->arch.xer;
        svcpu->ctr = vcpu->arch.ctr;
        svcpu->lr  = vcpu->arch.lr;
        svcpu->pc  = vcpu->arch.pc;
        svcpu->in_use = true;
}
/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
                            struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        /*
         * vcpu_put would just call us again because in_use hasn't
         * been updated yet.
         */
        preempt_disable();

        /*
         * Maybe we were already preempted and synced the svcpu from
         * our preempt notifiers. Don't bother touching this svcpu then.
         */
        if (!svcpu->in_use)
                goto out;

        vcpu->arch.gpr[0] = svcpu->gpr[0];
        vcpu->arch.gpr[1] = svcpu->gpr[1];
        vcpu->arch.gpr[2] = svcpu->gpr[2];
        vcpu->arch.gpr[3] = svcpu->gpr[3];
        vcpu->arch.gpr[4] = svcpu->gpr[4];
        vcpu->arch.gpr[5] = svcpu->gpr[5];
        vcpu->arch.gpr[6] = svcpu->gpr[6];
        vcpu->arch.gpr[7] = svcpu->gpr[7];
        vcpu->arch.gpr[8] = svcpu->gpr[8];
        vcpu->arch.gpr[9] = svcpu->gpr[9];
        vcpu->arch.gpr[10] = svcpu->gpr[10];
        vcpu->arch.gpr[11] = svcpu->gpr[11];
        vcpu->arch.gpr[12] = svcpu->gpr[12];
        vcpu->arch.gpr[13] = svcpu->gpr[13];
        vcpu->arch.cr  = svcpu->cr;
        vcpu->arch.xer = svcpu->xer;
        vcpu->arch.ctr = svcpu->ctr;
        vcpu->arch.lr  = svcpu->lr;
        vcpu->arch.pc  = svcpu->pc;
        vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
        vcpu->arch.fault_dar   = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
        vcpu->arch.last_inst   = svcpu->last_inst;
        svcpu->in_use = false;

out:
        preempt_enable();
}
static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        /* We misuse TLB_FLUSH to indicate that we want to clear
           all shadow cache entries */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return r;
}
/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
                             unsigned long end)
{
        long i;
        struct kvm_vcpu *vcpu;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                        (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;
                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn, gfn+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
                                              gfn_end << PAGE_SHIFT);
        }
}
static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

        return 0;
}
static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
                                  unsigned long end)
{
        do_kvm_unmap_hva(kvm, start, end);

        return 0;
}
static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}
static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
        ulong smsr = vcpu->arch.shared->msr;

        /* Guest MSR values */
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
        /* Process MSR values */
        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External providers the guest reserved */
        smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
        smsr |= MSR_ISF | MSR_HV;
#endif
        vcpu->arch.shadow_msr = smsr;
}
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
        ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

        msr &= to_book3s(vcpu)->msr_mask;
        vcpu->arch.shared->msr = msr;
        kvmppc_recalc_shadow_msr(vcpu);

        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->stat.halt_wakeup++;

                        /* Unset POW bit after we woke up */
                        msr &= ~MSR_POW;
                        vcpu->arch.shared->msr = msr;
                }
        }

        if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
                   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

                /* Preload magic page segment when in kernel mode */
                if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
                        struct kvm_vcpu_arch *a = &vcpu->arch;

                        if (msr & MSR_DR)
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
                        else
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
                }
        }

        /*
         * When switching from 32 to 64-bit, we may have a stale 32-bit
         * magic page around, we need to flush it. Typically 32-bit magic
         * page will be instantiated when calling into RTAS. Note: We
         * assume that such transition only happens while in kernel mode,
         * ie, we never transition from user 32-bit to kernel 64-bit with
         * a 32-bit magic page around.
         */
        if (vcpu->arch.magic_page_pa &&
            !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
                /* going from RTAS to normal kernel code */
                kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
                                     ~0xFFFUL);
        }

        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}
void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
        u32 host_pvr;

        vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
        vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
        if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                kvmppc_mmu_book3s_64_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0xfff00000;
                to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_64;
        } else
#endif
        {
                kvmppc_mmu_book3s_32_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0;
                to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_32;
        }

        kvmppc_sanity_check(vcpu);

        /* If we are in hypervisor level on 970, we can tell the CPU to
         * treat DCBZ as 32 bytes store */
        vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
        if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
            !strcmp(cur_cpu_spec->platform, "ppc970"))
                vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

        /* Cell performs badly if MSR_FEx are set. So let's hope nobody
           really needs them in a VM on Cell and force disable them. */
        if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
                to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

        /*
         * If they're asking for POWER6 or later, set the flag
         * indicating that we can do multiple large page sizes
         * and 1TB segments.
         * Also set the flag that indicates that tlbie has the large
         * page bit in the RB operand instead of the instruction.
         */
        switch (PVR_VER(pvr)) {
        case PVR_POWER6:
        case PVR_POWER7:
        case PVR_POWER7p:
        case PVR_POWER8:
                vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
                        BOOK3S_HFLAG_NEW_TLBIE;
                break;
        }

#ifdef CONFIG_PPC_BOOK3S_32
        /* 32 bit Book3S always has 32 byte dcbz */
        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

        /* On some CPUs we can execute paired single operations natively */
        asm ( "mfpvr %0" : "=r"(host_pvr));
        switch (host_pvr) {
        case 0x00080200:        /* lonestar 2.0 */
        case 0x00088202:        /* lonestar 2.2 */
        case 0x70000100:        /* gekko 1.0 */
        case 0x00080100:        /* gekko 2.0 */
        case 0x00083203:        /* gekko 2.3a */
        case 0x00083213:        /* gekko 2.3b */
        case 0x00083204:        /* gekko 2.4 */
        case 0x00083214:        /* gekko 2.4e (8SE) - retail HW2 */
        case 0x00087200:        /* broadway */
                vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
                /* Enable HID2.PSE - in case we need it later */
                mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
                break;
        }
}
/* Book3s_32 CPUs always have 32 byte cache line size, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
 * emulate 32 byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        struct page *hpage;
        u64 hpage_offset;
        u32 *page;
        int i;

        hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (is_error_page(hpage))
                return;

        hpage_offset = pte->raddr & ~PAGE_MASK;
        hpage_offset &= ~0xFFFULL;
        hpage_offset /= 4;

        get_page(hpage);
        page = kmap_atomic(hpage);

        /* patch dcbz into reserved instruction, so we trap */
        for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                if ((page[i] & 0xff0007ff) == INS_DCBZ)
                        page[i] &= 0xfffffff7;

        kunmap_atomic(page);
        put_page(hpage);
}
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(vcpu->arch.shared->msr & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        if (unlikely(mp_pa) &&
            unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
                return 1;
        }

        return kvm_is_visible_gfn(vcpu->kvm, gfn);
}
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            ulong eaddr, int vec)
{
        bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
        bool iswrite = false;
        int r = RESUME_GUEST;
        int relocated;
        int page_found = 0;
        struct kvmppc_pte pte;
        bool is_mmio = false;
        bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
        bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
        u64 vsid;

        relocated = data ? dr : ir;
        if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
                iswrite = true;

        /* Resolve real address if translation turned on */
        if (relocated) {
                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
        } else {
                pte.may_execute = true;
                pte.may_read = true;
                pte.may_write = true;
                pte.raddr = eaddr & KVM_PAM;
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
                pte.page_size = MMU_PAGE_64K;
        }

        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

                if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
                pte.vpage |= vsid;

                if (vsid == -1)
                        page_found = -EINVAL;
                break;
        }

        if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                /*
                 * If we do the dcbz hack, we have to NX on every execution,
                 * so we can patch the executing code. This renders our guest
                 * NX-less.
                 */
                pte.may_execute = !data;
        }

        if (page_found == -ENOENT) {
                /* Page not found in guest PTE entries */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
                vcpu->arch.shared->msr |=
                        vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
                vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
                vcpu->arch.shared->msr |=
                        vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (!is_mmio &&
                   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
                if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
                        /*
                         * There is already a host HPTE there, presumably
                         * a read-only one for a page the guest thinks
                         * is writable, so get rid of it first.
                         */
                        kvmppc_mmu_unmap_page(vcpu, &pte);
                }
                /* The guest's PTE is not mapped yet. Map on the host */
                kvmppc_mmu_map_page(vcpu, &pte, iswrite);
                if (data)
                        vcpu->stat.sp_storage++;
                else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                         (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
                        kvmppc_patch_dcbz(vcpu, &pte);
        } else {
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
                vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if ( r == RESUME_HOST_NV )
                        r = RESUME_HOST;
        }

        return r;
}
static inline int get_fpr_index(int i)
{
        return i * TS_FPRWIDTH;
}
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
        struct thread_struct *t = &current->thread;
        u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
        u64 *thread_fpr = &t->fp_state.fpr[0][0];
        int i;

        /*
         * VSX instructions can access FP and vector registers, so if
         * we are giving up VSX, make sure we give up FP and VMX as well.
         */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;

        msr &= vcpu->arch.guest_owned_ext;
        if (!msr)
                return;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
                 * registers into thread.fp_state.fpr[].
                 */
                if (current->thread.regs->msr & MSR_FP)
                        giveup_fpu(current);
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                        vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

                vcpu->arch.fpscr = t->fp_state.fpscr;

#ifdef CONFIG_VSX
                if (cpu_has_feature(CPU_FTR_VSX))
                        for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
                                vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
        }

#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
                vcpu->arch.vscr = t->vr_state.vscr;
        }
#endif

        vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
        kvmppc_recalc_shadow_msr(vcpu);
}
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
        ulong srr0 = kvmppc_get_pc(vcpu);
        u32 last_inst = kvmppc_get_last_inst(vcpu);
        int ret;

        ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
        if (ret == -ENOENT) {
                ulong msr = vcpu->arch.shared->msr;

                msr = kvmppc_set_field(msr, 33, 33, 1);
                msr = kvmppc_set_field(msr, 34, 36, 0);
                vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
                kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
                return EMULATE_AGAIN;
        }

        return EMULATE_DONE;
}
static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
        /* Need to do paired single emulation? */
        if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
                return EMULATE_DONE;

        /* Read out the instruction */
        if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
                /* Need to emulate */
                return EMULATE_FAIL;

        return EMULATE_AGAIN;
}
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr)
{
        struct thread_struct *t = &current->thread;
        u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
        u64 *thread_fpr = &t->fp_state.fpr[0][0];
        int i;

        /* When we have paired singles, we emulate in software */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;

        if (!(vcpu->arch.shared->msr & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }

        if (msr == MSR_VSX) {
                /* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
                if (!cpu_has_feature(CPU_FTR_VSX))
#endif
                {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }

                /*
                 * We have to load up all the FP and VMX registers before
                 * we can let the guest use VSX instructions.
                 */
                msr = MSR_FP | MSR_VEC | MSR_VSX;
        }

        /* See if we already own all the ext(s) needed */
        msr &= ~vcpu->arch.guest_owned_ext;
        if (!msr)
                return RESUME_GUEST;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                        thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
#ifdef CONFIG_VSX
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
                        thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
#endif
                t->fp_state.fpscr = vcpu->arch.fpscr;
                kvmppc_load_up_fpu();
        }

        if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
                memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
                t->vr_state.vscr = vcpu->arch.vscr;
                kvmppc_load_up_altivec();
#endif
        }

        current->thread.regs->msr |= msr;
        vcpu->arch.guest_owned_ext |= msr;
        kvmppc_recalc_shadow_msr(vcpu);

        return RESUME_GUEST;
}
/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
        unsigned long lost_ext;

        lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
        if (!lost_ext)
                return;

        if (lost_ext & MSR_FP)
                kvmppc_load_up_fpu();
#ifdef CONFIG_ALTIVEC
        if (lost_ext & MSR_VEC)
                kvmppc_load_up_altivec();
#endif
        current->thread.regs->msr |= lost_ext;
}
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;

        vcpu->stat.sum_exits++;

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /* We get here with MSR.EE=1 */

        trace_kvm_exit(exit_nr, vcpu);
        kvm_guest_exit();

        switch (exit_nr) {
        case BOOK3S_INTERRUPT_INST_STORAGE:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /* only care about PTEG not found errors, but leave NX alone */
                if (shadow_srr1 & 0x40000000) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                          (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                        /*
                         * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
                         *     so we can't use the NX bit inside the guest. Let's cross our fingers,
                         *     that no guest that needs the dcbz hack does NX.
                         */
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
                        vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_STORAGE:
        {
                ulong dar = kvmppc_get_fault_dar(vcpu);
                u32 fault_dsisr = vcpu->arch.fault_dsisr;
                vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[dar >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, dar);
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /*
                 * We need to handle missing shadow PTEs, and
                 * protection faults due to us mapping a page read-only
                 * when the guest thinks it is writable.
                 */
                if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                } else {
                        vcpu->arch.shared->dar = dar;
                        vcpu->arch.shared->dsisr = fault_dsisr;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
                        vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_INST_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_INST_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
        case BOOK3S_INTERRUPT_HV_DECREMENTER:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
        case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
        case BOOK3S_INTERRUPT_EXTERNAL_HV:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PERFMON:
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
        {
                enum emulation_result er;
                ulong flags;

program_interrupt:
                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

                if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
                        printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
                        if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
                            (INS_DCBZ & 0xfffffff7)) {
                                kvmppc_core_queue_program(vcpu, flags);
                                r = RESUME_GUEST;
                                break;
                        }
                }

                vcpu->stat.emulated_inst_exits++;
                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_AGAIN:
                        r = RESUME_GUEST;
                        break;
                case EMULATE_FAIL:
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
                        kvmppc_core_queue_program(vcpu, flags);
                        r = RESUME_GUEST;
                        break;
                case EMULATE_DO_MMIO:
                        run->exit_reason = KVM_EXIT_MMIO;
                        r = RESUME_HOST_NV;
                        break;
                case EMULATE_EXIT_USER:
                        r = RESUME_HOST_NV;
                        break;
                default:
                        BUG();
                }
                break;
        }
        case BOOK3S_INTERRUPT_SYSCALL:
                if (vcpu->arch.papr_enabled &&
                    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
                    !(vcpu->arch.shared->msr & MSR_PR)) {
                        /* SC 1 papr hypercalls */
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;

#ifdef CONFIG_PPC_BOOK3S_64
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
#endif

                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
                                ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
                                run->papr_hcall.args[i] = gpr;
                        }
                        run->exit_reason = KVM_EXIT_PAPR_HCALL;
                        vcpu->arch.hcall_needed = 1;
                        r = RESUME_HOST;
                } else if (vcpu->arch.osi_enabled &&
                    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
                    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
                        /* MOL hypercalls */
                        u64 *gprs = run->osi.gprs;
                        int i;

                        run->exit_reason = KVM_EXIT_OSI;
                        for (i = 0; i < 32; i++)
                                gprs[i] = kvmppc_get_gpr(vcpu, i);
                        vcpu->arch.osi_needed = 1;
                        r = RESUME_HOST_NV;
                } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        vcpu->stat.syscall_exits++;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        case BOOK3S_INTERRUPT_FP_UNAVAIL:
        case BOOK3S_INTERRUPT_ALTIVEC:
        case BOOK3S_INTERRUPT_VSX:
        {
                int ext_msr = 0;

                switch (exit_nr) {
                case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
                case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
                case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
                }

                switch (kvmppc_check_ext(vcpu, exit_nr)) {
                case EMULATE_DONE:
                        /* everything ok - let's enable the ext */
                        r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
                        break;
                case EMULATE_FAIL:
                        /* we need to emulate this instruction */
                        goto program_interrupt;
                        break;
                default:
                        /* nothing to worry about - go again */
                        r = RESUME_GUEST;
                        break;
                }
                break;
        }
        case BOOK3S_INTERRUPT_ALIGNMENT:
                if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
                        vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
                                kvmppc_get_last_inst(vcpu));
                        vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
                                kvmppc_get_last_inst(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_MACHINE_CHECK:
        case BOOK3S_INTERRUPT_TRACE:
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                r = RESUME_GUEST;
                break;
        default:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                /* Ugh - bork here! What did we get? */
                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
                        exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
                r = RESUME_HOST;
                BUG();
                break;
        }
        }

        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason.
                 */

                /*
                 * Interrupts could be timers for the guest which we have to
                 * inject again, so let's postpone them until we're in the guest
                 * and if we really did time things so badly, then we just exit
                 * again due to a host external interrupt.
                 */
                local_irq_disable();
                s = kvmppc_prepare_to_enter(vcpu);
                if (s <= 0) {
                        local_irq_enable();
                        r = s;
                } else {
                        kvmppc_fix_ee_before_entry();
                }

                kvmppc_handle_lost_ext(vcpu);
        }

        trace_kvm_book3s_reenter(r, vcpu);

        return r;
}
static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
                                            struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        sregs->pvr = vcpu->arch.pvr;

        sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
                        sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
                }
        } else {
                for (i = 0; i < 16; i++)
                        sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

                for (i = 0; i < 8; i++) {
                        sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
                        sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
                }
        }

        return 0;
}
static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
                                            struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        kvmppc_set_pvr_pr(vcpu, sregs->pvr);

        vcpu3s->sdr1 = sregs->u.s.sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
                                                    sregs->u.s.ppc64.slb[i].slbe);
                }
        } else {
                for (i = 0; i < 16; i++) {
                        vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
                }
                for (i = 0; i < 8; i++) {
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
                                       (u32)sregs->u.s.ppc32.ibat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
                                       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
                                       (u32)sregs->u.s.ppc32.dbat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
                                       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
                }
        }

        /* Flush the MMU after messing with the segments */
        kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return 0;
}
static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                *val = get_reg_val(id, to_book3s(vcpu)->hior);
                break;
#ifdef CONFIG_VSX
        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
                long int i = id - KVM_REG_PPC_VSR0;

                if (!cpu_has_feature(CPU_FTR_VSX)) {
                        r = -ENXIO;
                        break;
                }
                val->vsxval[0] = vcpu->arch.fpr[i];
                val->vsxval[1] = vcpu->arch.vsr[i];
                break;
        }
#endif /* CONFIG_VSX */
        default:
                r = -EINVAL;
                break;
        }

        return r;
}
static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                to_book3s(vcpu)->hior = set_reg_val(id, *val);
                to_book3s(vcpu)->hior_explicit = true;
                break;
#ifdef CONFIG_VSX
        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
                long int i = id - KVM_REG_PPC_VSR0;

                if (!cpu_has_feature(CPU_FTR_VSX)) {
                        r = -ENXIO;
                        break;
                }
                vcpu->arch.fpr[i] = val->vsxval[0];
                vcpu->arch.vsr[i] = val->vsxval[1];
                break;
        }
#endif /* CONFIG_VSX */
        default:
                r = -EINVAL;
                break;
        }

        return r;
}
static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
                                                   unsigned int id)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s;
        struct kvm_vcpu *vcpu;
        int err = -ENOMEM;
        unsigned long p;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
        if (!vcpu_book3s)
                goto free_vcpu;
        vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32
        vcpu->arch.shadow_vcpu =
                kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
        if (!vcpu->arch.shadow_vcpu)
                goto free_vcpu3s;
#endif

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_shadow_vcpu;

        err = -ENOMEM;
        p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (!p)
                goto uninit_vcpu;
        /* the real shared page fills the last 4k of our page */
        vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * Default to the same as the host if we're on sufficiently
         * recent machine that we have 1TB segments;
         * otherwise default to PPC970FX.
         */
        vcpu->arch.pvr = 0x3C0301;
        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                vcpu->arch.pvr = mfspr(SPRN_PVR);
#else
        /* default to book3s_32 (750) */
        vcpu->arch.pvr = 0x84202;
#endif
        kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
        vcpu->arch.slb_nr = 64;

        vcpu->arch.shadow_msr = MSR_USER64;

        err = kvmppc_mmu_init(vcpu);
        if (err < 0)
                goto uninit_vcpu;

        return vcpu;

uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32
        kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
        vfree(vcpu_book3s);
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}
static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
        kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32
        kfree(vcpu->arch.shadow_vcpu);
#endif
        vfree(vcpu_book3s);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret;
        struct thread_fp_state fp;
        int fpexc_mode;
#ifdef CONFIG_ALTIVEC
        struct thread_vr_state vr;
        unsigned long uninitialized_var(vrsave);
        int used_vr;
#endif
#ifdef CONFIG_VSX
        int used_vsr;
#endif
        ulong ext_msr;

        /* Check if we can run the vcpu at all */
        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = -EINVAL;
                goto out;
        }

        /*
         * Interrupts could be timers for the guest which we have to inject
         * again, so let's postpone them until we're in the guest and if we
         * really did time things so badly, then we just exit again due to
         * a host external interrupt.
         */
        local_irq_disable();
        ret = kvmppc_prepare_to_enter(vcpu);
        if (ret <= 0) {
                local_irq_enable();
                goto out;
        }

        /* Save FPU state in stack */
        if (current->thread.regs->msr & MSR_FP)
                giveup_fpu(current);
        fp = current->thread.fp_state;
        fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
        /* Save Altivec state in stack */
        used_vr = current->thread.used_vr;
        if (used_vr) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                vr = current->thread.vr_state;
                vrsave = current->thread.vrsave;
        }
#endif

#ifdef CONFIG_VSX
        /* Save VSX state in stack */
        used_vsr = current->thread.used_vsr;
        if (used_vsr && (current->thread.regs->msr & MSR_VSX))
                __giveup_vsx(current);
#endif

        /* Remember the MSR with disabled extensions */
        ext_msr = current->thread.regs->msr;

        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

        kvmppc_fix_ee_before_entry();

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

        /* No need for kvm_guest_exit. It's done in handle_exit.
           We also get here with interrupts enabled. */

        /* Make sure we save the guest FPU/Altivec/VSX state */
        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

        current->thread.regs->msr = ext_msr;

        /* Restore FPU/VSX state from stack */
        current->thread.fp_state = fp;
        current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
        /* Restore Altivec state from stack */
        if (used_vr && current->thread.used_vr) {
                current->thread.vr_state = vr;
                current->thread.vrsave = vrsave;
        }
        current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
        current->thread.used_vsr = used_vsr;
#endif

out:
        vcpu->mode = OUTSIDE_GUEST_MODE;
        return ret;
}
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
                                         struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        struct kvm_vcpu *vcpu;
        ulong ga, ga_end;
        int is_dirty = 0;
        int r;
        unsigned long n;

        mutex_lock(&kvm->slots_lock);

        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
                memslot = id_to_memslot(kvm->memslots, log->slot);

                ga = memslot->base_gfn << PAGE_SHIFT;
                ga_end = ga + (memslot->npages << PAGE_SHIFT);

                kvm_for_each_vcpu(n, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }

        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}
static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
                                         struct kvm_memory_slot *memslot)
{
        return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot,
                                        struct kvm_userspace_memory_region *mem)
{
        return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old)
{
        return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
                                        struct kvm_memory_slot *dont)
{
        return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
                                         unsigned long npages)
{
        return 0;
}
#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
                                         struct kvm_ppc_smmu_info *info)
{
        long int i;
        struct kvm_vcpu *vcpu;

        info->flags = 0;

        /* SLB is always 64 entries */
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /*
         * 64k large page size.
         * We only want to put this in if the CPUs we're emulating
         * support it, but unfortunately we don't have a vcpu easily
         * to hand here to test. Just pick the first vcpu, and if
         * that doesn't exist yet, report the minimum capability,
         * i.e., no 64k pages.
         * 1T segment support goes along with 64k pages.
         */
        i = 1;
        vcpu = kvm_get_vcpu(kvm, 0);
        if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
                info->flags = KVM_PPC_1T_SEGMENTS;
                info->sps[i].page_shift = 16;
                info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
                info->sps[i].enc[0].page_shift = 16;
                info->sps[i].enc[0].pte_enc = 1;
                ++i;
        }

        /* Standard 16M large page size segment */
        info->sps[i].page_shift = 24;
        info->sps[i].slb_enc = SLB_VSID_L;
        info->sps[i].enc[0].page_shift = 24;
        info->sps[i].enc[0].pte_enc = 0;

        return 0;
}
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
                                         struct kvm_ppc_smmu_info *info)
{
        /* We should not get called */
        BUG();
}
#endif /* CONFIG_PPC64 */
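/*
 * Example of what the table above reports to userspace: sps[0] is the
 * 4k base segment (slb_enc 0, pte_enc 0); when the first vcpu supports
 * multiple page sizes, sps[1] advertises 64k pages with slb_enc
 * SLB_VSID_L | SLB_VSID_LP_01 and pte_enc 1, and the 16M entry follows
 * with SLB_VSID_L alone.
 */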
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
        mutex_init(&kvm->arch.hpt_mutex);

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                if (++kvm_global_user_count == 1)
                        pSeries_disable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
        return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                BUG_ON(kvm_global_user_count == 0);
                if (--kvm_global_user_count == 0)
                        pSeries_enable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
}
static int kvmppc_core_check_processor_compat_pr(void)
{
        /* we are always compatible */
        return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
                                 unsigned int ioctl, unsigned long arg)
{
        return -ENOTTY;
}
static struct kvmppc_ops kvm_ops_pr = {
        .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
        .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
        .get_one_reg = kvmppc_get_one_reg_pr,
        .set_one_reg = kvmppc_set_one_reg_pr,
        .vcpu_load   = kvmppc_core_vcpu_load_pr,
        .vcpu_put    = kvmppc_core_vcpu_put_pr,
        .set_msr     = kvmppc_set_msr_pr,
        .vcpu_run    = kvmppc_vcpu_run_pr,
        .vcpu_create = kvmppc_core_vcpu_create_pr,
        .vcpu_free   = kvmppc_core_vcpu_free_pr,
        .check_requests = kvmppc_core_check_requests_pr,
        .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
        .flush_memslot = kvmppc_core_flush_memslot_pr,
        .prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
        .commit_memory_region = kvmppc_core_commit_memory_region_pr,
        .unmap_hva = kvm_unmap_hva_pr,
        .unmap_hva_range = kvm_unmap_hva_range_pr,
        .age_hva  = kvm_age_hva_pr,
        .test_age_hva = kvm_test_age_hva_pr,
        .set_spte_hva = kvm_set_spte_hva_pr,
        .mmu_destroy  = kvmppc_mmu_destroy_pr,
        .free_memslot = kvmppc_core_free_memslot_pr,
        .create_memslot = kvmppc_core_create_memslot_pr,
        .init_vm = kvmppc_core_init_vm_pr,
        .destroy_vm = kvmppc_core_destroy_vm_pr,
        .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
        .emulate_op = kvmppc_core_emulate_op_pr,
        .emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
        .emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
        .fast_vcpu_kick = kvm_vcpu_kick,
        .arch_vm_ioctl  = kvm_arch_vm_ioctl_pr,
};
int kvmppc_book3s_init_pr(void)
{
        int r;

        r = kvmppc_core_check_processor_compat_pr();
        if (r < 0)
                return r;

        kvm_ops_pr.owner = THIS_MODULE;
        kvmppc_pr_ops = &kvm_ops_pr;

        r = kvmppc_mmu_hpte_sysinit();
        return r;
}

void kvmppc_book3s_exit_pr(void)
{
        kvmppc_pr_ops = NULL;
        kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
#endif