/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/srcu.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
//#define DEBUG_RESIZE_HPT	1

#ifdef DEBUG_RESIZE_HPT
#define resize_hpt_debug(resize, ...)				\
	do {							\
		printk(KERN_DEBUG "RESIZE HPT %p: ", resize);	\
		printk(__VA_ARGS__);				\
	} while (0)
#else
#define resize_hpt_debug(resize, ...)				\
	do { } while (0)
#endif
static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret);
struct kvm_resize_hpt {
	/* These fields read-only after init */
	struct kvm *kvm;
	struct work_struct work;
	u32 order;

	/* These fields protected by kvm->lock */
	int error;
	bool prepare_done;

	/* Private to the work thread, until prepare_done is true,
	 * then protected by kvm->resize_hpt_sem */
	struct kvm_hpt_info hpt;
};
static void kvmppc_rmap_reset(struct kvm *kvm);
int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order)
{
	unsigned long hpt = 0;
	int cma = 0;
	struct page *page = NULL;
	struct revmap_entry *rev;
	unsigned long npte;

	if ((order < PPC_MIN_HPT_ORDER) || (order > PPC_MAX_HPT_ORDER))
		return -EINVAL;

	page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
	if (page) {
		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
		memset((void *)hpt, 0, (1ul << order));
		cma = 1;
	}

	if (!hpt)
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT
				       |__GFP_NOWARN, order - PAGE_SHIFT);

	if (!hpt)
		return -ENOMEM;

	/* HPTEs are 2**4 bytes long */
	npte = 1ul << (order - 4);

	/* Allocate reverse map array */
	rev = vmalloc(sizeof(struct revmap_entry) * npte);
	if (!rev) {
		pr_err("kvmppc_allocate_hpt: Couldn't alloc reverse map array\n");
		if (cma)
			kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
		else
			free_pages(hpt, order - PAGE_SHIFT);
		return -ENOMEM;
	}

	info->order = order;
	info->virt = hpt;
	info->cma = cma;
	info->rev = rev;

	return 0;
}
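/*
 * Illustrative sizing note (not part of the original file): each HPTE is
 * 16 bytes, so an order-24 (16 MiB) HPT holds 1ul << (24 - 4) = 1M
 * entries.  A minimal sketch of the allocate-then-install pattern,
 * assuming "kvm" is a valid struct kvm pointer held by the caller:
 *
 *	struct kvm_hpt_info info;
 *
 *	if (kvmppc_allocate_hpt(&info, 24) == 0)
 *		kvmppc_set_hpt(kvm, &info);
 *
 * kvmppc_alloc_reset_hpt() below is the in-tree user of this pattern.
 */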
void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info)
{
	atomic64_set(&kvm->arch.mmio_update, 0);
	kvm->arch.hpt = *info;
	kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18);

	pr_debug("KVM guest htab at %lx (order %ld), LPID %x\n",
		 info->virt, (long)info->order, kvm->arch.lpid);
}
long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
{
	long err = -EBUSY;
	struct kvm_hpt_info info;

	if (kvm_is_radix(kvm))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->arch.hpte_setup_done) {
		kvm->arch.hpte_setup_done = 0;
		/* order hpte_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.hpte_setup_done = 1;
			goto out;
		}
	}
	if (kvm->arch.hpt.order == order) {
		/* We already have a suitable HPT */

		/* Set the entire HPT to 0, i.e. invalid HPTEs */
		memset((void *)kvm->arch.hpt.virt, 0, 1ul << order);
		/*
		 * Reset all the reverse-mapping chains for all memslots
		 */
		kvmppc_rmap_reset(kvm);
		/* Ensure that each vcpu will flush its TLB on next entry. */
		cpumask_setall(&kvm->arch.need_tlb_flush);
		err = 0;
		goto out;
	}

	if (kvm->arch.hpt.virt)
		kvmppc_free_hpt(&kvm->arch.hpt);

	err = kvmppc_allocate_hpt(&info, order);
	if (err < 0)
		goto out;
	kvmppc_set_hpt(kvm, &info);

out:
	mutex_unlock(&kvm->lock);
	return err;
}
void kvmppc_free_hpt(struct kvm_hpt_info *info)
{
	vfree(info->rev);
	if (info->cma)
		kvm_free_hpt_cma(virt_to_page(info->virt),
				 1 << (info->order - PAGE_SHIFT));
	else if (info->virt)
		free_pages(info->virt, info->order - PAGE_SHIFT);
	info->virt = 0;
	info->order = 0;
}
/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}
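/*
 * Worked example (illustrative only): for a 64k page (pgsize == 0x10000),
 * hpte0_pgsize_encoding() returns HPTE_V_LARGE and
 * hpte1_pgsize_encoding() returns 0x1000, the second-dword bits selecting
 * the 64k actual page size.  For a 4k page both return 0, and for a 16M
 * page (0x1000000) HPTE_V_LARGE is set with a second-dword encoding of 0.
 */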
void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	unsigned long idx_ret;
	long ret;
	struct kvm *kvm = vcpu->kvm;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1)
		npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25)))
			& kvmppc_hpt_mask(&kvm->arch.hpt);
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
						 &idx_ret);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}
int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	/* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
	host_lpid = mfspr(SPRN_LPID);
	rsvd_lpid = LPID_RSVD;

	kvmppc_init_lpid(rsvd_lpid + 1);

	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}
static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	unsigned long msr = vcpu->arch.intr_msr;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
		msr |= MSR_TS_S;
	else
		msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
	kvmppc_set_msr(vcpu, msr);
}
static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret)
{
	long ret;

	/* Protect linux PTE lookup from page table destruction */
	rcu_read_lock_sched();	/* this disables preemption too */
	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
				current->mm->pgd, false, pte_idx_ret);
	rcu_read_unlock_sched();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;
}
static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	int i;
	u64 mask;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}
static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = hpte_page_size(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, orig_v, gr;
	__be64 *hptep;
	long int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	preempt_disable();
	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0) {
		preempt_enable();
		return -ENOENT;
	}
	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
	v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1]));
	gr = kvm->arch.hpt.rev[index].guest_rpte;

	unlock_hpte(hptep, orig_v);
	preempt_enable();

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}
/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}
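/*
 * Worked example (illustrative only): "stw" (major opcode 36) encodes as
 * 0x90xxxxxx, so (instr & 0xfc000000) is 0x90000000, the default mask
 * 0x10000000 applies, and the bit is set: a store.  "lwz" (opcode 32)
 * encodes as 0x80xxxxxx, the bit is clear: a load.  For major opcode 31
 * the mask 0x100 tests the equivalent bit inside the extended opcode
 * field: stwx (XO 151, 151 << 1 = 0x12e) has it set, lwzx (XO 23) does not.
 */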
int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned long gpa, gva_t ea, int is_store)
{
	u32 last_inst;

	/*
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
		EMULATE_DONE)
		return RESUME_GUEST;

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */
	if (instruction_is_store(last_inst) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later.  The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible.  It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(run, vcpu);
}
int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long hpte[3], r;
	unsigned long hnow_v, hnow_r;
	__be64 *hptep;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gpa_base, gfn_base;
	unsigned long gpa, gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page, *pages[1];
	long index, ret, npages;
	bool is_ci;
	unsigned int writing, write_ok;
	struct vm_area_struct *vma;
	unsigned long rcbits;
	long mmio_update;

	if (kvm_is_radix(kvm))
		return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;

	if (vcpu->arch.pgfault_cache) {
		mmio_update = atomic64_read(&kvm->arch.mmio_update);
		if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) {
			r = vcpu->arch.pgfault_cache->rpte;
			psize = hpte_page_size(vcpu->arch.pgfault_hpte[0], r);
			gpa_base = r & HPTE_R_RPN & ~(psize - 1);
			gfn_base = gpa_base >> PAGE_SHIFT;
			gpa = gpa_base | (ea & (psize - 1));
			return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
						dsisr & DSISR_ISSTORE);
		}
	}
	index = vcpu->arch.pgfault_index;
	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
	rev = &kvm->arch.hpt.rev[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
	hpte[1] = be64_to_cpu(hptep[1]);
	hpte[2] = r = rev->guest_rpte;
	unlock_hpte(hptep, hpte[0]);
	preempt_enable();

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte[0] = hpte_new_to_old_v(hpte[0], hpte[1]);
		hpte[1] = hpte_new_to_old_r(hpte[1]);
	}
	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = hpte_page_size(hpte[0], r);
	gpa_base = r & HPTE_R_RPN & ~(psize - 1);
	gfn_base = gpa_base >> PAGE_SHIFT;
	gpa = gpa_base | (ea & (psize - 1));
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);

	trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);

	/*
	 * This should never happen, because of the slot_is_aligned()
	 * check in kvmppc_do_h_enter().
	 */
	if (gfn_base < memslot->base_gfn)
		return -EFAULT;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	ret = -EFAULT;
	is_ci = false;
	pfn = 0;
	page = NULL;
	pte_size = PAGE_SIZE;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pte_size = psize;
			is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
			write_ok = vma->vm_flags & VM_WRITE;
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			goto out_put;
	} else {
		page = pages[0];
		pfn = page_to_pfn(page);
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
		}
		/* if the guest wants write access, see if that is OK */
		if (!writing && hpte_is_writable(r)) {
			pte_t *ptep, pte;
			unsigned long flags;
			/*
			 * We need to protect against page table destruction
			 * hugepage split and collapse.
			 */
			local_irq_save(flags);
			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
							 hva, NULL, NULL);
			if (ptep) {
				pte = kvmppc_read_update_linux_pte(ptep, 1);
				if (__pte_write(pte))
					write_ok = 1;
			}
			local_irq_restore(flags);
		}
	}

	if (psize > pte_size)
		goto out_put;

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_ci)) {
		if (is_ci)
			goto out_put;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/*
	 * Set the HPTE to point to pfn.
	 * Since the pfn is at PAGE_SIZE granularity, make sure we
	 * don't mask out lower-order bits if psize < PAGE_SIZE.
	 */
	if (psize < PAGE_SIZE)
		psize = PAGE_SIZE;
	r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) |
					((pfn << PAGE_SHIFT) & ~(psize - 1));
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hnow_v = be64_to_cpu(hptep[0]);
	hnow_r = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
		hnow_r = hpte_new_to_old_r(hnow_r);
	}
	if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	/* Always put the HPTE in the rmap chain for the page base address */
	rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		r = hpte_old_to_new_r(hpte[0], r);
		hpte[0] = hpte_old_to_new_v(hpte[0]);
	}
	hptep[1] = cpu_to_be64(r);
	eieio();
	__unlock_hpte(hptep, hpte[0]);
	asm volatile("ptesync" : : : "memory");
	preempt_enable();
	if (page && hpte_is_writable(r))
		SetPageDirty(page);

 out_put:
	trace_kvm_page_fault_exit(vcpu, hpte, ret);

	if (page) {
		/*
		 * We drop pages[0] here, not page because page might
		 * have been set to the head page of a compound, but
		 * we have to drop the reference on the correct tail
		 * page to match the get inside gup()
		 */
		put_page(pages[0]);
	}
	return ret;

 out_unlock:
	__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	preempt_enable();
	goto out_put;
}
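/*
 * Illustrative note on the R/C merging above (not in the original):
 * "r &= rcbits | ~(HPTE_R_R | HPTE_R_C)" keeps every bit of r except
 * that R and C survive only if also set in the rmap entry.  E.g. with
 * rcbits containing only R while r has both R and C set, the result
 * keeps R but clears C, so the guest's C bit cannot run ahead of the
 * host's own dirty tracking.
 */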
static void kvmppc_rmap_reset(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		/*
		 * This assumes it is acceptable to lose reference and
		 * change bits across a reset.
		 */
		memset(memslot->arch.rmap, 0,
		       memslot->npages * sizeof(*memslot->arch.rmap));
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}
typedef int (*hva_handler_fn)(struct kvm *kvm, struct kvm_memory_slot *memslot,
			      unsigned long gfn);

static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				hva_handler_fn handler)
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			ret = handler(kvm, memslot, gfn);
			retval |= ret;
		}
	}

	return retval;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  hva_handler_fn handler)
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
}
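/*
 * Worked example (illustrative only, assuming 4k pages): for a memslot
 * with userspace_addr = 0x100000000, npages = 0x1000 and base_gfn =
 * 0x800, a call over hvas [0x100003000, 0x100005000) clips to the slot
 * and visits gfns 0x803 and 0x804; hva_end is rounded up by
 * PAGE_SIZE - 1 before conversion, so gfn_end is 0x805, exclusive.
 */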
/* Must be called with both HPTE and rmap locked */
static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
			      unsigned long *rmapp, unsigned long gfn)
{
	__be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long j, h;
	unsigned long ptel, psize, rcbits;

	j = rev[i].forw;
	if (j == i) {
		/* chain is now empty */
		*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
	} else {
		/* remove i from chain */
		h = rev[i].back;
		rev[h].forw = j;
		rev[j].back = h;
		rev[i].forw = rev[i].back = i;
		*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
	}

	/* Now check and modify the HPTE */
	ptel = rev[i].guest_rpte;
	psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
	if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
	    hpte_rpn(ptel, psize) == gfn) {
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, i);
		hptep[1] &= ~cpu_to_be64(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
		/* Harvest R and C */
		rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
		*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
		if (rcbits & HPTE_R_C)
			kvmppc_update_rmap_change(rmapp, psize);
		if (rcbits & ~rev[i].guest_rpte) {
			rev[i].guest_rpte = ptel | rcbits;
			note_hpte_modification(kvm, &rev[i]);
		}
	}
}
static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			   unsigned long gfn)
{
	unsigned long i;
	__be64 *hptep;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}

		kvmppc_unmap_hpte(kvm, i, rmapp, gfn);
		unlock_rmap(rmapp);
		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	}
	return 0;
}
int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
	kvm_handle_hva(kvm, hva, handler);
	return 0;
}

int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
	kvm_handle_hva_range(kvm, start, end, handler);
	return 0;
}
void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
				  struct kvm_memory_slot *memslot)
{
	unsigned long gfn;
	unsigned long n;
	unsigned long *rmapp;

	gfn = memslot->base_gfn;
	rmapp = memslot->arch.rmap;
	for (n = memslot->npages; n; --n, ++gfn) {
		if (kvm_is_radix(kvm)) {
			kvm_unmap_radix(kvm, memslot, gfn);
			continue;
		}
		/*
		 * Testing the present bit without locking is OK because
		 * the memslot has been marked invalid already, and hence
		 * no new HPTEs referencing this page can be created,
		 * thus the present bit can't go from 0 to 1.
		 */
		if (*rmapp & KVMPPC_RMAP_PRESENT)
			kvm_unmap_rmapp(kvm, memslot, gfn);
		++rmapp;
	}
}
static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			 unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	__be64 *hptep;
	int ret = 0;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(be64_to_cpu(hptep[1]) & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
		    (be64_to_cpu(hptep[1]) & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			if (!(rev[i].guest_rpte & HPTE_R_R)) {
				rev[i].guest_rpte |= HPTE_R_R;
				note_hpte_modification(kvm, &rev[i]);
			}
			ret = 1;
		}
		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}
int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_age_radix : kvm_age_rmapp;
	return kvm_handle_hva_range(kvm, start, end, handler);
}
static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4));
			j = rev[i].forw;
			if (be64_to_cpu(hp[1]) & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

 out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_test_age_radix : kvm_test_age_rmapp;
	return kvm_handle_hva(kvm, hva, handler);
}
void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
	kvm_handle_hva(kvm, hva, handler);
}

static int vcpus_running(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.vcpus_running) != 0;
}
/*
 * Returns the number of system pages that are dirty.
 * This can be more than 1 if we find a huge-page HPTE.
 */
static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	unsigned long n;
	unsigned long v, r;
	__be64 *hptep;
	int npages_dirty = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_CHANGED) {
		long change_order = (*rmapp & KVMPPC_RMAP_CHG_ORDER)
			>> KVMPPC_RMAP_CHG_SHIFT;
		*rmapp &= ~(KVMPPC_RMAP_CHANGED | KVMPPC_RMAP_CHG_ORDER);
		npages_dirty = 1;
		if (change_order > PAGE_SHIFT)
			npages_dirty = 1ul << (change_order - PAGE_SHIFT);
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return npages_dirty;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		unsigned long hptep1;
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		j = rev[i].forw;

		/*
		 * Checking the C (changed) bit here is racy since there
		 * is no guarantee about when the hardware writes it back.
		 * If the HPTE is not writable then it is stable since the
		 * page can't be written to, and we would have done a tlbie
		 * (which forces the hardware to complete any writeback)
		 * when making the HPTE read-only.
		 * If vcpus are running then this call is racy anyway
		 * since the page could get dirtied subsequently, so we
		 * expect there to be a further call which would pick up
		 * any delayed C bit writeback.
		 * Otherwise we need to do the tlbie even if C==0 in
		 * order to pick up any delayed writeback of C.
		 */
		hptep1 = be64_to_cpu(hptep[1]);
		if (!(hptep1 & HPTE_R_C) &&
		    (!hpte_is_writable(hptep1) || vcpus_running(kvm)))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK))
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
			__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
			continue;
		}

		/* need to make it temporarily absent so C is stable */
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, i);
		v = be64_to_cpu(hptep[0]);
		r = be64_to_cpu(hptep[1]);
		if (r & HPTE_R_C) {
			hptep[1] = cpu_to_be64(r & ~HPTE_R_C);
			if (!(rev[i].guest_rpte & HPTE_R_C)) {
				rev[i].guest_rpte |= HPTE_R_C;
				note_hpte_modification(kvm, &rev[i]);
			}
			n = hpte_page_size(v, r);
			n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT;
			if (n > npages_dirty)
				npages_dirty = n;
			eieio();
		}
		v &= ~HPTE_V_ABSENT;
		v |= HPTE_V_VALID;
		__unlock_hpte(hptep, v);
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return npages_dirty;
}
void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			      struct kvm_memory_slot *memslot,
			      unsigned long *map)
{
	unsigned long gfn;

	if (!vpa->dirty || !vpa->pinned_addr)
		return;
	gfn = vpa->gpa >> PAGE_SHIFT;
	if (gfn < memslot->base_gfn ||
	    gfn >= memslot->base_gfn + memslot->npages)
		return;

	vpa->dirty = false;
	if (map)
		__set_bit_le(gfn - memslot->base_gfn, map);
}
long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	unsigned long *rmapp;

	preempt_disable();
	rmapp = memslot->arch.rmap;
	for (i = 0; i < memslot->npages; ++i) {
		int npages = kvm_test_clear_dirty_npages(kvm, rmapp);
		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since we always put huge-page HPTEs in the rmap chain
		 * corresponding to their page base address.
		 */
		if (npages && map)
			for (j = i; npages; ++j, --npages)
				__set_bit_le(j, map);
		++rmapp;
	}
	preempt_enable();
	return 0;
}
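/*
 * Illustrative note (not in the original): with 4k system pages, a 16M
 * HPTE found dirty at gfn i makes kvm_test_clear_dirty_npages() return
 * 4096, and the loop above sets bits i..i+4095 of the dirty map in one
 * go; i is always a multiple of the huge-page span because huge-page
 * HPTEs sit in the rmap chain of their base address.
 */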
void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, offset;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto err;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, 1, pages);
	if (npages < 1)
		goto err;
	page = pages[0];
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	offset = gpa & (PAGE_SIZE - 1);
	if (nb_ret)
		*nb_ret = PAGE_SIZE - offset;
	return page_address(page) + offset;

 err:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return NULL;
}
void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
			     bool dirty)
{
	struct page *page = virt_to_page(va);
	struct kvm_memory_slot *memslot;
	unsigned long gfn;
	unsigned long *rmap;
	int srcu_idx;

	put_page(page);

	if (!dirty)
		return;

	/* We need to mark this page dirty in the rmap chain */
	gfn = gpa >> PAGE_SHIFT;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot) {
		if (!kvm_is_radix(kvm)) {
			rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
			lock_rmap(rmap);
			*rmap |= KVMPPC_RMAP_CHANGED;
			unlock_rmap(rmap);
		} else if (memslot->dirty_bitmap) {
			mark_page_dirty(kvm, gfn);
		}
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}
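/*
 * A minimal usage sketch (illustrative only, assuming valid "kvm" and
 * guest-physical address "gpa" held by the caller): callers pair the two
 * functions and pass dirty = true if they wrote through the mapping:
 *
 *	unsigned long nb;
 *	void *va = kvmppc_pin_guest_page(kvm, gpa, &nb);
 *
 *	if (va) {
 *		memset(va, 0, nb);	// at most PAGE_SIZE - offset bytes
 *		kvmppc_unpin_guest_page(kvm, va, gpa, true);
 *	}
 */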
static int resize_hpt_allocate(struct kvm_resize_hpt *resize)
{
	int rc;

	rc = kvmppc_allocate_hpt(&resize->hpt, resize->order);
	if (rc < 0)
		return rc;

	resize_hpt_debug(resize, "resize_hpt_allocate(): HPT @ 0x%lx\n",
			 resize->hpt.virt);

	return 0;
}
static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
					    unsigned long idx)
{
	struct kvm *kvm = resize->kvm;
	struct kvm_hpt_info *old = &kvm->arch.hpt;
	struct kvm_hpt_info *new = &resize->hpt;
	unsigned long old_hash_mask = (1ULL << (old->order - 7)) - 1;
	unsigned long new_hash_mask = (1ULL << (new->order - 7)) - 1;
	__be64 *hptep, *new_hptep;
	unsigned long vpte, rpte, guest_rpte;
	int ret;
	struct revmap_entry *rev;
	unsigned long apsize, psize, avpn, pteg, hash;
	unsigned long new_idx, new_pteg, replace_vpte;

	hptep = (__be64 *)(old->virt + (idx << 4));

	/* Guest is stopped, so new HPTEs can't be added or faulted
	 * in, only unmapped or altered by host actions.  So, it's
	 * safe to check this before we take the HPTE lock */
	vpte = be64_to_cpu(hptep[0]);
	if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT))
		return 0; /* nothing to do */

	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();

	vpte = be64_to_cpu(hptep[0]);

	ret = 0;
	if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT))
		/* Nothing to do */
		goto out;

	/* Unmap */
	rev = &old->rev[idx];
	guest_rpte = rev->guest_rpte;

	ret = -EIO;
	apsize = hpte_page_size(vpte, guest_rpte);
	if (!apsize)
		goto out;

	if (vpte & HPTE_V_VALID) {
		unsigned long gfn = hpte_rpn(guest_rpte, apsize);
		int srcu_idx = srcu_read_lock(&kvm->srcu);
		struct kvm_memory_slot *memslot =
			__gfn_to_memslot(kvm_memslots(kvm), gfn);

		if (memslot) {
			unsigned long *rmapp;
			rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];

			lock_rmap(rmapp);
			kvmppc_unmap_hpte(kvm, idx, rmapp, gfn);
			unlock_rmap(rmapp);
		}

		srcu_read_unlock(&kvm->srcu, srcu_idx);
	}

	/* Reload PTE after unmap */
	vpte = be64_to_cpu(hptep[0]);

	BUG_ON(vpte & HPTE_V_VALID);
	BUG_ON(!(vpte & HPTE_V_ABSENT));

	ret = 0;
	if (!(vpte & HPTE_V_BOLTED))
		goto out;

	rpte = be64_to_cpu(hptep[1]);
	psize = hpte_base_page_size(vpte, rpte);
	avpn = HPTE_V_AVPN_VAL(vpte) & ~((psize - 1) >> 23);
	pteg = idx / HPTES_PER_GROUP;
	if (vpte & HPTE_V_SECONDARY)
		pteg = ~pteg;

	if (!(vpte & HPTE_V_1TB_SEG)) {
		unsigned long offset, vsid;

		/* We only have 28 - 23 bits of offset in avpn */
		offset = (avpn & 0x1f) << 23;
		vsid = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (psize < (1ULL << 23))
			offset |= ((vsid ^ pteg) & old_hash_mask) * psize;

		hash = vsid ^ (offset / psize);
	} else {
		unsigned long offset, vsid;

		/* We only have 40 - 23 bits of seg_off in avpn */
		offset = (avpn & 0x1ffff) << 23;
		vsid = avpn >> 17;
		if (psize < (1ULL << 23))
			offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) * psize;

		hash = vsid ^ (vsid << 25) ^ (offset / psize);
	}

	new_pteg = hash & new_hash_mask;
	if (vpte & HPTE_V_SECONDARY) {
		BUG_ON(~pteg != (hash & old_hash_mask));
		new_pteg = ~new_pteg;
	} else {
		BUG_ON(pteg != (hash & old_hash_mask));
	}

	new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP);
	new_hptep = (__be64 *)(new->virt + (new_idx << 4));

	replace_vpte = be64_to_cpu(new_hptep[0]);

	if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
		BUG_ON(new->order >= old->order);

		if (replace_vpte & HPTE_V_BOLTED) {
			if (vpte & HPTE_V_BOLTED)
				/* Bolted collision, nothing we can do */
				ret = -ENOSPC;
			/* Discard the new HPTE */
			goto out;
		}

		/* Discard the previous HPTE */
	}

	new_hptep[1] = cpu_to_be64(rpte);
	new->rev[new_idx].guest_rpte = guest_rpte;
	/* No need for a barrier, since new HPT isn't active */
	new_hptep[0] = cpu_to_be64(vpte);
	unlock_hpte(new_hptep, vpte);

 out:
	unlock_hpte(hptep, vpte);
	return ret;
}
static int resize_hpt_rehash(struct kvm_resize_hpt *resize)
{
	struct kvm *kvm = resize->kvm;
	unsigned long i;
	int rc;

	/*
	 * resize_hpt_rehash_hpte() doesn't handle the new-format HPTEs
	 * that POWER9 uses, and could well hit a BUG_ON on POWER9.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return -EIO;
	for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) {
		rc = resize_hpt_rehash_hpte(resize, i);
		if (rc != 0)
			return rc;
	}

	return 0;
}
static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
{
	struct kvm *kvm = resize->kvm;
	struct kvm_hpt_info hpt_tmp;

	/* Exchange the pending tables in the resize structure with
	 * the active tables */

	resize_hpt_debug(resize, "resize_hpt_pivot()\n");

	spin_lock(&kvm->mmu_lock);
	asm volatile("ptesync" : : : "memory");

	hpt_tmp = kvm->arch.hpt;
	kvmppc_set_hpt(kvm, &resize->hpt);
	resize->hpt = hpt_tmp;

	spin_unlock(&kvm->mmu_lock);

	synchronize_srcu_expedited(&kvm->srcu);

	resize_hpt_debug(resize, "resize_hpt_pivot() done\n");
}
static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
{
	BUG_ON(kvm->arch.resize_hpt != resize);

	if (!resize)
		return;

	if (resize->hpt.virt)
		kvmppc_free_hpt(&resize->hpt);

	kvm->arch.resize_hpt = NULL;
	kfree(resize);
}
static void resize_hpt_prepare_work(struct work_struct *work)
{
	struct kvm_resize_hpt *resize = container_of(work,
						     struct kvm_resize_hpt,
						     work);
	struct kvm *kvm = resize->kvm;
	int err;

	resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
			 resize->order);

	err = resize_hpt_allocate(resize);

	mutex_lock(&kvm->lock);

	resize->error = err;
	resize->prepare_done = true;

	mutex_unlock(&kvm->lock);
}
long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
				     struct kvm_ppc_resize_hpt *rhpt)
{
	unsigned long flags = rhpt->flags;
	unsigned long shift = rhpt->shift;
	struct kvm_resize_hpt *resize;
	int ret;

	if (flags != 0)
		return -EINVAL;

	if (shift && ((shift < 18) || (shift > 46)))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	resize = kvm->arch.resize_hpt;

	if (resize) {
		if (resize->order == shift) {
			/* Suitable resize in progress */
			if (resize->prepare_done) {
				ret = resize->error;
				if (ret != 0)
					resize_hpt_release(kvm, resize);
			} else {
				ret = 100; /* estimated time in ms */
			}

			goto out;
		}

		/* not suitable, cancel it */
		resize_hpt_release(kvm, resize);
	}

	ret = 0;
	if (!shift)
		goto out; /* nothing to do */

	/* start new resize */

	resize = kzalloc(sizeof(*resize), GFP_KERNEL);
	if (!resize) {
		ret = -ENOMEM;
		goto out;
	}
	resize->order = shift;
	resize->kvm = kvm;
	INIT_WORK(&resize->work, resize_hpt_prepare_work);
	kvm->arch.resize_hpt = resize;

	schedule_work(&resize->work);

	ret = 100; /* estimated time in ms */

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static void resize_hpt_boot_vcpu(void *opaque)
{
	/* Nothing to do, just force a KVM exit */
}
long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
				    struct kvm_ppc_resize_hpt *rhpt)
{
	unsigned long flags = rhpt->flags;
	unsigned long shift = rhpt->shift;
	struct kvm_resize_hpt *resize;
	long ret;

	if (flags != 0)
		return -EINVAL;

	if (shift && ((shift < 18) || (shift > 46)))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	resize = kvm->arch.resize_hpt;

	/* This shouldn't be possible */
	ret = -EIO;
	if (WARN_ON(!kvm->arch.hpte_setup_done))
		goto out_no_hpt;

	/* Stop VCPUs from running while we mess with the HPT */
	kvm->arch.hpte_setup_done = 0;
	smp_mb();

	/* Boot all CPUs out of the guest so they re-read
	 * hpte_setup_done */
	on_each_cpu(resize_hpt_boot_vcpu, NULL, 1);

	ret = -ENXIO;
	if (!resize || (resize->order != shift))
		goto out;

	ret = -EBUSY;
	if (!resize->prepare_done)
		goto out;

	ret = resize->error;
	if (ret != 0)
		goto out;

	ret = resize_hpt_rehash(resize);
	if (ret != 0)
		goto out;

	resize_hpt_pivot(resize);

out:
	/* Let VCPUs run again */
	kvm->arch.hpte_setup_done = 1;
	smp_mb();
out_no_hpt:
	resize_hpt_release(kvm, resize);
	mutex_unlock(&kvm->lock);
	return ret;
}
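/*
 * Illustrative userspace flow (hypothetical snippet, not part of this
 * file): resizing is a two-step dance against the VM fd, and a positive
 * return from PREPARE is the estimated time in ms set above:
 *
 *	struct kvm_ppc_resize_hpt rhpt = { .flags = 0, .shift = 26 };
 *
 *	while (ioctl(vm_fd, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt) > 0)
 *		usleep(10000);
 *	ioctl(vm_fd, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
 *
 * QEMU drives this from the guest's H_RESIZE_HPT_PREPARE/COMMIT hcalls.
 */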
/*
 * Functions for reading and writing the hash table via reads and
 * writes on a file descriptor.
 *
 * Reads return the guest view of the hash table, which has to be
 * pieced together from the real hash table and the guest_rpte
 * values in the revmap array.
 *
 * On writes, each HPTE written is considered in turn, and if it
 * is valid, it is written to the HPT as if an H_ENTER with the
 * exact flag set was done.  When the invalid count is non-zero
 * in the header written to the stream, the kernel will make
 * sure that that many HPTEs are invalid, and invalidate them
 * if not.
 */

struct kvm_htab_ctx {
	unsigned long index;
	unsigned long flags;
	struct kvm *kvm;
	int first_pass;
};

#define HPTE_SIZE	(2 * sizeof(unsigned long))
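/*
 * Illustrative stream layout (not in the original): each chunk read from
 * or written to the fd is a struct kvm_get_htab_header (index, n_valid,
 * n_invalid) followed by n_valid pairs of 64-bit HPTE dwords, e.g.
 *
 *	{ .index = 0x100, .n_valid = 2, .n_invalid = 6 }
 *	<v0> <r0> <v1> <r1>
 *
 * describes entries 0x100-0x101 as valid and 0x102-0x107 as invalid.
 */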
/*
 * Returns 1 if this HPT entry has been modified or has pending
 * R/C bit changes.
 */
static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp)
{
	unsigned long rcbits_unset;

	if (revp->guest_rpte & HPTE_GR_MODIFIED)
		return 1;

	/* Also need to consider changes in reference and changed bits */
	rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
	if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) &&
	    (be64_to_cpu(hptp[1]) & rcbits_unset))
		return 1;

	return 0;
}
static long record_hpte(unsigned long flags, __be64 *hptp,
			unsigned long *hpte, struct revmap_entry *revp,
			int want_valid, int first_pass)
{
	unsigned long v, r, hr;
	unsigned long rcbits_unset;
	int ok = 1;
	int valid, dirty;

	/* Unmodified entries are uninteresting except on the first pass */
	dirty = hpte_dirty(revp, hptp);
	if (!first_pass && !dirty)
		return 0;

	valid = 0;
	if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) {
		valid = 1;
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
		    !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED))
			valid = 0;
	}
	if (valid != want_valid)
		ok = 0;

	if (valid || dirty) {
		/* lock the HPTE so it's stable and read it */
		preempt_disable();
		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
			cpu_relax();
		v = be64_to_cpu(hptp[0]);
		hr = be64_to_cpu(hptp[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, hr);
			hr = hpte_new_to_old_r(hr);
		}

		/* re-evaluate valid and dirty from synchronized HPTE value */
		valid = !!(v & HPTE_V_VALID);
		dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);

		/* Harvest R and C into guest view if necessary */
		rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
		if (valid && (rcbits_unset & hr)) {
			revp->guest_rpte |= (hr &
				(HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
			dirty = 1;
		}

		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
			valid = 1;
		}
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
			valid = 0;

		r = revp->guest_rpte;
		/* only clear modified if this is the right sort of entry */
		if (valid == want_valid && dirty) {
			r &= ~HPTE_GR_MODIFIED;
			revp->guest_rpte = r;
		}
		unlock_hpte(hptp, be64_to_cpu(hptp[0]));
		preempt_enable();
		if (!(valid == want_valid && (first_pass || dirty)))
			ok = 0;
	}
	hpte[0] = cpu_to_be64(v);
	hpte[1] = cpu_to_be64(r);
	return ok;
}
static ssize_t kvm_htab_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	__be64 *hptp;
	struct revmap_entry *revp;
	unsigned long i, nb, nw;
	unsigned long __user *lbuf;
	struct kvm_get_htab_header __user *hptr;
	unsigned long flags;
	int first_pass;
	unsigned long hpte[2];

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	first_pass = ctx->first_pass;
	flags = ctx->flags;

	i = ctx->index;
	hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
	revp = kvm->arch.hpt.rev + i;
	lbuf = (unsigned long __user *)buf;

	nb = 0;
	while (nb + sizeof(hdr) + HPTE_SIZE < count) {
		/* Initialize header */
		hptr = (struct kvm_get_htab_header __user *)buf;
		hdr.n_valid = 0;
		hdr.n_invalid = 0;
		nw = nb;
		nb += sizeof(hdr);
		lbuf = (unsigned long __user *)(buf + sizeof(hdr));

		/* Skip uninteresting entries, i.e. clean on not-first pass */
		if (!first_pass) {
			while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
			       !hpte_dirty(revp, hptp)) {
				++i;
				hptp += 2;
				++revp;
			}
		}
		hdr.index = i;

		/* Grab a series of valid entries */
		while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
		       hdr.n_valid < 0xffff &&
		       nb + HPTE_SIZE < count &&
		       record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
			/* valid entry, write it out */
			++hdr.n_valid;
			if (__put_user(hpte[0], lbuf) ||
			    __put_user(hpte[1], lbuf + 1))
				return -EFAULT;
			nb += HPTE_SIZE;
			lbuf += 2;
			++i;
			hptp += 2;
			++revp;
		}
		/* Now skip invalid entries while we can */
		while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
		       hdr.n_invalid < 0xffff &&
		       record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
			/* found an invalid entry */
			++hdr.n_invalid;
			++i;
			hptp += 2;
			++revp;
		}

		if (hdr.n_valid || hdr.n_invalid) {
			/* write back the header */
			if (__copy_to_user(hptr, &hdr, sizeof(hdr)))
				return -EFAULT;
			nw = nb;
			buf = (char __user *)lbuf;
		} else {
			nb = nw;
		}

		/* Check if we've wrapped around the hash table */
		if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
			i = 0;
			ctx->first_pass = 0;
			break;
		}
	}

	ctx->index = i;

	return nb;
}
static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	unsigned long i, j;
	unsigned long v, r;
	unsigned long __user *lbuf;
	__be64 *hptp;
	unsigned long tmp[2];
	ssize_t nb;
	long int err, ret;
	int hpte_setup;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	/* lock out vcpus from running while we're doing this */
	mutex_lock(&kvm->lock);
	hpte_setup = kvm->arch.hpte_setup_done;
	if (hpte_setup) {
		kvm->arch.hpte_setup_done = 0;	/* temporarily */
		/* order hpte_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.hpte_setup_done = 1;
			mutex_unlock(&kvm->lock);
			return -EBUSY;
		}
	}

	err = 0;
	for (nb = 0; nb + sizeof(hdr) <= count; ) {
		err = -EFAULT;
		if (__copy_from_user(&hdr, buf, sizeof(hdr)))
			break;

		err = 0;
		if (nb + hdr.n_valid * HPTE_SIZE > count)
			break;

		nb += sizeof(hdr);
		buf += sizeof(hdr);

		err = -EINVAL;
		i = hdr.index;
		if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) ||
		    i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt))
			break;

		hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
		lbuf = (unsigned long __user *)buf;
		for (j = 0; j < hdr.n_valid; ++j) {
			__be64 hpte_v;
			__be64 hpte_r;

			err = -EFAULT;
			if (__get_user(hpte_v, lbuf) ||
			    __get_user(hpte_r, lbuf + 1))
				goto out;
			v = be64_to_cpu(hpte_v);
			r = be64_to_cpu(hpte_r);
			err = -EINVAL;
			if (!(v & HPTE_V_VALID))
				goto out;
			lbuf += 2;
			nb += HPTE_SIZE;

			if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			err = -EIO;
			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
							 tmp);
			if (ret != H_SUCCESS) {
				pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
				       "r=%lx\n", ret, i, v, r);
				goto out;
			}
			if (!hpte_setup && is_vrma_hpte(v)) {
				unsigned long psize = hpte_base_page_size(v, r);
				unsigned long senc = slb_pgsize_encoding(psize);
				unsigned long lpcr;

				kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
					(VRMA_VSID << SLB_VSID_SHIFT_1T);
				lpcr = senc << (LPCR_VRMASD_SH - 4);
				kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
				hpte_setup = 1;
			}
			++i;
			hptp += 2;
		}

		for (j = 0; j < hdr.n_invalid; ++j) {
			if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			++i;
			hptp += 2;
		}
		err = 0;
	}

 out:
	/* Order HPTE updates vs. hpte_setup_done */
	smp_wmb();
	kvm->arch.hpte_setup_done = hpte_setup;
	mutex_unlock(&kvm->lock);

	if (err)
		return err;
	return nb;
}
static int kvm_htab_release(struct inode *inode, struct file *filp)
{
	struct kvm_htab_ctx *ctx = filp->private_data;

	filp->private_data = NULL;
	if (!(ctx->flags & KVM_GET_HTAB_WRITE))
		atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
	kvm_put_kvm(ctx->kvm);
	kfree(ctx);
	return 0;
}
static const struct file_operations kvm_htab_fops = {
	.read		= kvm_htab_read,
	.write		= kvm_htab_write,
	.llseek		= default_llseek,
	.release	= kvm_htab_release,
};
int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
{
	int ret;
	struct kvm_htab_ctx *ctx;
	int rwflag;

	/* reject flags we don't recognize */
	if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE))
		return -EINVAL;
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	kvm_get_kvm(kvm);
	ctx->kvm = kvm;
	ctx->index = ghf->start_index;
	ctx->flags = ghf->flags;
	ctx->first_pass = 1;

	rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
	ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
	if (ret < 0) {
		kfree(ctx);
		kvm_put_kvm(kvm);
		return ret;
	}

	if (rwflag == O_RDONLY) {
		mutex_lock(&kvm->slots_lock);
		atomic_inc(&kvm->arch.hpte_mod_interest);
		/* make sure kvmppc_do_h_enter etc. see the increment */
		synchronize_srcu_expedited(&kvm->srcu);
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;
}
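/*
 * A minimal userspace sketch (hypothetical, not part of this file):
 *
 *	struct kvm_get_htab_fd ghf = { .flags = 0, .start_index = 0 };
 *	int fd = ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &ghf);
 *	char buf[65536];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		save_chunk(buf, n);	// hypothetical helper
 *
 * The first pass returns every entry; later reads return only entries
 * dirtied since, which is what live migration relies on.
 */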
struct debugfs_htab_state {
	struct kvm	*kvm;
	struct mutex	mutex;
	unsigned long	hpt_index;
	int		chars_left;
	int		buf_index;
	char		buf[64];
};

static int debugfs_htab_open(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;
	struct debugfs_htab_state *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	kvm_get_kvm(kvm);
	p->kvm = kvm;
	mutex_init(&p->mutex);
	file->private_data = p;

	return nonseekable_open(inode, file);
}

static int debugfs_htab_release(struct inode *inode, struct file *file)
{
	struct debugfs_htab_state *p = file->private_data;

	kvm_put_kvm(p->kvm);
	kfree(p);
	return 0;
}
static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct debugfs_htab_state *p = file->private_data;
	ssize_t ret, r;
	unsigned long i, n;
	unsigned long v, hr, gr;
	struct kvm *kvm;
	__be64 *hptp;

	ret = mutex_lock_interruptible(&p->mutex);
	if (ret)
		return ret;

	if (p->chars_left) {
		n = p->chars_left;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf + p->buf_index, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index += n;
		buf += n;
		len -= n;
		ret = n;
		if (r) {
			if (!n)
				ret = -EFAULT;
			goto out;
		}
	}

	kvm = p->kvm;
	i = p->hpt_index;
	hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
	for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt);
	     ++i, hptp += 2) {
		if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)))
			continue;

		/* lock the HPTE so it's stable and read it */
		preempt_disable();
		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
			cpu_relax();
		v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK;
		hr = be64_to_cpu(hptp[1]);
		gr = kvm->arch.hpt.rev[i].guest_rpte;
		unlock_hpte(hptp, v);
		preempt_enable();

		if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
			continue;

		n = scnprintf(p->buf, sizeof(p->buf),
			      "%6lx %.16lx %.16lx %.16lx\n",
			      i, v, hr, gr);
		p->chars_left = n;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index = n;
		buf += n;
		len -= n;
		ret += n;
		if (r) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
	}
	p->hpt_index = i;

 out:
	mutex_unlock(&p->mutex);
	return ret;
}
static ssize_t debugfs_htab_write(struct file *file, const char __user *buf,
				  size_t len, loff_t *ppos)
{
	return -EACCES;
}
static const struct file_operations debugfs_htab_fops = {
	.owner	 = THIS_MODULE,
	.open	 = debugfs_htab_open,
	.release = debugfs_htab_release,
	.read	 = debugfs_htab_read,
	.write	 = debugfs_htab_write,
	.llseek	 = generic_file_llseek,
};
void kvmppc_mmu_debugfs_init(struct kvm *kvm)
{
	kvm->arch.htab_dentry = debugfs_create_file("htab", 0400,
						    kvm->arch.debugfs_dir, kvm,
						    &debugfs_htab_fops);
}
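/*
 * Illustrative note (not in the original): each line produced by
 * debugfs_htab_read() above has the form
 *
 *	  index  v (HPTE dword 0)  r (HPTE dword 1)  guest_rpte
 *	   3f08 8000000012345006 00000000abcd0196 00000000abcd0196
 *
 * with hypothetical values; the file appears as "htab" inside the
 * per-VM KVM debugfs directory.
 */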
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	vcpu->arch.slb_nr = 32;		/* POWER7/POWER8 */

	if (kvm_is_radix(vcpu->kvm))
		mmu->xlate = kvmppc_mmu_radix_xlate;
	else
		mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}