/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/srcu.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63

/* Power architecture requires HPT is at least 256kB */
#define PPC_MIN_HPT_ORDER	18

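/*
 * Allocate a guest hashed page table (HPT) of 2^order bytes.
 * Tries the kernel page allocator first for non-default sizes,
 * then the preallocated linear-memory pool, then successively
 * smaller page-allocator sizes down to PPC_MIN_HPT_ORDER.
 */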
long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	unsigned long hpt;
	struct revmap_entry *rev;
	struct kvmppc_linear_info *li;
	long order = kvm_hpt_order;

	if (htab_orderp) {
		order = *htab_orderp;
		if (order < PPC_MIN_HPT_ORDER)
			order = PPC_MIN_HPT_ORDER;
	}

	/*
	 * If the user wants a different size from default,
	 * try first to allocate it from the kernel page allocator.
	 */
	hpt = 0;
	if (order != kvm_hpt_order) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	/* Next try to allocate from the preallocated pool */
	if (!hpt) {
		li = kvm_alloc_hpt();
		if (li) {
			hpt = (ulong)li->base_virt;
			kvm->arch.hpt_li = li;
			order = kvm_hpt_order;
		}
	}

	/* Lastly try successively smaller sizes from the page allocator */
	while (!hpt && order > PPC_MIN_HPT_ORDER) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	if (!hpt)
		return -ENOMEM;

	kvm->arch.hpt_virt = hpt;
	kvm->arch.hpt_order = order;
	/* HPTEs are 2**4 bytes long */
	kvm->arch.hpt_npte = 1ul << (order - 4);
	/* 128 (2**7) bytes in each HPTEG */
	kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;

	/* Allocate reverse map array */
	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
	if (!rev) {
		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
		goto out_freehpt;
	}
	kvm->arch.revmap = rev;
	kvm->arch.sdr1 = __pa(hpt) | (order - 18);

	pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
		hpt, order, kvm->arch.lpid);

	if (htab_orderp)
		*htab_orderp = order;
	return 0;

 out_freehpt:
	if (kvm->arch.hpt_li)
		kvm_release_hpt(kvm->arch.hpt_li);
	else
		free_pages(hpt, order - PAGE_SHIFT);
	return -ENOMEM;
}

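/*
 * Clear out (or, if necessary, first allocate) the guest HPT so the
 * guest can be restarted with a clean MMU state.  Fails with -EBUSY
 * if any vcpus are still running, since they could be using stale
 * translations.
 */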
long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	long err = -EBUSY;
	long order;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done) {
		kvm->arch.rma_setup_done = 0;
		/* order rma_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.rma_setup_done = 1;
			goto out;
		}
	}
	if (kvm->arch.hpt_virt) {
		order = kvm->arch.hpt_order;
		/* Set the entire HPT to 0, i.e. invalid HPTEs */
		memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
		/*
		 * Set the whole last_vcpu array to an invalid vcpu number.
		 * This ensures that each vcpu will flush its TLB on next entry.
		 */
		memset(kvm->arch.last_vcpu, 0xff, sizeof(kvm->arch.last_vcpu));
		*htab_orderp = order;
		err = 0;
	} else {
		err = kvmppc_alloc_hpt(kvm, htab_orderp);
		order = *htab_orderp;
	}
 out:
	mutex_unlock(&kvm->lock);
	return err;
}

void kvmppc_free_hpt(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);
	vfree(kvm->arch.revmap);
	if (kvm->arch.hpt_li)
		kvm_release_hpt(kvm->arch.hpt_li);
	else
		free_pages(kvm->arch.hpt_virt,
			   kvm->arch.hpt_order - PAGE_SHIFT);
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}

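/*
 * Pre-insert bolted HPTEs covering the virtual real mode area (VRMA),
 * one HPTE (slot 7 of its HPTEG) per page of order 'porder', so the
 * guest has usable real-mode translations from the start.
 */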
void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	long ret;
	struct kvm *kvm = vcpu->kvm;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > kvm->arch.hpt_mask + 1)
		npages = kvm->arch.hpt_mask + 1;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

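/*
 * One-time MMU setup for HV KVM: size the LPID space for the CPU
 * family and claim the host's LPID plus one reserved LPID (used in
 * partition switching).
 */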
int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	/* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
		rsvd_lpid = LPID_RSVD;
	} else {
		host_lpid = 0;			/* PPC970 */
		rsvd_lpid = MAX_LPID_970;
	}

	kvmppc_init_lpid(rsvd_lpid + 1);

	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}

/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the memslot->arch.slot_phys[] array.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
				  struct kvm_memory_slot *memslot,
				  unsigned long psize)
{
	unsigned long start;
	long np, err;
	struct page *page, *hpage, *pages[1];
	unsigned long s, pgsize;
	unsigned long *physp;
	unsigned int is_io, got, pgorder;
	struct vm_area_struct *vma;
	unsigned long pfn, i, npages;

	physp = memslot->arch.slot_phys;
	if (!physp)
		return -EINVAL;
	if (physp[gfn - memslot->base_gfn])
		return 0;

	is_io = 0;
	got = 0;
	page = NULL;
	pgsize = psize;
	err = -EINVAL;
	start = gfn_to_hva_memslot(memslot, gfn);

	/* Instantiate and get the page we want access to */
	np = get_user_pages_fast(start, 1, 1, pages);
	if (np != 1) {
		/* Look up the vma for the page */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma || vma->vm_start > start ||
		    start + psize > vma->vm_end ||
		    !(vma->vm_flags & VM_PFNMAP))
			goto up_err;
		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		/* check alignment of pfn vs. requested page size */
		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
			goto up_err;
		up_read(&current->mm->mmap_sem);

	} else {
		page = pages[0];
		got = KVMPPC_GOT_PAGE;

		/* See if this is a large page */
		s = PAGE_SIZE;
		if (PageHuge(page)) {
			hpage = compound_head(page);
			s <<= compound_order(hpage);
			/* Get the whole large page if slot alignment is ok */
			if (s > psize && slot_is_aligned(memslot, s) &&
			    !(memslot->userspace_addr & (s - 1))) {
				start &= ~(s - 1);
				pgsize = s;
				get_page(hpage);
				put_page(page);
				page = hpage;
			}
		}
		if (s < psize)
			goto out;
		pfn = page_to_pfn(page);
	}

	npages = pgsize >> PAGE_SHIFT;
	pgorder = __ilog2(npages);
	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
	spin_lock(&kvm->arch.slot_phys_lock);
	for (i = 0; i < npages; ++i) {
		if (!physp[i]) {
			physp[i] = ((pfn + i) << PAGE_SHIFT) +
				got + is_io + pgorder;
			got = 0;
		}
	}
	spin_unlock(&kvm->arch.slot_phys_lock);
	err = 0;

 out:
	if (got)
		put_page(page);
	return err;

 up_err:
	up_read(&current->mm->mmap_sem);
	return err;
}

/*
 * We come here on an H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 * already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long psize, gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;

	if (kvm->arch.using_mmu_notifiers)
		goto do_insert;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (!slot_is_aligned(memslot, psize))
			return H_PARAMETER;
		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
			return H_PARAMETER;
	}

 do_insert:
	/* Protect linux PTE lookup from page table destruction */
	rcu_read_lock_sched();	/* this disables preemption too */
	vcpu->arch.pgdir = current->mm->pgd;
	ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
	rcu_read_unlock_sched();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;
}

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = hpte_page_size(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}

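/*
 * Translate a guest effective address to a guest real address by
 * searching the guest SLB and HPT, and work out the access
 * permissions the guest would see.  This implements the mmu->xlate
 * hook installed in kvmppc_mmu_book3s_hv_init() below.
 */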
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, gr;
	unsigned long *hptep;
	int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0)
		return -ENOENT;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hptep[0] & ~HPTE_V_HVLOCK;
	gr = kvm->arch.revmap[index].guest_rpte;

	/* Unlock the HPTE */
	asm volatile("lwsync" : : : "memory");
	hptep[0] = v;

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}

static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned long gpa, gva_t ea, int is_store)
{
	int ret;
	u32 last_inst;
	unsigned long srr0 = kvmppc_get_pc(vcpu);

	/*
	 * We try to load the last instruction.  We don't let
	 * emulate_instruction do it as it doesn't check what
	 * kvmppc_ld returns.
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
			return RESUME_GUEST;
		vcpu->arch.last_inst = last_inst;
	}

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */

	if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later.  The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible.  It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(run, vcpu);
}

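/*
 * Handle a guest page fault that the real-mode handler punted to us:
 * look the page up (pinning it with get_user_pages_fast or finding
 * the pfn of a VM_PFNMAP mapping), then update the HPTE to point at
 * it, or hand the access to the MMIO emulation path if there is no
 * memslot covering it.
 */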
int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hptep, hpte[3], r;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gpa, gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page, *pages[1];
	long index, ret, npages;
	unsigned long is_io;
	unsigned int writing, write_ok;
	struct vm_area_struct *vma;
	unsigned long rcbits;

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;
	index = vcpu->arch.pgfault_index;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	rev = &kvm->arch.revmap[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
	hpte[1] = hptep[1];
	hpte[2] = r = rev->guest_rpte;
	asm volatile("lwsync" : : : "memory");
	hptep[0] = hpte[0];
	preempt_enable();

	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = hpte_page_size(hpte[0], r);
	gpa = (r & HPTE_R_RPN & ~(psize - 1)) | (ea & (psize - 1));
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);

	if (!kvm->arch.using_mmu_notifiers)
		return -EFAULT;		/* should never get here */

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	is_io = 0;
	pfn = 0;
	page = NULL;
	pte_size = PAGE_SIZE;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pte_size = psize;
			is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
			write_ok = vma->vm_flags & VM_WRITE;
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			return -EFAULT;
	} else {
		page = pages[0];
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
		}
		/* if the guest wants write access, see if that is OK */
		if (!writing && hpte_is_writable(r)) {
			pte_t *ptep, pte;

			/*
			 * We need to protect against page table destruction
			 * while looking up and updating the pte.
			 */
			rcu_read_lock_sched();
			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
							 hva, NULL);
			if (ptep && pte_present(*ptep)) {
				pte = kvmppc_read_update_linux_pte(ptep, 1);
				if (pte_write(pte))
					write_ok = 1;
			}
			rcu_read_unlock_sched();
		}
		pfn = page_to_pfn(page);
	}

	ret = -EFAULT;
	if (psize > pte_size)
		goto out_put;

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_io)) {
		if (is_io)
			return -EFAULT;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/* Set the HPTE to point to pfn */
	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_notifier_retry(vcpu, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (hptep[0] & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= HPTE_V_ABSENT;
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	hptep[1] = r;
	eieio();
	hptep[0] = hpte[0];
	asm volatile("ptesync" : : : "memory");
	preempt_enable();
	if (page && hpte_is_writable(r))
		SetPageDirty(page);

 out_put:
	if (page) {
		/*
		 * We drop pages[0] here, not page because page might
		 * have been set to the head page of a compound, but
		 * we have to drop the reference on the correct tail
		 * page to match the get inside gup()
		 */
		put_page(pages[0]);
	}
	return ret;

 out_unlock:
	hptep[0] &= ~HPTE_V_HVLOCK;
	preempt_enable();
	goto out_put;
}

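/*
 * Apply 'handler' to the rmap chain of every guest page whose host
 * virtual address falls in [start, end), across all memslots.  This
 * is the common iterator behind the MMU notifier callbacks below.
 */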
static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				int (*handler)(struct kvm *kvm,
					       unsigned long *rmapp,
					       unsigned long gfn))
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gfn_t gfn_offset = gfn - memslot->base_gfn;

			ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);
			retval |= ret;
		}
	}

	return retval;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
					 unsigned long gfn))
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
}

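/*
 * Remove all HPTEs on the rmap chain for this guest page, harvesting
 * the referenced and changed bits into the rmap entry and the guest
 * view of each HPTE as we go.
 */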
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long h, i, j;
	unsigned long *hptep;
	unsigned long ptel, psize, rcbits;

	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}
		j = rev[i].forw;
		if (j == i) {
			/* chain is now empty */
			*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		} else {
			/* remove i from chain */
			h = rev[i].back;
			rev[h].forw = j;
			rev[j].back = h;
			rev[i].forw = rev[i].back = i;
			*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
		}

		/* Now check and modify the HPTE */
		ptel = rev[i].guest_rpte;
		psize = hpte_page_size(hptep[0], ptel);
		if ((hptep[0] & HPTE_V_VALID) &&
		    hpte_rpn(ptel, psize) == gfn) {
			if (kvm->arch.using_mmu_notifiers)
				hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			/* Harvest R and C */
			rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
			*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
			rev[i].guest_rpte = ptel | rcbits;
		}
		unlock_rmap(rmapp);
		hptep[0] &= ~HPTE_V_HVLOCK;
	}
	return 0;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
	return 0;
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	unsigned long *rmapp;
	unsigned long gfn;
	unsigned long n;

	rmapp = memslot->arch.rmap;
	gfn = memslot->base_gfn;
	for (n = memslot->npages; n; --n) {
		/*
		 * Testing the present bit without locking is OK because
		 * the memslot has been marked invalid already, and hence
		 * no new HPTEs referencing this page can be created,
		 * thus the present bit can't go from 0 to 1.
		 */
		if (*rmapp & KVMPPC_RMAP_PRESENT)
			kvm_unmap_rmapp(kvm, rmapp, gfn);
		++rmapp;
		++gfn;
	}
}

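/*
 * Test and clear the referenced (R) bit for this guest page, both in
 * the rmap entry and in any HPTEs on its rmap chain; returns 1 if the
 * page had been referenced.  Backs kvm_age_hva() below.
 */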
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			 unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(hptep[1] & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			rev[i].guest_rpte |= HPTE_R_R;
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;

	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
			j = rev[i].forw;
			if (hp[1] & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

 out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	if (!kvm->arch.using_mmu_notifiers)
		return;
	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

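/*
 * Test and clear the changed (C) bit for this guest page, both in the
 * rmap entry and in any valid HPTEs on its rmap chain; returns 1 if
 * the page is dirty.  Used by kvmppc_hv_get_dirty_log() below.
 */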
static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_CHANGED) {
		*rmapp &= ~KVMPPC_RMAP_CHANGED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		if (!(hptep[1] & HPTE_R_C))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
			/* need to make it temporarily absent to clear C */
			hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			hptep[1] &= ~HPTE_R_C;
			eieio();
			hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
			rev[i].guest_rpte |= HPTE_R_C;
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     unsigned long *map)
{
	unsigned long i;
	unsigned long *rmapp;

	preempt_disable();
	rmapp = memslot->arch.rmap;
	for (i = 0; i < memslot->npages; ++i) {
		if (kvm_test_clear_dirty(kvm, rmapp) && map)
			__set_bit_le(i, map);
		++rmapp;
	}
	preempt_enable();
	return 0;
}

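/*
 * Get a kernel-mapped, pinned reference to the guest page containing
 * gpa, returning its virtual address and (via *nb_ret) the number of
 * bytes usable from that offset.  Release the reference with
 * kvmppc_unpin_guest_page().
 */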
void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, psize, offset;
	unsigned long pa;
	unsigned long *physp;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto err;
	if (!kvm->arch.using_mmu_notifiers) {
		physp = memslot->arch.slot_phys;
		if (!physp)
			goto err;
		physp += gfn - memslot->base_gfn;
		pa = *physp;
		if (!pa) {
			if (kvmppc_get_guest_page(kvm, gfn, memslot,
						  PAGE_SIZE) < 0)
				goto err;
			pa = *physp;
		}
		page = pfn_to_page(pa >> PAGE_SHIFT);
		get_page(page);
	} else {
		hva = gfn_to_hva_memslot(memslot, gfn);
		npages = get_user_pages_fast(hva, 1, 1, pages);
		if (npages < 1)
			goto err;
		page = pages[0];
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	psize = PAGE_SIZE;
	if (PageHuge(page)) {
		page = compound_head(page);
		psize <<= compound_order(page);
	}
	offset = gpa & (psize - 1);
	if (nb_ret)
		*nb_ret = psize - offset;
	return page_address(page) + offset;

 err:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return NULL;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
	struct page *page = virt_to_page(va);

	put_page(page);
}

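/*
 * Per-vcpu MMU setup: choose the SLB size for the CPU family and
 * install the xlate/reset_msr callbacks defined above.
 */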
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	if (cpu_has_feature(CPU_FTR_ARCH_206))
		vcpu->arch.slb_nr = 32;		/* POWER7 */
	else
		vcpu->arch.slb_nr = 64;

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}