arch/powerpc/kvm/book3s_hv_rm_mmu.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

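/*
 * Real-mode handlers for the HPT-related hypercalls (H_ENTER, H_REMOVE,
 * H_BULK_REMOVE, H_PROTECT, H_READ) and real-mode HPTE lookup/fault
 * handling for the guest's hashed page table, used by HV KVM on Book3S.
 */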
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
        unsigned long addr = (unsigned long) x;
        pte_t *p;

        p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
        if (!p || !pte_present(*p))
                return NULL;
        /* assume we don't have huge pages in vmalloc space... */
        addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
        return __va(addr);
}

/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
        int global;

        /*
         * If there is only one vcore, and it's currently running,
         * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
         * we can use tlbiel as long as we mark all other physical
         * cores as potentially having stale TLB entries for this lpid.
         * If we're not using MMU notifiers, we never take pages away
         * from the guest, so we can use tlbiel if requested.
         * Otherwise, don't use tlbiel.
         */
        if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
                global = 0;
        else if (kvm->arch.using_mmu_notifiers)
                global = 1;
        else
                global = !(flags & H_LOCAL);

        if (!global) {
                /* any other core might now have stale TLB entries... */
                smp_wmb();
                cpumask_setall(&kvm->arch.need_tlb_flush);
                cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
                                  &kvm->arch.need_tlb_flush);
        }

        return global;
}

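/*
 * Reverse-map handling: each memslot rmap entry records the index of one
 * HPTE that maps the page, and the revmap_entry structures form a circular
 * doubly-linked list through their forw/back fields, so that every HPTE
 * mapping a given real page can be found and invalidated.
 */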
/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                             unsigned long *rmap, long pte_index, int realmode)
{
        struct revmap_entry *head, *tail;
        unsigned long i;

        if (*rmap & KVMPPC_RMAP_PRESENT) {
                i = *rmap & KVMPPC_RMAP_INDEX;
                head = &kvm->arch.revmap[i];
                if (realmode)
                        head = real_vmalloc_addr(head);
                tail = &kvm->arch.revmap[head->back];
                if (realmode)
                        tail = real_vmalloc_addr(tail);
                rev->forw = i;
                rev->back = head->back;
                tail->forw = pte_index;
                head->back = pte_index;
        } else {
                rev->forw = rev->back = pte_index;
                *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
                        pte_index | KVMPPC_RMAP_PRESENT;
        }
        unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
                                struct revmap_entry *rev,
                                unsigned long hpte_v, unsigned long hpte_r)
{
        struct revmap_entry *next, *prev;
        unsigned long gfn, ptel, head;
        struct kvm_memory_slot *memslot;
        unsigned long *rmap;
        unsigned long rcbits;

        rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
        ptel = rev->guest_rpte |= rcbits;
        gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
        memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
        if (!memslot)
                return;

        rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
        lock_rmap(rmap);

        head = *rmap & KVMPPC_RMAP_INDEX;
        next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
        prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
        next->back = rev->back;
        prev->forw = rev->forw;
        if (head == pte_index) {
                head = rev->forw;
                if (head == pte_index)
                        *rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
                else
                        *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
        }
        *rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
        unlock_rmap(rmap);
}

static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
                                         int writing, unsigned long *pte_sizep)
{
        pte_t *ptep;
        unsigned long ps = *pte_sizep;
        unsigned int hugepage_shift;

        ptep = find_linux_pte_or_hugepte(pgdir, hva, &hugepage_shift);
        if (!ptep)
                return __pte(0);
        if (hugepage_shift)
                *pte_sizep = 1ul << hugepage_shift;
        else
                *pte_sizep = PAGE_SIZE;
        if (ps > *pte_sizep)
                return __pte(0);
        return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
}

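/*
 * Drop the HPTE lock: store the new first doubleword (which must not have
 * HPTE_V_HVLOCK set) after a release barrier so prior updates are visible.
 */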
static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
        asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
        hpte[0] = cpu_to_be64(hpte_v);
}

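/*
 * H_ENTER: insert the HPTE described by pteh/ptel (the guest's view of the
 * two HPTE doublewords) at or near pte_index.  The guest physical address
 * is translated to a host real address via the memslot (and, with MMU
 * notifiers, the Linux PTE for the backing page); the new HPTE is then
 * linked into the reverse-map chain.  The index actually used is returned
 * through *pte_idx_ret.
 */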
long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                       long pte_index, unsigned long pteh, unsigned long ptel,
                       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
        unsigned long i, pa, gpa, gfn, psize;
        unsigned long slot_fn, hva;
        __be64 *hpte;
        struct revmap_entry *rev;
        unsigned long g_ptel;
        struct kvm_memory_slot *memslot;
        unsigned long *physp, pte_size;
        unsigned long is_io;
        unsigned long *rmap;
        pte_t pte;
        unsigned int writing;
        unsigned long mmu_seq;
        unsigned long rcbits;

        psize = hpte_page_size(pteh, ptel);
        if (!psize)
                return H_PARAMETER;
        writing = hpte_is_writable(ptel);
        pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
        ptel &= ~HPTE_GR_RESERVED;
        g_ptel = ptel;

        /* used later to detect if we might have been invalidated */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /* Find the memslot (if any) for this address */
        gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
        gfn = gpa >> PAGE_SHIFT;
        memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
        pa = 0;
        is_io = ~0ul;
        rmap = NULL;
        if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
                /* PPC970 can't do emulated MMIO */
                if (!cpu_has_feature(CPU_FTR_ARCH_206))
                        return H_PARAMETER;
                /* Emulated MMIO - mark this with key=31 */
                pteh |= HPTE_V_ABSENT;
                ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
                goto do_insert;
        }

        /* Check if the requested page fits entirely in the memslot. */
        if (!slot_is_aligned(memslot, psize))
                return H_PARAMETER;
        slot_fn = gfn - memslot->base_gfn;
        rmap = &memslot->arch.rmap[slot_fn];

        if (!kvm->arch.using_mmu_notifiers) {
                physp = memslot->arch.slot_phys;
                if (!physp)
                        return H_PARAMETER;
                physp += slot_fn;
                if (realmode)
                        physp = real_vmalloc_addr(physp);
                pa = *physp;
                if (!pa)
                        return H_TOO_HARD;
                is_io = pa & (HPTE_R_I | HPTE_R_W);
                pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
                pa &= PAGE_MASK;
                pa |= gpa & ~PAGE_MASK;
        } else {
                /* Translate to host virtual address */
                hva = __gfn_to_hva_memslot(memslot, gfn);

                /* Look up the Linux PTE for the backing page */
                pte_size = psize;
                pte = lookup_linux_pte_and_update(pgdir, hva, writing,
                                                  &pte_size);
                if (pte_present(pte) && !pte_numa(pte)) {
                        if (writing && !pte_write(pte))
                                /* make the actual HPTE be read-only */
                                ptel = hpte_make_readonly(ptel);
                        is_io = hpte_cache_bits(pte_val(pte));
                        pa = pte_pfn(pte) << PAGE_SHIFT;
                        pa |= hva & (pte_size - 1);
                        pa |= gpa & ~PAGE_MASK;
                }
        }

        if (pte_size < psize)
                return H_PARAMETER;

        ptel &= ~(HPTE_R_PP0 - psize);
        ptel |= pa;

        if (pa)
                pteh |= HPTE_V_VALID;
        else
                pteh |= HPTE_V_ABSENT;

        /* Check WIMG */
        if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
                if (is_io)
                        return H_PARAMETER;
                /*
                 * Allow guest to map emulated device memory as
                 * uncacheable, but actually make it cacheable.
                 */
                ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
                ptel |= HPTE_R_M;
        }

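        /*
         * Each HPTE occupies 16 bytes (two 64-bit doublewords), so the byte
         * offset into the hashed page table is pte_index << 4, and a hash
         * PTE group (HPTEG) covers 8 consecutive entries.
         */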
        /* Find and lock the HPTEG slot to use */
 do_insert:
        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
                hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
                for (i = 0; i < 8; ++i) {
                        if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
                            try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                          HPTE_V_ABSENT))
                                break;
                        hpte += 2;
                }
                if (i == 8) {
                        /*
                         * Since try_lock_hpte doesn't retry (not even stdcx.
                         * failures), it could be that there is a free slot
                         * but we transiently failed to lock it. Try again,
                         * actually locking each slot and checking it.
                         */
                        hpte -= 16;
                        for (i = 0; i < 8; ++i) {
                                u64 pte;
                                while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                        cpu_relax();
                                pte = be64_to_cpu(*hpte);
                                if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
                                        break;
                                *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
                                hpte += 2;
                        }
                        if (i == 8)
                                return H_PTEG_FULL;
                }
                pte_index += i;
        } else {
                hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
                if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                   HPTE_V_ABSENT)) {
                        /* Lock the slot and check again */
                        u64 pte;

                        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                cpu_relax();
                        pte = be64_to_cpu(*hpte);
                        if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
                                *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
                                return H_PTEG_FULL;
                        }
                }
        }

        /* Save away the guest's idea of the second HPTE dword */
        rev = &kvm->arch.revmap[pte_index];
        if (realmode)
                rev = real_vmalloc_addr(rev);
        if (rev) {
                rev->guest_rpte = g_ptel;
                note_hpte_modification(kvm, rev);
        }

        /* Link HPTE into reverse-map chain */
        if (pteh & HPTE_V_VALID) {
                if (realmode)
                        rmap = real_vmalloc_addr(rmap);
                lock_rmap(rmap);
                /* Check for pending invalidations under the rmap chain lock */
                if (kvm->arch.using_mmu_notifiers &&
                    mmu_notifier_retry(kvm, mmu_seq)) {
                        /* inval in progress, write a non-present HPTE */
                        pteh |= HPTE_V_ABSENT;
                        pteh &= ~HPTE_V_VALID;
                        unlock_rmap(rmap);
                } else {
                        kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
                                                realmode);
                        /* Only set R/C in real HPTE if already set in *rmap */
                        rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
                        ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
                }
        }

        hpte[1] = cpu_to_be64(ptel);

        /* Write the first HPTE dword, unlocking the HPTE and making it valid */
        eieio();
        hpte[0] = cpu_to_be64(pteh);
        asm volatile("ptesync" : : : "memory");

        *pte_idx_ret = pte_index;
        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

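/*
 * H_ENTER wrapper: uses the vcpu's pgdir for the Linux PTE lookup, runs
 * with realmode = true, and returns the chosen pte_index to the guest in
 * GPR4.
 */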
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel)
{
        return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
                                 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->paca_index))
#endif

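/*
 * Try once to acquire the global tlbie lock with lwarx/stwcx., storing this
 * CPU's lock token as the owner value; returns non-zero if the lock was
 * free and has now been taken.
 */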
static inline int try_lock_tlbie(unsigned int *lock)
{
        unsigned int tmp, old;
        unsigned int token = LOCK_TOKEN;

        asm volatile("1:lwarx %1,0,%2\n"
                     " cmpwi cr0,%1,0\n"
                     " bne 2f\n"
                     " stwcx. %3,0,%2\n"
                     " bne- 1b\n"
                     " isync\n"
                     "2:"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (lock), "r" (token)
                     : "cc", "memory");
        return old == 0;
}

/*
 * tlbie/tlbiel is a bit different on the PPC970 compared to later
 * processors such as POWER7; the large page bit is in the instruction
 * not RB, and the top 16 bits and the bottom 12 bits of the VA
 * in RB must be 0.
 */
static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
                          long npages, int global, bool need_sync)
{
        long i;

        if (global) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                if (need_sync)
                        asm volatile("ptesync" : : : "memory");
                for (i = 0; i < npages; ++i) {
                        unsigned long rb = rbvalues[i];

                        if (rb & 1)     /* large page */
                                asm volatile("tlbie %0,1" : :
                                             "r" (rb & 0x0000fffffffff000ul));
                        else
                                asm volatile("tlbie %0,0" : :
                                             "r" (rb & 0x0000fffffffff000ul));
                }
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                if (need_sync)
                        asm volatile("ptesync" : : : "memory");
                for (i = 0; i < npages; ++i) {
                        unsigned long rb = rbvalues[i];

                        if (rb & 1)     /* large page */
                                asm volatile("tlbiel %0,1" : :
                                             "r" (rb & 0x0000fffffffff000ul));
                        else
                                asm volatile("tlbiel %0,0" : :
                                             "r" (rb & 0x0000fffffffff000ul));
                }
                asm volatile("ptesync" : : : "memory");
        }
}

static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
                      long npages, int global, bool need_sync)
{
        long i;

        if (cpu_has_feature(CPU_FTR_ARCH_201)) {
                /* PPC970 tlbie instruction is a bit different */
                do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
                return;
        }
        if (global) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                if (need_sync)
                        asm volatile("ptesync" : : : "memory");
                for (i = 0; i < npages; ++i)
                        asm volatile(PPC_TLBIE(%1,%0) : :
                                     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                if (need_sync)
                        asm volatile("ptesync" : : : "memory");
                for (i = 0; i < npages; ++i)
                        asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
                asm volatile("ptesync" : : : "memory");
        }
}

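/*
 * H_REMOVE: invalidate the HPTE at pte_index, subject to the AVPN and
 * ANDCOND matching conditions requested in flags, flush the TLB entry,
 * and return the old first doubleword and the guest's view of the second
 * through hpret[].
 */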
long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
                        unsigned long pte_index, unsigned long avpn,
                        unsigned long *hpret)
{
        __be64 *hpte;
        unsigned long v, r, rb;
        struct revmap_entry *rev;
        u64 pte;

        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
        hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        pte = be64_to_cpu(hpte[0]);
        if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
                hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
                return H_NOT_FOUND;
        }

        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        v = pte & ~HPTE_V_HVLOCK;
        if (v & HPTE_V_VALID) {
                u64 pte1;

                pte1 = be64_to_cpu(hpte[1]);
                hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
                rb = compute_tlbie_rb(v, pte1, pte_index);
                do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
                /* Read PTE low word after tlbie to get final R/C values */
                remove_revmap_chain(kvm, pte_index, rev, v, pte1);
        }
        r = rev->guest_rpte & ~HPTE_GR_RESERVED;
        note_hpte_modification(kvm, rev);
        unlock_hpte(hpte, 0);

        hpret[0] = v;
        hpret[1] = r;
        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn)
{
        return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
                                  &vcpu->arch.gpr[4]);
}

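/*
 * H_BULK_REMOVE: process up to four remove requests passed as
 * (control, avpn) pairs in GPR4..GPR11.  The top byte of each control word
 * encodes the request type and match flags, and the low 56 bits the PTE
 * index; completion status and the R/C bits are written back into the
 * control word.  TLB invalidations are batched across the requests.
 */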
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *args = &vcpu->arch.gpr[4];
        __be64 *hp, *hptes[4];
        unsigned long tlbrb[4];
        long int i, j, k, n, found, indexes[4];
        unsigned long flags, req, pte_index, rcbits;
        int global;
        long int ret = H_SUCCESS;
        struct revmap_entry *rev, *revs[4];
        u64 hp0;

        global = global_invalidates(kvm, 0);
        for (i = 0; i < 4 && ret == H_SUCCESS; ) {
                n = 0;
                for (; i < 4; ++i) {
                        j = i * 2;
                        pte_index = args[j];
                        flags = pte_index >> 56;
                        pte_index &= ((1ul << 56) - 1);
                        req = flags >> 6;
                        flags &= 3;
                        if (req == 3) {         /* no more requests */
                                i = 4;
                                break;
                        }
                        if (req != 1 || flags == 3 ||
                            pte_index >= kvm->arch.hpt_npte) {
                                /* parameter error */
                                args[j] = ((0xa0 | flags) << 56) + pte_index;
                                ret = H_PARAMETER;
                                break;
                        }
                        hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
                        /* to avoid deadlock, don't spin except for first */
                        if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
                                if (n)
                                        break;
                                while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
                                        cpu_relax();
                        }
                        found = 0;
                        hp0 = be64_to_cpu(hp[0]);
                        if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
                                switch (flags & 3) {
                                case 0:         /* absolute */
                                        found = 1;
                                        break;
                                case 1:         /* andcond */
                                        if (!(hp0 & args[j + 1]))
                                                found = 1;
                                        break;
                                case 2:         /* AVPN */
                                        if ((hp0 & ~0x7fUL) == args[j + 1])
                                                found = 1;
                                        break;
                                }
                        }
                        if (!found) {
                                hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
                                args[j] = ((0x90 | flags) << 56) + pte_index;
                                continue;
                        }

                        args[j] = ((0x80 | flags) << 56) + pte_index;
                        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
                        note_hpte_modification(kvm, rev);

                        if (!(hp0 & HPTE_V_VALID)) {
                                /* insert R and C bits from PTE */
                                rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                                args[j] |= rcbits << (56 - 5);
                                hp[0] = 0;
                                continue;
                        }

                        /* leave it locked */
                        hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
                        tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
                                be64_to_cpu(hp[1]), pte_index);
                        indexes[n] = j;
                        hptes[n] = hp;
                        revs[n] = rev;
                        ++n;
                }

                if (!n)
                        break;

                /* Now that we've collected a batch, do the tlbies */
                do_tlbies(kvm, tlbrb, n, global, true);

                /* Read PTE low words after tlbie to get final R/C values */
                for (k = 0; k < n; ++k) {
                        j = indexes[k];
                        pte_index = args[j] & ((1ul << 56) - 1);
                        hp = hptes[k];
                        rev = revs[k];
                        remove_revmap_chain(kvm, pte_index, rev,
                                be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
                        rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                        args[j] |= rcbits << (56 - 5);
                        hp[0] = 0;
                }
        }

        return ret;
}

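/*
 * H_PROTECT: update the protection and key bits (pp, N, key) of an existing
 * HPTE according to flags, flushing the old translation from the TLB and
 * keeping the guest's view of the second doubleword in sync.
 */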
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        __be64 *hpte;
        struct revmap_entry *rev;
        unsigned long v, r, rb, mask, bits;
        u64 pte;

        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;

        hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        pte = be64_to_cpu(hpte[0]);
        if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
                hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
                return H_NOT_FOUND;
        }

        v = pte;
        bits = (flags << 55) & HPTE_R_PP0;
        bits |= (flags << 48) & HPTE_R_KEY_HI;
        bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

        /* Update guest view of 2nd HPTE dword */
        mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                HPTE_R_KEY_HI | HPTE_R_KEY_LO;
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        if (rev) {
                r = (rev->guest_rpte & ~mask) | bits;
                rev->guest_rpte = r;
                note_hpte_modification(kvm, rev);
        }
        r = (be64_to_cpu(hpte[1]) & ~mask) | bits;

        /* Update HPTE */
        if (v & HPTE_V_VALID) {
                rb = compute_tlbie_rb(v, r, pte_index);
                hpte[0] = cpu_to_be64(v & ~HPTE_V_VALID);
                do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
                /*
                 * If the host has this page as readonly but the guest
                 * wants to make it read/write, reduce the permissions.
                 * Checking the host permissions involves finding the
                 * memslot and then the Linux PTE for the page.
                 */
                if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) {
                        unsigned long psize, gfn, hva;
                        struct kvm_memory_slot *memslot;
                        pgd_t *pgdir = vcpu->arch.pgdir;
                        pte_t pte;

                        psize = hpte_page_size(v, r);
                        gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
                        memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
                        if (memslot) {
                                hva = __gfn_to_hva_memslot(memslot, gfn);
                                pte = lookup_linux_pte_and_update(pgdir, hva,
                                                                  1, &psize);
                                if (pte_present(pte) && !pte_write(pte))
                                        r = hpte_make_readonly(r);
                        }
                }
        }
        hpte[1] = cpu_to_be64(r);
        eieio();
        hpte[0] = cpu_to_be64(v & ~HPTE_V_HVLOCK);
        asm volatile("ptesync" : : : "memory");
        return H_SUCCESS;
}

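/*
 * H_READ: return the contents of one HPTE, or of four consecutive HPTEs
 * (starting at a multiple of 4) if H_READ_4 is set, in GPR4 onwards.
 * Host-internal state is hidden: an absent HPTE is reported to the guest
 * as valid, and the guest's own view of the second doubleword is returned.
 */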
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index)
{
        struct kvm *kvm = vcpu->kvm;
        __be64 *hpte;
        unsigned long v, r;
        int i, n = 1;
        struct revmap_entry *rev = NULL;

        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
        if (flags & H_READ_4) {
                pte_index &= ~3;
                n = 4;
        }
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
                hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
                v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
                r = be64_to_cpu(hpte[1]);
                if (v & HPTE_V_ABSENT) {
                        v &= ~HPTE_V_ABSENT;
                        v |= HPTE_V_VALID;
                }
                if (v & HPTE_V_VALID) {
                        r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
                        r &= ~HPTE_GR_RESERVED;
                }
                vcpu->arch.gpr[4 + i * 2] = v;
                vcpu->arch.gpr[5 + i * 2] = r;
        }
        return H_SUCCESS;
}

void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
                            unsigned long pte_index)
{
        unsigned long rb;

        hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
        rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
                              pte_index);
        do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
                           unsigned long pte_index)
{
        unsigned long rb;
        unsigned char rbyte;

        rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
                              pte_index);
        rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
        /* modify only the second-last byte, which contains the ref bit */
        *((char *)hptep + 14) = rbyte;
        do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

static int slb_base_page_shift[4] = {
        24,     /* 16M */
        16,     /* 64k */
        34,     /* 16G */
        20,     /* 1M, unsupported */
};

/*
 * When called from virtual mode, this function should be protected by
 * preempt_disable(); otherwise, being preempted while holding HPTE_V_HVLOCK
 * can lead to deadlock.
 */
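/*
 * Search the primary and secondary hash buckets for an HPTE matching eaddr
 * under the given SLB entry value (slb_v).  On success the HPTE is left
 * locked (HPTE_V_HVLOCK set) and its global index is returned; -1 means no
 * matching HPTE was found.
 */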
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                              unsigned long valid)
{
        unsigned int i;
        unsigned int pshift;
        unsigned long somask;
        unsigned long vsid, hash;
        unsigned long avpn;
        __be64 *hpte;
        unsigned long mask, val;
        unsigned long v, r;

        /* Get page shift, work out hash and AVPN etc. */
        mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
        val = 0;
        pshift = 12;
        if (slb_v & SLB_VSID_L) {
                mask |= HPTE_V_LARGE;
                val |= HPTE_V_LARGE;
                pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
        }
        if (slb_v & SLB_VSID_B_1T) {
                somask = (1UL << 40) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
                vsid ^= vsid << 25;
        } else {
                somask = (1UL << 28) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
        }
        hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
        avpn = slb_v & ~(somask >> 16); /* also includes B */
        avpn |= (eaddr & somask) >> 16;

        if (pshift >= 24)
                avpn &= ~((1UL << (pshift - 16)) - 1);
        else
                avpn &= ~0x7fUL;
        val |= avpn;

        for (;;) {
                hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));

                for (i = 0; i < 16; i += 2) {
                        /* Read the PTE racily */
                        v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;

                        /* Check valid/absent, hash, segment size and AVPN */
                        if (!(v & valid) || (v & mask) != val)
                                continue;

                        /* Lock the PTE and read it under the lock */
                        while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
                                cpu_relax();
                        v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
                        r = be64_to_cpu(hpte[i+1]);

                        /*
                         * Check the HPTE again, including base page size
                         */
                        if ((v & valid) && (v & mask) == val &&
                            hpte_base_page_size(v, r) == (1ul << pshift))
                                /* Return with the HPTE still locked */
                                return (hash << 3) + (i >> 1);

                        /* Unlock and move on */
                        hpte[i] = cpu_to_be64(v);
                }

                if (val & HPTE_V_SECONDARY)
                        break;
                val |= HPTE_V_SECONDARY;
                hash = hash ^ kvm->arch.hpt_mask;
        }
        return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE-not-found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or whether a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if it is none of
 * these (i.e. the interrupt should be passed on to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                          unsigned long slb_v, unsigned int status, bool data)
{
        struct kvm *kvm = vcpu->kvm;
        long int index;
        unsigned long v, r, gr;
        __be64 *hpte;
        unsigned long valid;
        struct revmap_entry *rev;
        unsigned long pp, key;

        /* For protection fault, expect to find a valid HPTE */
        valid = HPTE_V_VALID;
        if (status & DSISR_NOHPTE)
                valid |= HPTE_V_ABSENT;

        index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
        if (index < 0) {
                if (status & DSISR_NOHPTE)
                        return status;  /* there really was no HPTE */
                return 0;               /* for prot fault, HPTE disappeared */
        }
        hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
        v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
        r = be64_to_cpu(hpte[1]);
        rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
        gr = rev->guest_rpte;

        unlock_hpte(hpte, v);

        /* For not found, if the HPTE is valid by now, retry the instruction */
        if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
                return 0;

        /* Check access permissions to the page */
        pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
        key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
        status &= ~DSISR_NOHPTE;        /* DSISR_NOHPTE == SRR1_ISI_NOPT */
        if (!data) {
                if (gr & (HPTE_R_N | HPTE_R_G))
                        return status | SRR1_ISI_N_OR_G;
                if (!hpte_read_permission(pp, slb_v & key))
                        return status | SRR1_ISI_PROT;
        } else if (status & DSISR_ISSTORE) {
                /* check write permission */
                if (!hpte_write_permission(pp, slb_v & key))
                        return status | DSISR_PROTFAULT;
        } else {
                if (!hpte_read_permission(pp, slb_v & key))
                        return status | DSISR_PROTFAULT;
        }

        /* Check storage key, if applicable */
        if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
                unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
                if (status & DSISR_ISSTORE)
                        perm >>= 1;
                if (perm & 1)
                        return status | DSISR_KEYFAULT;
        }

        /* Save HPTE info for virtual-mode handler */
        vcpu->arch.pgfault_addr = addr;
        vcpu->arch.pgfault_index = index;
        vcpu->arch.pgfault_hpte[0] = v;
        vcpu->arch.pgfault_hpte[1] = r;

        /* Check the storage key to see if it is possibly emulated MMIO */
        if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
            (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
            (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
                return -2;      /* MMIO emulation - load instr word */

        return -1;              /* send fault up to host kernel mode */
}