arch/powerpc/kvm/book3s_64_vio_hv.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>

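/*
 * Real-mode variant of WARN_ON_ONCE(): reports the condition once via
 * pr_err() and dump_stack() instead of going through the generic WARN()
 * machinery, and evaluates to the (unlikely) condition so it can be used
 * directly in tests.
 */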
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
	})

#else

#define WARN_ON_ONCE_RM(condition)	({			\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
	})

#endif

#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 * mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

/*
 * Validates TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table for user space to process), we can skip further checks
 * (such as whether the TCE points to guest RAM or whether the page was
 * actually allocated).
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);

/*
 * Note on the use of page_address() in real mode:
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address(),
 * which returns __va(PFN_PHYS(page_to_pfn(page))); this is an
 * arithmetic operation and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently, but
 * either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL would have to be
 * enabled for that.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64.
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only if
 * CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP is not
 * expected to be enabled on ppc32, page_address() is safe for ppc32
 * as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Called in both real and virtual modes.
 * It cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);

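/*
 * Converts a guest physical address into the corresponding userspace (host
 * virtual) address by looking up the memslot containing the GPA. If prmap
 * is not NULL, also returns a pointer to the rmap entry for the page so
 * that real-mode callers can synchronize with the MMU notifiers.
 *
 * Returns 0 on success or -EINVAL if no memslot covers the address.
 */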
long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
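/*
 * Clears a hardware TCE entry by exchanging it with an empty (DMA_NONE)
 * mapping; used on error paths to avoid leaving a half-updated entry.
 */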
static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
}

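/*
 * Decrements the "mapped" reference count of the preregistered memory
 * region backing the given TCE entry and clears the cached userspace
 * address for that entry. Returns H_TOO_HARD if the lookup has to be
 * redone in virtual mode.
 */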
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	pua = (void *) vmalloc_to_phys(pua);
	if (WARN_ON_ONCE_RM(!pua))
		return H_HARDWARE;

	mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = 0;

	return H_SUCCESS;
}

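/*
 * Unmaps a single TCE entry in real mode: exchanges the hardware entry
 * with DMA_NONE and, if something was mapped there, drops the reference
 * taken on the preregistered memory region when it was mapped.
 */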
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);

	return ret;
}

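/*
 * Maps a single TCE entry in real mode: translates the userspace address
 * via the preregistered memory list, takes a reference on that region,
 * programs the hardware table and remembers the userspace address so the
 * entry can later be unmapped. Returns H_TOO_HARD when the operation must
 * be retried in virtual mode.
 */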
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
		return H_HARDWARE;

	pua = (void *) vmalloc_to_phys(pua);
	if (WARN_ON_ONCE_RM(!pua))
		return H_HARDWARE;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_CLOSED;

	ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = ua;

	return 0;
}

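/*
 * Real-mode handler for the H_PUT_TCE hypercall: validates the LIOBN,
 * IOBA and TCE value, updates every hardware IOMMU table attached to the
 * LIOBN and finally stores the TCE value in the guest-visible table.
 * Returns H_TOO_HARD to fall back to the virtual-mode handler.
 */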
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry, ua, dir);

		if (ret == H_SUCCESS)
			continue;

		if (ret == H_TOO_HARD)
			return ret;

		WARN_ON_ONCE_RM(1);
		kvmppc_rm_clear_tce(stit->tbl, entry);
	}

	kvmppc_tce_put(stt, entry, tce);

	return H_SUCCESS;
}

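/*
 * Translates a userspace address into a host physical address by walking
 * the host page table in real mode. Only base-size, young pages are
 * handled here; anything more complicated returns an error so the caller
 * can retry in virtual mode.
 */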
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	ptep = __find_linux_pte_or_hugepte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

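/*
 * Real-mode handler for the H_PUT_TCE_INDIRECT hypercall: reads a list of
 * up to 512 TCE values from guest memory and applies each of them to the
 * hardware IOMMU tables and the guest-visible table. The TCE list itself
 * is translated either through preregistered memory (the VFIO case) or by
 * walking the host page table under the rmap lock.
 * Returns H_TOO_HARD to fall back to the virtual-mode handler.
 */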
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole table being addressed fits within a single 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which
		 * is normally the VFIO case, and the gpa->hpa translation
		 * does not depend on the HPT.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually the case of a guest with emulated devices
		 * only, when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case,
		 * so lock the rmap and use __find_linux_pte_or_hugepte().
		 */
		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_HARDWARE;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		ua = 0;
		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL)) {
			/* The rmap may still be locked, so exit via unlock_exit */
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry + i);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}

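/*
 * Real-mode handler for the H_STUFF_TCE hypercall: fills npages entries,
 * starting at ioba, with the same TCE value. The value must have no
 * permission bits set, so the corresponding hardware entries are simply
 * unmapped before the guest-visible table is updated.
 * Returns H_TOO_HARD to fall back to the virtual-mode handler.
 */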
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace to poison TCE for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stit->tbl->it_page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = (u64 *)page_address(page);

	vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */