arch/powerpc/kvm/book3s_64_vio_hv.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>

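/*
 * Warn-once helper usable from the real-mode handlers below: it reports
 * through pr_err()/dump_stack() and, like WARN_ON_ONCE(), evaluates to
 * whether the condition was true.
 */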
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

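/* Number of 64-bit TCE entries that fit in one page of the guest-visible table */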
#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 * mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

/*
 * Validates a TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table and user space is expected to process them), we can skip
 * further checks (such as whether the TCE is a guest RAM address or whether
 * the page was actually allocated).
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);

/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))), a purely arithmetic
 * operation that does not access the page struct.
 *
 * Theoretically page_address() could be defined differently,
 * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Called in both real and virtual modes.
 * This cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);

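/*
 * Converts a guest physical address to a host userspace address and,
 * when prmap is not NULL, also returns a pointer to the rmap entry for
 * the guest page so the caller can lock it.
 */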
long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
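/* Resets a hardware TCE entry to the cleared (DMA_NONE) state */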
static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
}

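/*
 * Looks up the preregistered memory region backing this entry, drops the
 * mapped reference taken when the entry was mapped, and clears the cached
 * userspace address.
 */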
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	pua = (void *) vmalloc_to_phys(pua);
	if (WARN_ON_ONCE_RM(!pua))
		return H_HARDWARE;

	mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = 0;

	return H_SUCCESS;
}

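/*
 * Clears one entry in the hardware TCE table; if it was mapped, also drops
 * the reference on the preregistered memory behind it. Returns H_TOO_HARD
 * when the exchange cannot be completed in real mode.
 */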
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);

	return ret;
}

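/*
 * Maps one guest TCE into the hardware table: translates the userspace
 * address through the preregistered memory list, takes a mapped reference
 * and exchanges the entry in real mode.
 */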
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
		return H_HARDWARE;

	pua = (void *) vmalloc_to_phys(pua);
	if (WARN_ON_ONCE_RM(!pua))
		return H_HARDWARE;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_CLOSED;

	ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = ua;

	return 0;
}

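/*
 * Real-mode handler for the H_PUT_TCE hypercall: validates the request,
 * updates any hardware IOMMU tables attached to this LIOBN and then
 * stores the TCE value in the guest-visible table.
 */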
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry, ua, dir);

		if (ret == H_SUCCESS)
			continue;

		if (ret == H_TOO_HARD)
			return ret;

		WARN_ON_ONCE_RM(1);
		kvmppc_rm_clear_tce(stit->tbl, entry);
	}

	kvmppc_tce_put(stt, entry, tce);

	return H_SUCCESS;
}

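/*
 * Translates a host userspace address to a host physical address by walking
 * the task's page table (vcpu->arch.pgdir) in real mode; returns a negative
 * error for anything better handled in virtual mode (huge pages, PTEs
 * without the accessed bit set).
 */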
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	ptep = __find_linux_pte_or_hugepte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

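/*
 * Real-mode handler for H_PUT_TCE_INDIRECT: reads a list of up to 512 TCEs
 * from guest memory and applies each of them as H_PUT_TCE would.
 */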
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole table addressed resides in a single 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which is
		 * normally the VFIO case, and the gpa->hpa translation does
		 * not depend on the HPT.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually the case of a guest with emulated devices
		 * only, when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case
		 * so lock rmap and do __find_linux_pte_or_hugepte().
		 */
		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_HARDWARE;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		ua = 0;
		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry + i);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}

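/*
 * Real-mode handler for H_STUFF_TCE: sets npages consecutive entries starting
 * at ioba to tce_value, clearing any hardware IOMMU mappings along the way.
 */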
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison TCEs for debugging */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stit->tbl->it_page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = (u64 *)page_address(page);

	vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */