arch/powerpc/kvm/book3s_64_vio_hv.c (mirror_ubuntu-zesty-kernel.git, commit c786a58c28a7761647c06df0835065a91796d598)
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>

#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))
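
/*
 * Example of the arithmetic above: each backing page of the shadow TCE
 * table holds PAGE_SIZE / 8 entries, so a 4K host PAGE_SIZE gives 512
 * TCEs per backing page and a 64K host PAGE_SIZE gives 8192.
 */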

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
                unsigned long liobn)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvmppc_spapr_tce_table *stt;

        list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
                if (stt->liobn == liobn)
                        return stt;

        return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

/*
 * Validates IO address.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long ioba, unsigned long npages)
{
        unsigned long mask = (1ULL << stt->page_shift) - 1;
        unsigned long idx = ioba >> stt->page_shift;

        if ((ioba & mask) || (idx + npages > stt->size) ||
                        (idx + npages < idx))
                return H_PARAMETER;

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
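
/*
 * Worked example of the checks above, assuming a table created with a 4K
 * IOMMU page size (stt->page_shift == 12) and a window of stt->size ==
 * 0x100 TCE entries: mask is 0xfff, so ioba == 0x3010 fails the alignment
 * check; ioba == 0x3000 with npages == 1 gives idx == 3 and passes; and
 * ioba == 0x3000 with npages == 0x100 fails because idx + npages exceeds
 * stt->size. The idx + npages < idx test catches wrap-around of huge
 * npages values.
 */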

/*
 * Validates TCE address.
 * At the moment flags and page mask are validated.
 * As the host kernel does not access those addresses (just puts them
 * to the table and user space is supposed to process them), we can skip
 * checking other things (such as TCE is a guest RAM address or the page
 * was actually allocated).
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
        unsigned long page_mask = ~((1ULL << stt->page_shift) - 1);
        unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ);

        if (tce & mask)
                return H_PARAMETER;

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);
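
/*
 * Worked example, again assuming stt->page_shift == 12: page_mask is
 * ~0xfffULL, so mask keeps the low in-page bits minus the TCE_PCI_READ and
 * TCE_PCI_WRITE permission bits. A tce of 0x10000003 (4K-aligned address
 * plus both permission bits) passes, while 0x10000403 is rejected with
 * H_PARAMETER because it has other low bits set.
 */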

/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))), which is an arithmetic
 * operation and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently, but then
 * either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
        return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Called in both real and virtual modes.
 * Cannot fail, so kvmppc_tce_validate() must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
                unsigned long idx, unsigned long tce)
{
        struct page *page;
        u64 *tbl;

        page = stt->pages[idx / TCES_PER_PAGE];
        tbl = kvmppc_page_address(page);

        tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);
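
/*
 * Example of the indexing above: with a 64K host PAGE_SIZE (8192 TCEs per
 * backing page), idx == 10000 lands in stt->pages[1] at slot 1808, i.e.
 * idx / TCES_PER_PAGE selects the backing page and idx % TCES_PER_PAGE
 * selects the 8-byte entry within it.
 */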

long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
                unsigned long *ua, unsigned long **prmap)
{
        unsigned long gfn = gpa >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;

        memslot = search_memslots(kvm_memslots(kvm), gfn);
        if (!memslot)
                return -EINVAL;

        *ua = __gfn_to_hva_memslot(memslot, gfn) |
                (gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        if (prmap)
                *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
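
/*
 * Sketch of the translation above: the gfn (gpa >> PAGE_SHIFT) selects the
 * memslot and the host virtual address of the page, and the in-page offset
 * of gpa, with the TCE_PCI_READ/TCE_PCI_WRITE permission bits stripped, is
 * OR'ed back in. When prmap is supplied on HV KVM, the rmap entry for that
 * gfn is also returned so the caller can lock it against MMU notifiers.
 */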

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
        long ret;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /* 	    liobn, ioba, tce); */

        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                return ret;

        kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
                unsigned long ua, unsigned long *phpa)
{
        pte_t *ptep, pte;
        unsigned shift = 0;

        ptep = __find_linux_pte_or_hugepte(vcpu->arch.pgdir, ua, NULL, &shift);
        if (!ptep || !pte_present(*ptep))
                return -ENXIO;
        pte = *ptep;

        if (!shift)
                shift = PAGE_SHIFT;

        /* Avoid handling anything potentially complicated in realmode */
        if (shift > PAGE_SHIFT)
                return -EAGAIN;

        if (!pte_young(pte))
                return -EAGAIN;

        *phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
                        (ua & ~PAGE_MASK);

        return 0;
}

long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS;
        unsigned long tces, entry, ua = 0;
        unsigned long *rmap = NULL;

        stt = kvmppc_find_table(vcpu, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * The spec says that the maximum size of the list is 512 TCEs,
         * so the whole table addressed resides in a single 4K page.
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
                return H_TOO_HARD;

        rmap = (void *) vmalloc_to_phys(rmap);

        /*
         * Synchronize with the MMU notifier callbacks in
         * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
         * While we have the rmap lock, code running on other CPUs
         * cannot finish unmapping the host real page that backs
         * this guest real page, so we are OK to access the host
         * real page.
         */
        lock_rmap(rmap);
        if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
                ret = H_TOO_HARD;
                goto unlock_exit;
        }

        for (i = 0; i < npages; ++i) {
                unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;

                kvmppc_tce_put(stt, entry + i, tce);
        }

unlock_exit:
        unlock_rmap(rmap);

        return ret;
}

long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;

        stt = kvmppc_find_table(vcpu, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only, to allow userspace to poison a TCE for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);

long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba)
{
        struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
        long ret;
        unsigned long idx;
        struct page *page;
        u64 *tbl;

        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        idx = ioba >> stt->page_shift;
        page = stt->pages[idx / TCES_PER_PAGE];
        tbl = (u64 *)page_address(page);

        vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */