/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>

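/*
 * WARN_ON_ONCE() is not safe to call with the MMU off, so the hcall
 * handlers below use this simplified real-mode (_RM) variant: with
 * CONFIG_BUG it prints the condition once per call site via pr_err()
 * and dumps the stack; without CONFIG_BUG it degrades to a plain
 * condition check.
 */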
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition)	({			\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 * mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

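	/*
	 * Lockless traversal: this can run in real mode where ordinary
	 * locks are unusable. It presumably relies on the table list only
	 * being modified in virtual mode with RCU-style publishing, so a
	 * concurrent reader always sees a consistent list.
	 */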
	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

/*
 * Validates IO address.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
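/*
 * Worked example (illustrative values, not from the original source):
 * with page_shift = 12 (4K IOMMU pages), offset = 0 and size = 512,
 * ioba = 0x3000 gives idx = 3, so npages = 4 is accepted, npages = 512
 * trips the "idx - offset + npages > size" check, and ioba = 0x3004
 * trips the alignment check against mask = 0xfff.
 */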
long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1ULL << stt->page_shift) - 1;
	unsigned long idx = ioba >> stt->page_shift;

	if ((ioba & mask) || (idx < stt->offset) ||
			(idx - stt->offset + npages > stt->size) ||
			(idx + npages < idx))
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);

/*
 * Validates a TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE points to guest RAM or
 * whether the page was actually allocated).
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
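/*
 * Example (illustrative): with page_shift = 16 (64K IOMMU pages),
 * page_mask clears the low 16 bits, so mask covers bits 2..15
 * (everything below the page size except TCE_PCI_READ/TCE_PCI_WRITE);
 * any TCE that is not 64K-aligned modulo the permission bits fails.
 */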
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
	unsigned long page_mask = ~((1ULL << stt->page_shift) - 1);
	unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ);

	if (tce & mask)
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);

/*
 * Note on the use of page_address() in real mode:
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address(),
 * which returns __va(PFN_PHYS(page_to_pfn(page))); this is an arithmetic
 * operation and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently, but either
 * WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Called in both real and virtual modes.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
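/*
 * Indexing sketch (illustrative): with 4K host pages, TCES_PER_PAGE is
 * 512, so table entry idx (counted from stt->offset) lives in backing
 * page stt->pages[idx / 512] at slot idx % 512.
 */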
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);

long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
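/*
 * Clears a TCE in the hardware table by exchanging in an empty entry
 * (hpa = 0, DMA_NONE); the previous mapping, if any, is discarded.
 */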
static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
}

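/*
 * Looks up the preregistered memory region backing this entry,
 * decrements its "mapped" counter and clears the cached userspace
 * address of the mapping.
 */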
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	pua = (void *) vmalloc_to_phys(pua);
	if (WARN_ON_ONCE_RM(!pua))
		return H_HARDWARE;

	mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = 0;

	return H_SUCCESS;
}

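/*
 * Clears the hardware TCE and, if it carried a valid mapping,
 * releases the reference on the backing preregistered memory;
 * if that release fails, the old value is put back.
 */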
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);

	return ret;
}

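/*
 * Translates ua to a host physical address via the preregistered
 * memory list and programs it into the hardware TCE, taking a
 * reference on the backing region; a previously valid entry has its
 * reference dropped.
 */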
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
		return H_HARDWARE;

	pua = (void *) vmalloc_to_phys(pua);
	if (WARN_ON_ONCE_RM(!pua))
		return H_HARDWARE;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_CLOSED;

	ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = ua;

	return H_SUCCESS;
}

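/*
 * Real-mode handler for the H_PUT_TCE hcall: validates ioba and the
 * TCE value, updates every hardware IOMMU table attached to the
 * LIOBN and then mirrors the TCE into the guest-visible table.
 */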
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry, ua, dir);

		if (ret == H_SUCCESS)
			continue;

		if (ret == H_TOO_HARD)
			return ret;

		WARN_ON_ONCE_RM(1);
		kvmppc_rm_clear_tce(stit->tbl, entry);
	}

	kvmppc_tce_put(stt, entry, tce);

	return H_SUCCESS;
}

static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	ptep = __find_linux_pte_or_hugepte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
		(ua & ~PAGE_MASK);

	return 0;
}

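/*
 * Real-mode handler for H_PUT_TCE_INDIRECT: tce_list is the guest
 * physical address of a page holding up to 512 TCEs, which are
 * validated and applied one by one.
 */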
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole list fits within a single 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which
		 * is normally the VFIO case, and the gpa->hpa translation
		 * does not depend on the HPT.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually the case of a guest with emulated devices
		 * only, when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case,
		 * so lock the rmap and do __find_linux_pte_or_hugepte().
		 */
		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_HARDWARE;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		ua = 0;
		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL)) {
			/* Do not return directly: the rmap may be locked */
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry + i);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}

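/*
 * Real-mode handler for H_STUFF_TCE: writes the same tce_value
 * (normally 0, i.e. clearing) to npages consecutive entries
 * starting at ioba.
 */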
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison TCEs for debugging */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stit->tbl->it_page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}

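/*
 * Real-mode handler for H_GET_TCE: returns the current TCE at ioba
 * from the guest-visible table in GPR4.
 */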
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = (u64 *)page_address(page);

	vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */