/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include "assigned-dev.h"

static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
		   allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
 "Enable device assignment on platforms without interrupt remapping support.");

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages);

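/*
 * Pin the host pages backing @npages guest frames of @slot starting at
 * @gfn, taking one reference per 4k page, and return the pfn of the
 * first page (or an error pfn if the first lookup fails).  The pages
 * are unpinned later in 4k steps by kvm_unpin_pages().
 */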
static kvm_pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
			       unsigned long npages)
{
	gfn_t end_gfn;
	kvm_pfn_t pfn;

	pfn     = gfn_to_pfn_memslot(slot, gfn);
	end_gfn = gfn + npages;
	gfn    += 1;

	if (is_error_noslot_pfn(pfn))
		return pfn;

	while (gfn < end_gfn)
		gfn_to_pfn_memslot(slot, gfn++);

	return pfn;
}

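/* Drop the references taken by kvm_pin_pages(), one per 4k page. */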
static void kvm_unpin_pages(struct kvm *kvm, kvm_pfn_t pfn,
			    unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; ++i)
		kvm_release_pfn_clean(pfn + i);
}

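/*
 * Map every page of @slot into the VM's IOMMU domain, using the largest
 * page size that the host backing, the memslot bounds and the gfn/hva
 * alignment allow.  Pages already present in the IO page table are
 * skipped; on failure, everything mapped so far is torn down again.
 */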
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn, end_gfn;
	kvm_pfn_t pfn;
	int r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	gfn     = slot->base_gfn;
	end_gfn = gfn + slot->npages;

	flags = IOMMU_READ;
	if (!(slot->flags & KVM_MEM_READONLY))
		flags |= IOMMU_WRITE;
	if (!kvm->arch.iommu_noncoherent)
		flags |= IOMMU_CACHE;

	while (gfn < end_gfn) {
		unsigned long page_size;

		/* Check if already mapped */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn += 1;
			continue;
		}

		/* Get the page size we could use to map */
		page_size = kvm_host_page_size(kvm, gfn);

		/* Make sure the page_size does not exceed the memslot */
		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
			page_size >>= 1;

		/* Make sure gfn is aligned to the page size we want to map */
		while ((gfn << PAGE_SHIFT) & (page_size - 1))
			page_size >>= 1;

		/* Make sure hva is aligned to the page size we want to map */
		while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
			page_size >>= 1;

		/*
		 * Pin all pages we are about to map in memory. This is
		 * important because we unmap and unpin in 4kb steps later.
		 */
		pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
		if (is_error_noslot_pfn(pfn)) {
			gfn += 1;
			continue;
		}

		/* Map into IO address space */
		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
			      page_size, flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_address:"
			       "iommu failed to map pfn=%llx\n", pfn);
			kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
			goto unmap_pages;
		}

		gfn += page_size >> PAGE_SHIFT;

		cond_resched();
	}

	return 0;

unmap_pages:
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
	return r;
}

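/* Map all memslots into the IOMMU domain, under SRCU read protection. */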
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
	int idx, r = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	if (kvm->arch.iommu_noncoherent)
		kvm_arch_register_noncoherent_dma(kvm);

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots) {
		r = kvm_iommu_map_pages(kvm, memslot);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, idx);

	return r;
}

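/*
 * Attach an assigned PCI device to the VM's IOMMU domain.  If the
 * device's cache-coherency capability differs from what the existing
 * mappings assume, the memslots are remapped with updated flags first.
 */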
int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int r;
	bool noncoherent;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	if (pdev == NULL)
		return -ENODEV;

	r = iommu_attach_device(domain, &pdev->dev);
	if (r) {
		dev_err(&pdev->dev, "kvm assign device failed ret %d", r);
		return r;
	}

	noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);

	/* Check if need to update IOMMU page table for guest memory */
	if (noncoherent != kvm->arch.iommu_noncoherent) {
		kvm_iommu_unmap_memslots(kvm);
		kvm->arch.iommu_noncoherent = noncoherent;
		r = kvm_iommu_map_memslots(kvm);
		if (r)
			goto out_unmap;
	}

	kvm_arch_start_assignment(kvm);
	pci_set_dev_assigned(pdev);

	dev_info(&pdev->dev, "kvm assign device\n");

	return 0;
out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}

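/* Detach an assigned PCI device from the VM's IOMMU domain. */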
int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	if (pdev == NULL)
		return -ENODEV;

	iommu_detach_device(domain, &pdev->dev);

	pci_clear_dev_assigned(pdev);
	kvm_arch_end_assignment(kvm);

	dev_info(&pdev->dev, "kvm deassign device\n");

	return 0;
}

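/*
 * Allocate the VM's IOMMU domain and map all guest memory into it.
 * Device assignment is refused when the platform lacks interrupt
 * remapping, unless allow_unsafe_assigned_interrupts is set.
 */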
int kvm_iommu_map_guest(struct kvm *kvm)
{
	int r;

	if (!iommu_present(&pci_bus_type)) {
		printk(KERN_ERR "%s: iommu not found\n", __func__);
		return -ENODEV;
	}

	mutex_lock(&kvm->slots_lock);

	kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
	if (!kvm->arch.iommu_domain) {
		r = -ENOMEM;
		goto out_unlock;
	}

	if (!allow_unsafe_assigned_interrupts &&
	    !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
		printk(KERN_WARNING "%s: No interrupt remapping support,"
		       " disallowing device assignment."
		       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
		       " module option.\n", __func__);
		iommu_domain_free(kvm->arch.iommu_domain);
		kvm->arch.iommu_domain = NULL;
		r = -EPERM;
		goto out_unlock;
	}

	r = kvm_iommu_map_memslots(kvm);
	if (r)
		kvm_iommu_unmap_memslots(kvm);

out_unlock:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

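/*
 * Unmap @npages guest frames starting at @base_gfn from the IOMMU
 * domain and unpin the backing host pages so that no references leak.
 */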
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages)
{
	struct iommu_domain *domain;
	gfn_t end_gfn, gfn;
	kvm_pfn_t pfn;
	u64 phys;

	domain  = kvm->arch.iommu_domain;
	end_gfn = base_gfn + npages;
	gfn     = base_gfn;

	/* check if iommu exists and in use */
	if (!domain)
		return;

	while (gfn < end_gfn) {
		unsigned long unmap_pages;
		size_t size;

		/* Get physical address */
		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));

		if (!phys) {
			gfn++;
			continue;
		}

		pfn  = phys >> PAGE_SHIFT;

		/* Unmap address from IO address space */
		size       = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
		unmap_pages = 1ULL << get_order(size);

		/* Unpin all pages we just unmapped to not leak any memory */
		kvm_unpin_pages(kvm, pfn, unmap_pages);

		gfn += unmap_pages;

		cond_resched();
	}
}

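/* Unmap and unpin a whole memslot. */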
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
}

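/* Unmap all memslots, under SRCU read protection. */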
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
	int idx;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots)
		kvm_iommu_unmap_pages(kvm, memslot);

	srcu_read_unlock(&kvm->srcu, idx);

	if (kvm->arch.iommu_noncoherent)
		kvm_arch_unregister_noncoherent_dma(kvm);

	return 0;
}

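/* Tear down the VM's IOMMU domain after unmapping and unpinning everything. */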
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	mutex_lock(&kvm->slots_lock);
	kvm_iommu_unmap_memslots(kvm);
	kvm->arch.iommu_domain = NULL;
	kvm->arch.iommu_noncoherent = false;
	mutex_unlock(&kvm->slots_lock);

	iommu_domain_free(domain);
	return 0;
}