/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>

#include "i915_drv.h"
#include "gvt.h"

static const struct intel_gvt_ops *intel_gvt_ops;

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

struct vfio_region {
	u32 type;
	u32 subtype;
	size_t size;
	u32 flags;
};

struct kvmgt_pgfn {
	gfn_t gfn;
	struct hlist_node hnode;
};

struct kvmgt_guest_info {
	struct kvm *kvm;
	struct intel_vgpu *vgpu;
	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
};

struct gvt_dma {
	struct rb_node node;
	gfn_t gfn;
	unsigned long iova;
};

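/*
 * A vGPU's handle holds the address of its kvmgt_guest_info once a
 * guest is attached (see kvmgt_guest_init()); values that fit in the
 * low byte are treated as "no guest attached".
 */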
static inline bool handle_valid(unsigned long handle)
{
	return !!(handle & ~0xff);
}

static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

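/*
 * DMA-map a pinned guest page so the GPU can access it, returning the
 * resulting IOVA as a page frame number through @iova.
 */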
static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
		unsigned long *iova)
{
	struct page *page;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	if (unlikely(!pfn_valid(pfn)))
		return -EFAULT;

	page = pfn_to_page(pfn);
	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
			PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr))
		return -ENOMEM;

	*iova = (unsigned long)(daddr >> PAGE_SHIFT);
	return 0;
}

static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	daddr = (dma_addr_t)(iova << PAGE_SHIFT);
	dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}

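/*
 * Each vGPU caches its pinned gfn -> iova translations in an rbtree
 * (vdev.cache), protected by vdev.cache_lock.
 */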
static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = vgpu->vdev.cache.rb_node;
	struct gvt_dma *ret = NULL;

	while (node) {
		struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else {
			ret = itr;
			goto out;
		}
	}

out:
	return ret;
}

static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct gvt_dma *entry;
	unsigned long iova;

	mutex_lock(&vgpu->vdev.cache_lock);

	entry = __gvt_cache_find(vgpu, gfn);
	iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;

	mutex_unlock(&vgpu->vdev.cache_lock);
	return iova;
}

static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
		unsigned long iova)
{
	struct gvt_dma *new, *itr;
	struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;

	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
	if (!new)
		return;

	new->gfn = gfn;
	new->iova = iova;

	mutex_lock(&vgpu->vdev.cache_lock);
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, node);

		if (gfn == itr->gfn)
			goto out;
		else if (gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &vgpu->vdev.cache);
	mutex_unlock(&vgpu->vdev.cache_lock);
	return;

out:
	mutex_unlock(&vgpu->vdev.cache_lock);
	kfree(new);
}

static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
				struct gvt_dma *entry)
{
	rb_erase(&entry->node, &vgpu->vdev.cache);
	kfree(entry);
}

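/*
 * Drop one cached translation: unmap the DMA mapping, unpin the page
 * through VFIO and free the rbtree entry.
 */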
static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct device *dev = mdev_dev(vgpu->vdev.mdev);
	struct gvt_dma *this;
	unsigned long g1;
	int rc;

	mutex_lock(&vgpu->vdev.cache_lock);
	this = __gvt_cache_find(vgpu, gfn);
	if (!this) {
		mutex_unlock(&vgpu->vdev.cache_lock);
		return;
	}

	g1 = gfn;
	gvt_dma_unmap_iova(vgpu, this->iova);
	rc = vfio_unpin_pages(dev, &g1, 1);
	WARN_ON(rc != 1);
	__gvt_cache_remove_entry(vgpu, this);
	mutex_unlock(&vgpu->vdev.cache_lock);
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
	vgpu->vdev.cache = RB_ROOT;
	mutex_init(&vgpu->vdev.cache_lock);
}

static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node = NULL;
	struct device *dev = mdev_dev(vgpu->vdev.mdev);
	unsigned long gfn;

	mutex_lock(&vgpu->vdev.cache_lock);
	while ((node = rb_first(&vgpu->vdev.cache))) {
		dma = rb_entry(node, struct gvt_dma, node);
		gvt_dma_unmap_iova(vgpu, dma->iova);
		gfn = dma->gfn;

		vfio_unpin_pages(dev, &gfn, 1);
		__gvt_cache_remove_entry(vgpu, dma);
	}
	mutex_unlock(&vgpu->vdev.cache_lock);
}

static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
		const char *name)
{
	int i;
	struct intel_vgpu_type *t;
	const char *driver_name = dev_driver_string(
			&gvt->dev_priv->drm.pdev->dev);

	for (i = 0; i < gvt->num_types; i++) {
		t = &gvt->types[i];
		if (!strncmp(t->name, name + strlen(driver_name) + 1,
			sizeof(t->name)))
			return t;
	}

	return NULL;
}

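/*
 * Per-type sysfs attributes exposed under the mdev supported_types
 * directory for each vGPU type.
 */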
static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct intel_vgpu_type *type;
	unsigned int num = 0;
	void *gvt = kdev_to_i915(dev)->gvt;

	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type)
		num = 0;
	else
		num = type->avail_instance;

	return sprintf(buf, "%u\n", num);
}

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
		char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}

static ssize_t description_show(struct kobject *kobj, struct device *dev,
		char *buf)
{
	struct intel_vgpu_type *type;
	void *gvt = kdev_to_i915(dev)->gvt;

	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type)
		return 0;

	return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
		       "fence: %d\nresolution: %s\n",
		       BYTES_TO_MB(type->low_gm_size),
		       BYTES_TO_MB(type->high_gm_size),
		       type->fence, vgpu_edid_str(type->resolution));
}

static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);

static struct attribute *type_attrs[] = {
	&mdev_type_attr_available_instances.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_description.attr,
	NULL,
};

static struct attribute_group *intel_vgpu_type_groups[] = {
	[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};

static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
	int i, j;
	struct intel_vgpu_type *type;
	struct attribute_group *group;

	for (i = 0; i < gvt->num_types; i++) {
		type = &gvt->types[i];

		group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
		if (WARN_ON(!group))
			goto unwind;

		group->name = type->name;
		group->attrs = type_attrs;
		intel_vgpu_type_groups[i] = group;
	}

	return true;

unwind:
	for (j = 0; j < i; j++) {
		group = intel_vgpu_type_groups[j];
		kfree(group);
	}

	return false;
}

static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
{
	int i;
	struct attribute_group *group;

	for (i = 0; i < gvt->num_types; i++) {
		group = intel_vgpu_type_groups[i];
		kfree(group);
	}
}

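/*
 * The protect table is a per-guest hash of gfns that are currently
 * write-protected via KVM page tracking.
 */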
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
	hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p, *res = NULL;

	hash_for_each_possible(info->ptable, p, hnode, gfn) {
		if (gfn == p->gfn) {
			res = p;
			break;
		}
	}

	return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return;

	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))
		return;

	p->gfn = gfn;
	hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	if (p) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

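/*
 * mdev "create" callback: resolve the requested vGPU type from the
 * type kobject's name and ask the GVT core to instantiate a vGPU.
 */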
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;
	struct intel_vgpu_type *type;
	struct device *pdev;
	void *gvt;
	int ret;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type) {
		gvt_vgpu_err("failed to find type %s to create\n",
						kobject_name(kobj));
		ret = -EINVAL;
		goto out;
	}

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
		goto out;
	}

	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);

	vgpu->vdev.mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
	ret = 0;

out:
	return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	if (handle_valid(vgpu->handle))
		return -EBUSY;

	intel_gvt_ops->vgpu_destroy(vgpu);
	return 0;
}

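/*
 * VFIO IOMMU notifier: when userspace unmaps a range of guest memory,
 * drop every cached translation that falls inside it.
 */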
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.iommu_notifier);

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		unsigned long gfn, end_gfn;

		gfn = unmap->iova >> PAGE_SHIFT;
		end_gfn = gfn + unmap->size / PAGE_SIZE;

		while (gfn < end_gfn)
			gvt_cache_remove(vgpu, gfn++);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.group_notifier);

	/* the only action we care about */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
		vgpu->vdev.kvm = data;

		if (!data)
			schedule_work(&vgpu->vdev.release_work);
	}

	return NOTIFY_OK;
}

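/*
 * mdev "open" callback: register the IOMMU and group notifiers, then
 * bind the vGPU to the KVM instance delivered by the group notifier.
 */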
static int intel_vgpu_open(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long events;
	int ret;

	vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
	vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;

	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
				&vgpu->vdev.iommu_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
			ret);
		goto out;
	}

	events = VFIO_GROUP_NOTIFY_SET_KVM;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
				&vgpu->vdev.group_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
			ret);
		goto undo_iommu;
	}

	ret = kvmgt_guest_init(mdev);
	if (ret)
		goto undo_group;

	atomic_set(&vgpu->vdev.released, 0);
	return ret;

undo_group:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);

undo_iommu:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
out:
	return ret;
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
	struct kvmgt_guest_info *info;
	int ret;

	if (!handle_valid(vgpu->handle))
		return;

	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
		return;

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);
	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);

	info = (struct kvmgt_guest_info *)vgpu->handle;
	kvmgt_guest_exit(info);

	vgpu->vdev.kvm = NULL;
	vgpu->handle = 0;
}

static void intel_vgpu_release(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	__intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
					       vdev.release_work);

	__intel_vgpu_release(vgpu);
}

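/*
 * Read the guest-visible BAR0 base from the virtual PCI configuration
 * space, folding in the high dword for 64-bit memory BARs.
 */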
static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
{
	u32 start_lo, start_hi;
	u32 mem_type;
	int pos = PCI_BASE_ADDRESS_0;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
						+ pos + 4));
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
	default:
		/* mem unknown type treated as 32-bit BAR */
		start_hi = 0;
		break;
	}

	return ((u64)start_hi << 32) | start_lo;
}

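/*
 * Common read/write path: decode the VFIO region index from *ppos and
 * forward the access to the matching config-space or MMIO emulation
 * handler.
 */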
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
			size_t count, loff_t *ppos, bool is_write)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int ret = -EINVAL;

	if (index >= VFIO_PCI_NUM_REGIONS) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		if (is_write)
			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
						buf, count);
		else
			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
						buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
	case VFIO_PCI_BAR1_REGION_INDEX:
		if (is_write) {
			uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);

			ret = intel_gvt_ops->emulate_mmio_write(vgpu,
						bar0_start + pos, buf, count);
		} else {
			uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);

			ret = intel_gvt_ops->emulate_mmio_read(vgpu,
						bar0_start + pos, buf, count);
		}
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
	default:
		gvt_vgpu_err("unsupported region: %u\n", index);
	}

	return ret == 0 ? count : ret;
}

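/*
 * The read/write file operations split user buffers into naturally
 * aligned 4/2/1-byte accesses so the emulation handlers always see
 * register-sized operations.
 */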
static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
					false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t intel_vgpu_write(struct mdev_device *mdev,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val,
					sizeof(val), ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

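/*
 * Only BAR2 (the GM aperture) may be mmapped; the vGPU's slice of the
 * physical aperture is remapped directly into userspace.
 */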
static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
	unsigned int index;
	u64 virtaddr;
	unsigned long req_size, pgoff = 0;
	pgprot_t pg_prot;
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index != VFIO_PCI_BAR2_REGION_INDEX)
		return -EINVAL;

	pg_prot = vma->vm_page_prot;
	virtaddr = vma->vm_start;
	req_size = vma->vm_end - vma->vm_start;
	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}

static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
{
	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
		return 1;

	return 0;
}

static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, uint32_t flags,
			void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, uint32_t flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);
		}
		vgpu->vdev.msi_trigger = trigger;
	}

	return 0;
}

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
		unsigned int index, unsigned int start, unsigned int count,
		void *data)
{
	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
			unsigned int start, unsigned int count, uint32_t flags,
			void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = intel_vgpu_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = intel_vgpu_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vgpu, index, start, count, flags, data);
}

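/*
 * VFIO device ioctls: GET_INFO, GET_REGION_INFO (with a sparse-mmap
 * capability for BAR2), GET_IRQ_INFO, SET_IRQS and RESET.
 */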
static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
			     unsigned long arg)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long minsz;

	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;
		info.flags |= VFIO_DEVICE_FLAGS_RESET;
		info.num_regions = VFIO_PCI_NUM_REGIONS;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;
		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
		size_t size;
		int nr_areas = 1;
		int cap_type_id;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = INTEL_GVT_MAX_CFG_SPACE_SZ;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->cfg_space.bar[info.index].size;
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR1_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;
			break;
		case VFIO_PCI_BAR2_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = VFIO_REGION_INFO_FLAG_CAPS |
					VFIO_REGION_INFO_FLAG_MMAP |
					VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE;
			info.size = gvt_aperture_sz(vgpu->gvt);

			size = sizeof(*sparse) +
					(nr_areas * sizeof(*sparse->areas));
			sparse = kzalloc(size, GFP_KERNEL);
			if (!sparse)
				return -ENOMEM;

			sparse->nr_areas = nr_areas;
			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->areas[0].offset =
					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
			break;

		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;

			info.flags = 0;
			gvt_dbg_core("get region info bar:%d\n", info.index);
			break;

		case VFIO_PCI_ROM_REGION_INDEX:
		case VFIO_PCI_VGA_REGION_INDEX:
			gvt_dbg_core("get region info index:%d\n", info.index);
			break;
		default:
			{
				struct vfio_region_info_cap_type cap_type;

				if (info.index >= VFIO_PCI_NUM_REGIONS +
						vgpu->vdev.num_regions)
					return -EINVAL;

				i = info.index - VFIO_PCI_NUM_REGIONS;

				info.offset =
					VFIO_PCI_INDEX_TO_OFFSET(info.index);
				info.size = vgpu->vdev.region[i].size;
				info.flags = vgpu->vdev.region[i].flags;

				cap_type.type = vgpu->vdev.region[i].type;
				cap_type.subtype = vgpu->vdev.region[i].subtype;

				ret = vfio_info_add_capability(&caps,
						VFIO_REGION_INFO_CAP_TYPE,
						&cap_type);
				if (ret)
					return ret;
			}
		}

		if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
			switch (cap_type_id) {
			case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
				ret = vfio_info_add_capability(&caps,
					VFIO_REGION_INFO_CAP_SPARSE_MMAP,
					sparse);
				kfree(sparse);
				if (ret)
					return ret;
				break;
			default:
				return -EINVAL;
			}
		}

		if (caps.size) {
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX:
		case VFIO_PCI_MSI_IRQ_INDEX:
			break;
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = intel_vgpu_get_irq_count(vgpu, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);

			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
			if (ret) {
				gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
				return -EINVAL;
			}
			if (data_size) {
				data = memdup_user((void __user *)(arg + minsz),
						   data_size);
				if (IS_ERR(data))
					return PTR_ERR(data);
			}
		}

		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
					hdr.start, hdr.count, data);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		intel_gvt_ops->vgpu_reset(vgpu);
		return 0;
	}

	return 0;
}

static const struct mdev_parent_ops intel_vgpu_ops = {
	.supported_type_groups	= intel_vgpu_type_groups,
	.create			= intel_vgpu_create,
	.remove			= intel_vgpu_remove,

	.open			= intel_vgpu_open,
	.release		= intel_vgpu_release,

	.read			= intel_vgpu_read,
	.write			= intel_vgpu_write,
	.mmap			= intel_vgpu_mmap,
	.ioctl			= intel_vgpu_ioctl,
};

static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
	if (!intel_gvt_init_vgpu_type_groups(gvt))
		return -EFAULT;

	intel_gvt_ops = ops;

	return mdev_register_device(dev, &intel_vgpu_ops);
}

static void kvmgt_host_exit(struct device *dev, void *gvt)
{
	intel_gvt_cleanup_vgpu_type_groups(gvt);
	mdev_unregister_device(dev);
}

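/*
 * Write-protect or unprotect a guest page via KVM page tracking,
 * mirroring the state in the per-guest protect table.
 */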
static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_add(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}

static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return 0;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (!kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_del(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}

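/*
 * KVM page-track callbacks: forward writes to tracked pages into the
 * GVT MMIO-write emulator, and untrack everything in a memslot when
 * KVM flushes it.
 */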
static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		const u8 *val, int len,
		struct kvm_page_track_notifier_node *node)
{
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
		intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
					(void *)val, len);
}

static void kvmgt_page_track_flush_slot(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		struct kvm_page_track_notifier_node *node)
{
	int i;
	gfn_t gfn;
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i < slot->npages; i++) {
		gfn = slot->base_gfn + i;
		if (kvmgt_gfn_is_write_protected(info, gfn)) {
			kvm_slot_page_track_remove_page(kvm, slot, gfn,
						KVM_PAGE_TRACK_WRITE);
			kvmgt_protect_table_del(info, gfn);
		}
	}
	spin_unlock(&kvm->mmu_lock);
}

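/*
 * Guest attach: a KVM instance may back at most one vGPU, so an
 * existing binding is rejected before taking a reference on the KVM
 * and registering the page-track notifier.
 */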
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
	struct intel_vgpu *itr;
	struct kvmgt_guest_info *info;
	int id;
	bool ret = false;

	mutex_lock(&vgpu->gvt->lock);
	for_each_active_vgpu(vgpu->gvt, itr, id) {
		if (!handle_valid(itr->handle))
			continue;

		info = (struct kvmgt_guest_info *)itr->handle;
		if (kvm && kvm == info->kvm) {
			ret = true;
			goto out;
		}
	}
out:
	mutex_unlock(&vgpu->gvt->lock);
	return ret;
}

static int kvmgt_guest_init(struct mdev_device *mdev)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;
	struct kvm *kvm;

	vgpu = mdev_get_drvdata(mdev);
	if (handle_valid(vgpu->handle))
		return -EEXIST;

	kvm = vgpu->vdev.kvm;
	if (!kvm || kvm->mm != current->mm) {
		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
		return -ESRCH;
	}

	if (__kvmgt_vgpu_exist(vgpu, kvm))
		return -EEXIST;

	info = vzalloc(sizeof(struct kvmgt_guest_info));
	if (!info)
		return -ENOMEM;

	vgpu->handle = (unsigned long)info;
	info->vgpu = vgpu;
	info->kvm = kvm;
	kvm_get_kvm(info->kvm);

	kvmgt_protect_table_init(info);
	gvt_cache_init(vgpu);

	info->track_node.track_write = kvmgt_page_track_write;
	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
	kvm_page_track_register_notifier(kvm, &info->track_node);

	return 0;
}

static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
	struct intel_vgpu *vgpu = info ? info->vgpu : NULL;

	if (!info) {
		gvt_vgpu_err("kvmgt_guest_info invalid\n");
		return false;
	}

	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
	kvm_put_kvm(info->kvm);
	kvmgt_protect_table_destroy(info);
	gvt_cache_destroy(info->vgpu);
	vfree(info);

	return true;
}

static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
{
	/* nothing to do here */
	return 0;
}

static void kvmgt_detach_vgpu(unsigned long handle)
{
	/* nothing to do here */
}

static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;

	if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
		return 0;

	return -EFAULT;
}

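/*
 * Translate a gfn for device DMA: look up the per-vGPU cache first;
 * on a miss, pin the page through VFIO, DMA-map it and cache the
 * result. Returns INTEL_GVT_INVALID_ADDR on failure.
 */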
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
	unsigned long iova, pfn;
	struct kvmgt_guest_info *info;
	struct device *dev;
	struct intel_vgpu *vgpu;
	int rc;

	if (!handle_valid(handle))
		return INTEL_GVT_INVALID_ADDR;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;
	iova = gvt_cache_find(info->vgpu, gfn);
	if (iova != INTEL_GVT_INVALID_ADDR)
		return iova;

	pfn = INTEL_GVT_INVALID_ADDR;
	dev = mdev_dev(info->vgpu->vdev.mdev);
	rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
	if (rc != 1) {
		gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
			gfn, rc);
		return INTEL_GVT_INVALID_ADDR;
	}
	/* transfer to host iova for GFX to use DMA */
	rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
	if (rc) {
		gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
		vfio_unpin_pages(dev, &gfn, 1);
		return INTEL_GVT_INVALID_ADDR;
	}

	gvt_cache_add(info->vgpu, gfn, iova);
	return iova;
}

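/*
 * Read or write guest physical memory through KVM. Kernel threads
 * have no mm of their own, so the guest's mm is temporarily adopted
 * with use_mm() for the duration of the access.
 */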
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len, bool write)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	int idx, ret;
	bool kthread = current->mm == NULL;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	if (kthread)
		use_mm(kvm->mm);

	idx = srcu_read_lock(&kvm->srcu);
	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
		      kvm_read_guest(kvm, gpa, buf, len);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kthread)
		unuse_mm(kvm->mm);

	return ret;
}

static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, false);
}

static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, true);
}

static unsigned long kvmgt_virt_to_pfn(void *addr)
{
	return PFN_DOWN(__pa(addr));
}

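/*
 * Mediated pass-through services exported to the GVT core.
 */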
struct intel_gvt_mpt kvmgt_mpt = {
	.host_init = kvmgt_host_init,
	.host_exit = kvmgt_host_exit,
	.attach_vgpu = kvmgt_attach_vgpu,
	.detach_vgpu = kvmgt_detach_vgpu,
	.inject_msi = kvmgt_inject_msi,
	.from_virt_to_mfn = kvmgt_virt_to_pfn,
	.set_wp_page = kvmgt_write_protect_add,
	.unset_wp_page = kvmgt_write_protect_remove,
	.read_gpa = kvmgt_read_gpa,
	.write_gpa = kvmgt_write_gpa,
	.gfn_to_mfn = kvmgt_gfn_to_pfn,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);

static int __init kvmgt_init(void)
{
	return 0;
}

static void __exit kvmgt_exit(void)
{
}

module_init(kvmgt_init);
module_exit(kvmgt_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Intel Corporation");