/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>

#include <linux/nospec.h>

#include "i915_drv.h"
#include "gvt.h"

static const struct intel_gvt_ops *intel_gvt_ops;

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT 40
#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

struct vfio_region {
	u32 type;
	u32 subtype;
	size_t size;
	u32 flags;
};

struct kvmgt_pgfn {
	gfn_t gfn;
	struct hlist_node hnode;
};

struct kvmgt_guest_info {
	struct kvm *kvm;
	struct intel_vgpu *vgpu;
	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
};

struct gvt_dma {
	struct rb_node node;
	gfn_t gfn;
	unsigned long iova;
};

static inline bool handle_valid(unsigned long handle)
{
	return !!(handle & ~0xff);
}

static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

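/*
 * Map a pinned guest page's pfn to a host IOVA through the parent i915
 * device's DMA API so the GPU can access it; the IOVA is returned as a
 * page frame number (daddr >> PAGE_SHIFT). The matching unmap helper
 * follows below.
 */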
static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
		unsigned long *iova)
{
	struct page *page;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	if (unlikely(!pfn_valid(pfn)))
		return -EFAULT;

	page = pfn_to_page(pfn);
	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
			PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr))
		return -ENOMEM;

	*iova = (unsigned long)(daddr >> PAGE_SHIFT);
	return 0;
}

static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	daddr = (dma_addr_t)(iova << PAGE_SHIFT);
	dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}

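/*
 * Per-vGPU cache of pinned guest pages: an rb tree keyed by gfn that
 * stores the IOVA each pinned page was mapped to. Lookups and updates
 * are serialized by vdev.cache_lock.
 */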
static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = vgpu->vdev.cache.rb_node;
	struct gvt_dma *ret = NULL;

	while (node) {
		struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else {
			ret = itr;
			goto out;
		}
	}

out:
	return ret;
}

static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct gvt_dma *entry;
	unsigned long iova;

	mutex_lock(&vgpu->vdev.cache_lock);

	entry = __gvt_cache_find(vgpu, gfn);
	iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;

	mutex_unlock(&vgpu->vdev.cache_lock);
	return iova;
}

static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
		unsigned long iova)
{
	struct gvt_dma *new, *itr;
	struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;

	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
	if (!new)
		return;

	new->gfn = gfn;
	new->iova = iova;

	mutex_lock(&vgpu->vdev.cache_lock);
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, node);

		if (gfn == itr->gfn)
			goto out;
		else if (gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &vgpu->vdev.cache);
	mutex_unlock(&vgpu->vdev.cache_lock);
	return;

out:
	mutex_unlock(&vgpu->vdev.cache_lock);
	kfree(new);
}

static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
				struct gvt_dma *entry)
{
	rb_erase(&entry->node, &vgpu->vdev.cache);
	kfree(entry);
}

static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct device *dev = mdev_dev(vgpu->vdev.mdev);
	struct gvt_dma *this;
	unsigned long g1;
	int rc;

	mutex_lock(&vgpu->vdev.cache_lock);
	this = __gvt_cache_find(vgpu, gfn);
	if (!this) {
		mutex_unlock(&vgpu->vdev.cache_lock);
		return;
	}

	g1 = gfn;
	gvt_dma_unmap_iova(vgpu, this->iova);
	rc = vfio_unpin_pages(dev, &g1, 1);
	WARN_ON(rc != 1);
	__gvt_cache_remove_entry(vgpu, this);
	mutex_unlock(&vgpu->vdev.cache_lock);
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
	vgpu->vdev.cache = RB_ROOT;
	mutex_init(&vgpu->vdev.cache_lock);
}

static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node = NULL;
	struct device *dev = mdev_dev(vgpu->vdev.mdev);
	unsigned long gfn;

	for (;;) {
		mutex_lock(&vgpu->vdev.cache_lock);
		node = rb_first(&vgpu->vdev.cache);
		if (!node) {
			mutex_unlock(&vgpu->vdev.cache_lock);
			break;
		}
		dma = rb_entry(node, struct gvt_dma, node);
		gvt_dma_unmap_iova(vgpu, dma->iova);
		gfn = dma->gfn;
		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vgpu->vdev.cache_lock);
		vfio_unpin_pages(dev, &gfn, 1);
	}
}

static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
		const char *name)
{
	int i;
	struct intel_vgpu_type *t;
	const char *driver_name = dev_driver_string(
			&gvt->dev_priv->drm.pdev->dev);

	for (i = 0; i < gvt->num_types; i++) {
		t = &gvt->types[i];
		if (!strncmp(t->name, name + strlen(driver_name) + 1,
				sizeof(t->name)))
			return t;
	}

	return NULL;
}

static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct intel_vgpu_type *type;
	unsigned int num = 0;
	void *gvt = kdev_to_i915(dev)->gvt;

	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type)
		num = 0;
	else
		num = type->avail_instance;

	return sprintf(buf, "%u\n", num);
}

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
		char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}

static ssize_t description_show(struct kobject *kobj, struct device *dev,
		char *buf)
{
	struct intel_vgpu_type *type;
	void *gvt = kdev_to_i915(dev)->gvt;

	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type)
		return 0;

	return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
		       "fence: %d\nresolution: %s\n"
		       "weight: %d\n",
		       BYTES_TO_MB(type->low_gm_size),
		       BYTES_TO_MB(type->high_gm_size),
		       type->fence, vgpu_edid_str(type->resolution),
		       type->weight);
}

static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);

static struct attribute *type_attrs[] = {
	&mdev_type_attr_available_instances.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_description.attr,
	NULL,
};

static struct attribute_group *intel_vgpu_type_groups[] = {
	[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};

static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
	int i, j;
	struct intel_vgpu_type *type;
	struct attribute_group *group;

	for (i = 0; i < gvt->num_types; i++) {
		type = &gvt->types[i];

		group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
		if (WARN_ON(!group))
			goto unwind;

		group->name = type->name;
		group->attrs = type_attrs;
		intel_vgpu_type_groups[i] = group;
	}

	return true;

unwind:
	for (j = 0; j < i; j++) {
		group = intel_vgpu_type_groups[j];
		kfree(group);
	}

	return false;
}

static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
{
	int i;
	struct attribute_group *group;

	for (i = 0; i < gvt->num_types; i++) {
		group = intel_vgpu_type_groups[i];
		kfree(group);
	}
}

static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
	hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p, *res = NULL;

	hash_for_each_possible(info->ptable, p, hnode, gfn) {
		if (gfn == p->gfn) {
			res = p;
			break;
		}
	}

	return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return;

	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))
		return;

	p->gfn = gfn;
	hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	if (p) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;
	struct intel_vgpu_type *type;
	struct device *pdev;
	void *gvt;
	int ret;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type) {
		gvt_vgpu_err("failed to find type %s to create\n",
			     kobject_name(kobj));
		ret = -EINVAL;
		goto out;
	}

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
		goto out;
	}

	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);

	vgpu->vdev.mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
	ret = 0;

out:
	return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	if (handle_valid(vgpu->handle))
		return -EBUSY;

	intel_gvt_ops->vgpu_destroy(vgpu);
	return 0;
}

static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.iommu_notifier);

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		unsigned long gfn, end_gfn;

		gfn = unmap->iova >> PAGE_SHIFT;
		end_gfn = gfn + unmap->size / PAGE_SIZE;

		while (gfn < end_gfn)
			gvt_cache_remove(vgpu, gfn++);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.group_notifier);

	/* the only action we care about */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
		vgpu->vdev.kvm = data;

		if (!data)
			schedule_work(&vgpu->vdev.release_work);
	}

	return NOTIFY_OK;
}

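/*
 * Called when userspace opens the mdev: register the IOMMU DMA-unmap and
 * group (KVM association) notifiers, then set up the KVMGT guest state
 * and activate the vGPU.
 */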
static int intel_vgpu_open(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long events;
	int ret;

	vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
	vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;

	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
				&vgpu->vdev.iommu_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
			ret);
		goto out;
	}

	events = VFIO_GROUP_NOTIFY_SET_KVM;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
				&vgpu->vdev.group_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
			ret);
		goto undo_iommu;
	}

	ret = kvmgt_guest_init(mdev);
	if (ret)
		goto undo_group;

	intel_gvt_ops->vgpu_activate(vgpu);

	atomic_set(&vgpu->vdev.released, 0);
	return ret;

undo_group:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);

undo_iommu:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
out:
	return ret;
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
	struct kvmgt_guest_info *info;
	int ret;

	if (!handle_valid(vgpu->handle))
		return;

	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
		return;

	intel_gvt_ops->vgpu_deactivate(vgpu);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);
	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);

	info = (struct kvmgt_guest_info *)vgpu->handle;
	kvmgt_guest_exit(info);

	vgpu->vdev.kvm = NULL;
	vgpu->handle = 0;
}

static void intel_vgpu_release(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	__intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
					vdev.release_work);

	__intel_vgpu_release(vgpu);
}

static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
	u32 start_lo, start_hi;
	u32 mem_type;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
				+ bar + 4));
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
	default:
		/* mem unknown type treated as 32-bit BAR */
		start_hi = 0;
		break;
	}

	return ((u64)start_hi << 32) | start_lo;
}

static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
			void *buf, unsigned int count, bool is_write)
{
	uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
	int ret;

	if (is_write)
		ret = intel_gvt_ops->emulate_mmio_write(vgpu,
					bar_start + off, buf, count);
	else
		ret = intel_gvt_ops->emulate_mmio_read(vgpu,
					bar_start + off, buf, count);
	return ret;
}

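/*
 * Common read/write handler for the mdev fops. The VFIO region index and
 * offset are packed into *ppos (see the VFIO_PCI_OFFSET_* macros above);
 * accesses are dispatched to config-space or BAR emulation accordingly.
 */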
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
			size_t count, loff_t *ppos, bool is_write)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int ret = -EINVAL;


	if (index >= VFIO_PCI_NUM_REGIONS) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		if (is_write)
			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
						buf, count);
		else
			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
						buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
	default:
		gvt_vgpu_err("unsupported region: %u\n", index);
	}

	return ret == 0 ? count : ret;
}

static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
					false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t intel_vgpu_write(struct mdev_device *mdev,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val,
					sizeof(val), ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
	unsigned int index;
	u64 virtaddr;
	unsigned long req_size, pgoff, req_start;
	pgprot_t pg_prot;
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index != VFIO_PCI_BAR2_REGION_INDEX)
		return -EINVAL;

	pg_prot = vma->vm_page_prot;
	virtaddr = vma->vm_start;
	req_size = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (!intel_vgpu_in_aperture(vgpu, req_start))
		return -EINVAL;
	if (req_start + req_size >
	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
		return -EINVAL;

	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}

static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
{
	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
		return 1;

	return 0;
}

static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, uint32_t flags,
			void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, uint32_t flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);
		}
		vgpu->vdev.msi_trigger = trigger;
	}

	return 0;
}

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
		unsigned int index, unsigned int start, unsigned int count,
		void *data)
{
	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
			unsigned int start, unsigned int count, uint32_t flags,
			void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = intel_vgpu_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = intel_vgpu_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vgpu, index, start, count, flags, data);
}

static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
			     unsigned long arg)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long minsz;

	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;
		info.flags |= VFIO_DEVICE_FLAGS_RESET;
		info.num_regions = VFIO_PCI_NUM_REGIONS;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		unsigned int i;
		int ret;
		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
		size_t size;
		int nr_areas = 1;
		int cap_type_id;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->gvt->device_info.cfg_space_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->cfg_space.bar[info.index].size;
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR1_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;
			break;
		case VFIO_PCI_BAR2_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = VFIO_REGION_INFO_FLAG_CAPS |
					VFIO_REGION_INFO_FLAG_MMAP |
					VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE;
			info.size = gvt_aperture_sz(vgpu->gvt);

			size = sizeof(*sparse) +
					(nr_areas * sizeof(*sparse->areas));
			sparse = kzalloc(size, GFP_KERNEL);
			if (!sparse)
				return -ENOMEM;

			sparse->nr_areas = nr_areas;
			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->areas[0].offset =
					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
			break;

		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;

			info.flags = 0;
			gvt_dbg_core("get region info bar:%d\n", info.index);
			break;

		case VFIO_PCI_ROM_REGION_INDEX:
		case VFIO_PCI_VGA_REGION_INDEX:
			gvt_dbg_core("get region info index:%d\n", info.index);
			break;
		default:
			{
				struct vfio_region_info_cap_type cap_type;

				if (info.index >= VFIO_PCI_NUM_REGIONS +
						vgpu->vdev.num_regions)
					return -EINVAL;
				info.index =
					array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vgpu->vdev.num_regions);

				i = info.index - VFIO_PCI_NUM_REGIONS;

				info.offset =
					VFIO_PCI_INDEX_TO_OFFSET(info.index);
				info.size = vgpu->vdev.region[i].size;
				info.flags = vgpu->vdev.region[i].flags;

				cap_type.type = vgpu->vdev.region[i].type;
				cap_type.subtype = vgpu->vdev.region[i].subtype;

				ret = vfio_info_add_capability(&caps,
						VFIO_REGION_INFO_CAP_TYPE,
						&cap_type);
				if (ret)
					return ret;
			}
		}

		if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
			switch (cap_type_id) {
			case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
				ret = vfio_info_add_capability(&caps,
					VFIO_REGION_INFO_CAP_SPARSE_MMAP,
					sparse);
				kfree(sparse);
				if (ret)
					return ret;
				break;
			default:
				return -EINVAL;
			}
		}

		if (caps.size) {
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX:
		case VFIO_PCI_MSI_IRQ_INDEX:
			break;
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = intel_vgpu_get_irq_count(vgpu, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);

			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
			if (ret) {
				gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
				return -EINVAL;
			}
			if (data_size) {
				data = memdup_user((void __user *)(arg + minsz),
						   data_size);
				if (IS_ERR(data))
					return PTR_ERR(data);
			}
		}

		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
					hdr.start, hdr.count, data);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		intel_gvt_ops->vgpu_reset(vgpu);
		return 0;
	}

	return -ENOTTY;
}

static ssize_t
vgpu_id_show(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%d\n", vgpu->id);
	}
	return sprintf(buf, "\n");
}

static ssize_t
hw_id_show(struct device *dev, struct device_attribute *attr,
	   char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%u\n",
			       vgpu->shadow_ctx->hw_id);
	}
	return sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(vgpu_id);
static DEVICE_ATTR_RO(hw_id);

static struct attribute *intel_vgpu_attrs[] = {
	&dev_attr_vgpu_id.attr,
	&dev_attr_hw_id.attr,
	NULL
};

static const struct attribute_group intel_vgpu_group = {
	.name = "intel_vgpu",
	.attrs = intel_vgpu_attrs,
};

static const struct attribute_group *intel_vgpu_groups[] = {
	&intel_vgpu_group,
	NULL,
};

static const struct mdev_parent_ops intel_vgpu_ops = {
	.supported_type_groups = intel_vgpu_type_groups,
	.mdev_attr_groups = intel_vgpu_groups,
	.create = intel_vgpu_create,
	.remove = intel_vgpu_remove,

	.open = intel_vgpu_open,
	.release = intel_vgpu_release,

	.read = intel_vgpu_read,
	.write = intel_vgpu_write,
	.mmap = intel_vgpu_mmap,
	.ioctl = intel_vgpu_ioctl,
};

static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
	if (!intel_gvt_init_vgpu_type_groups(gvt))
		return -EFAULT;

	intel_gvt_ops = ops;

	return mdev_register_device(dev, &intel_vgpu_ops);
}

static void kvmgt_host_exit(struct device *dev, void *gvt)
{
	intel_gvt_cleanup_vgpu_type_groups(gvt);
	mdev_unregister_device(dev);
}

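/*
 * Write-protect (or unprotect) a guest page through KVM's page-track
 * framework, so guest writes to the page are forwarded to
 * kvmgt_page_track_write() below.
 */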
static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_add(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}

static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return 0;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (!kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_del(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}

static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		const u8 *val, int len,
		struct kvm_page_track_notifier_node *node)
{
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
		intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
					(void *)val, len);
}

static void kvmgt_page_track_flush_slot(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		struct kvm_page_track_notifier_node *node)
{
	int i;
	gfn_t gfn;
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i < slot->npages; i++) {
		gfn = slot->base_gfn + i;
		if (kvmgt_gfn_is_write_protected(info, gfn)) {
			kvm_slot_page_track_remove_page(kvm, slot, gfn,
						KVM_PAGE_TRACK_WRITE);
			kvmgt_protect_table_del(info, gfn);
		}
	}
	spin_unlock(&kvm->mmu_lock);
}

static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
	struct intel_vgpu *itr;
	struct kvmgt_guest_info *info;
	int id;
	bool ret = false;

	mutex_lock(&vgpu->gvt->lock);
	for_each_active_vgpu(vgpu->gvt, itr, id) {
		if (!handle_valid(itr->handle))
			continue;

		info = (struct kvmgt_guest_info *)itr->handle;
		if (kvm && kvm == info->kvm) {
			ret = true;
			goto out;
		}
	}
out:
	mutex_unlock(&vgpu->gvt->lock);
	return ret;
}

static int kvmgt_guest_init(struct mdev_device *mdev)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;
	struct kvm *kvm;

	vgpu = mdev_get_drvdata(mdev);
	if (handle_valid(vgpu->handle))
		return -EEXIST;

	kvm = vgpu->vdev.kvm;
	if (!kvm || kvm->mm != current->mm) {
		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
		return -ESRCH;
	}

	if (__kvmgt_vgpu_exist(vgpu, kvm))
		return -EEXIST;

	info = vzalloc(sizeof(struct kvmgt_guest_info));
	if (!info)
		return -ENOMEM;

	vgpu->handle = (unsigned long)info;
	info->vgpu = vgpu;
	info->kvm = kvm;
	kvm_get_kvm(info->kvm);

	kvmgt_protect_table_init(info);
	gvt_cache_init(vgpu);

	info->track_node.track_write = kvmgt_page_track_write;
	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
	kvm_page_track_register_notifier(kvm, &info->track_node);

	return 0;
}

static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
	kvm_put_kvm(info->kvm);
	kvmgt_protect_table_destroy(info);
	gvt_cache_destroy(info->vgpu);
	vfree(info);

	return true;
}

static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
{
	/* nothing to do here */
	return 0;
}

static void kvmgt_detach_vgpu(unsigned long handle)
{
	/* nothing to do here */
}

static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;

	if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
		return 0;

	return -EFAULT;
}

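/*
 * Translate a guest frame number to a host IOVA the GPU can use: check the
 * per-vGPU cache first, otherwise pin the page through VFIO, map it for DMA
 * and remember the result in the cache.
 */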
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
	unsigned long iova, pfn;
	struct kvmgt_guest_info *info;
	struct device *dev;
	struct intel_vgpu *vgpu;
	int rc;

	if (!handle_valid(handle))
		return INTEL_GVT_INVALID_ADDR;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;
	iova = gvt_cache_find(info->vgpu, gfn);
	if (iova != INTEL_GVT_INVALID_ADDR)
		return iova;

	pfn = INTEL_GVT_INVALID_ADDR;
	dev = mdev_dev(info->vgpu->vdev.mdev);
	rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
	if (rc != 1) {
		gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
			gfn, rc);
		return INTEL_GVT_INVALID_ADDR;
	}
	/* transfer to host iova for GFX to use DMA */
	rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
	if (rc) {
		gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
		vfio_unpin_pages(dev, &gfn, 1);
		return INTEL_GVT_INVALID_ADDR;
	}

	gvt_cache_add(info->vgpu, gfn, iova);
	return iova;
}

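/*
 * Read or write guest physical memory via KVM. When called from a kernel
 * thread (current->mm == NULL), temporarily borrow the guest's mm with
 * use_mm()/unuse_mm() so the access can be performed.
 */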
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len, bool write)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	int idx, ret;
	bool kthread = current->mm == NULL;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	if (kthread)
		use_mm(kvm->mm);

	idx = srcu_read_lock(&kvm->srcu);
	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
		kvm_read_guest(kvm, gpa, buf, len);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kthread)
		unuse_mm(kvm->mm);

	return ret;
}

static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, false);
}

static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, true);
}

static unsigned long kvmgt_virt_to_pfn(void *addr)
{
	return PFN_DOWN(__pa(addr));
}

struct intel_gvt_mpt kvmgt_mpt = {
	.host_init = kvmgt_host_init,
	.host_exit = kvmgt_host_exit,
	.attach_vgpu = kvmgt_attach_vgpu,
	.detach_vgpu = kvmgt_detach_vgpu,
	.inject_msi = kvmgt_inject_msi,
	.from_virt_to_mfn = kvmgt_virt_to_pfn,
	.set_wp_page = kvmgt_write_protect_add,
	.unset_wp_page = kvmgt_write_protect_remove,
	.read_gpa = kvmgt_read_gpa,
	.write_gpa = kvmgt_write_gpa,
	.gfn_to_mfn = kvmgt_gfn_to_pfn,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);

static int __init kvmgt_init(void)
{
	return 0;
}

static void __exit kvmgt_exit(void)
{
}

module_init(kvmgt_init);
module_exit(kvmgt_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Intel Corporation");