/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>

#include <linux/nospec.h>

#include "i915_drv.h"
#include "gvt.h"

static const struct intel_gvt_ops *intel_gvt_ops;

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)	(off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index)	((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK	(((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
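/*
 * These macros pack a VFIO region index into the upper bits of the 64-bit
 * file offset.  For example, a region with index 2 accessed at byte offset
 * 0x100 uses file offset VFIO_PCI_INDEX_TO_OFFSET(2) + 0x100 ==
 * (2ULL << 40) | 0x100; intel_vgpu_rw() recovers the index with
 * VFIO_PCI_OFFSET_TO_INDEX() and the byte offset with VFIO_PCI_OFFSET_MASK.
 */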

struct vfio_region {
	u32 type;
	u32 subtype;
	size_t size;
	u32 flags;
};

struct kvmgt_pgfn {
	gfn_t gfn;
	struct hlist_node hnode;
};

struct kvmgt_guest_info {
	struct kvm *kvm;
	struct intel_vgpu *vgpu;
	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
};

struct gvt_dma {
	struct rb_node node;
	gfn_t gfn;
	unsigned long iova;
};

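/*
 * vgpu->handle stores a pointer to the per-guest kvmgt_guest_info (set up
 * in kvmgt_guest_init() below), so any value that fits in the low byte is
 * treated as "no guest attached".
 */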
static inline bool handle_valid(unsigned long handle)
{
	return !!(handle & ~0xff);
}

static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

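/*
 * gfn -> iova cache: guest pages handed to the device are pinned via VFIO
 * and mapped for DMA in gvt_dma_map_iova(); the resulting iova is cached in
 * a per-vGPU rb-tree (vgpu->vdev.cache) keyed by gfn, so repeated lookups
 * in kvmgt_gfn_to_pfn() can skip the pin/map path.
 */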
static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
		unsigned long *iova)
{
	struct page *page;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	if (unlikely(!pfn_valid(pfn)))
		return -EFAULT;

	page = pfn_to_page(pfn);
	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
			PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr))
		return -ENOMEM;

	*iova = (unsigned long)(daddr >> PAGE_SHIFT);
	return 0;
}

static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	daddr = (dma_addr_t)(iova << PAGE_SHIFT);
	dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}

static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = vgpu->vdev.cache.rb_node;
	struct gvt_dma *ret = NULL;

	while (node) {
		struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else {
			ret = itr;
			goto out;
		}
	}

out:
	return ret;
}

static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct gvt_dma *entry;
	unsigned long iova;

	mutex_lock(&vgpu->vdev.cache_lock);

	entry = __gvt_cache_find(vgpu, gfn);
	iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;

	mutex_unlock(&vgpu->vdev.cache_lock);
	return iova;
}

static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
		unsigned long iova)
{
	struct gvt_dma *new, *itr;
	struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;

	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
	if (!new)
		return;

	new->gfn = gfn;
	new->iova = iova;

	mutex_lock(&vgpu->vdev.cache_lock);
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, node);

		if (gfn == itr->gfn)
			goto out;
		else if (gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &vgpu->vdev.cache);
	mutex_unlock(&vgpu->vdev.cache_lock);
	return;

out:
	mutex_unlock(&vgpu->vdev.cache_lock);
	kfree(new);
}

static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
				struct gvt_dma *entry)
{
	rb_erase(&entry->node, &vgpu->vdev.cache);
	kfree(entry);
}

static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct device *dev = mdev_dev(vgpu->vdev.mdev);
	struct gvt_dma *this;
	unsigned long g1;
	int rc;

	mutex_lock(&vgpu->vdev.cache_lock);
	this = __gvt_cache_find(vgpu, gfn);
	if (!this) {
		mutex_unlock(&vgpu->vdev.cache_lock);
		return;
	}

	g1 = gfn;
	gvt_dma_unmap_iova(vgpu, this->iova);
	rc = vfio_unpin_pages(dev, &g1, 1);
	WARN_ON(rc != 1);
	__gvt_cache_remove_entry(vgpu, this);
	mutex_unlock(&vgpu->vdev.cache_lock);
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
	vgpu->vdev.cache = RB_ROOT;
	mutex_init(&vgpu->vdev.cache_lock);
}

static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node = NULL;
	struct device *dev = mdev_dev(vgpu->vdev.mdev);
	unsigned long gfn;

	for (;;) {
		mutex_lock(&vgpu->vdev.cache_lock);
		node = rb_first(&vgpu->vdev.cache);
		if (!node) {
			mutex_unlock(&vgpu->vdev.cache_lock);
			break;
		}
		dma = rb_entry(node, struct gvt_dma, node);
		gvt_dma_unmap_iova(vgpu, dma->iova);
		gfn = dma->gfn;
		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vgpu->vdev.cache_lock);
		vfio_unpin_pages(dev, &gfn, 1);
	}
}

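/*
 * The mdev type's sysfs kobject is named "<parent driver name>-<vGPU type
 * name>", so skip past the driver prefix and the separator before matching
 * against the GVT type table.
 */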
static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
		const char *name)
{
	int i;
	struct intel_vgpu_type *t;
	const char *driver_name = dev_driver_string(
			&gvt->dev_priv->drm.pdev->dev);

	for (i = 0; i < gvt->num_types; i++) {
		t = &gvt->types[i];
		if (!strncmp(t->name, name + strlen(driver_name) + 1,
			sizeof(t->name)))
			return t;
	}

	return NULL;
}

static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct intel_vgpu_type *type;
	unsigned int num = 0;
	void *gvt = kdev_to_i915(dev)->gvt;

	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type)
		num = 0;
	else
		num = type->avail_instance;

	return sprintf(buf, "%u\n", num);
}

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
		char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}

static ssize_t description_show(struct kobject *kobj, struct device *dev,
		char *buf)
{
	struct intel_vgpu_type *type;
	void *gvt = kdev_to_i915(dev)->gvt;

	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type)
		return 0;

	return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
		       "fence: %d\nresolution: %s\n"
		       "weight: %d\n",
		       BYTES_TO_MB(type->low_gm_size),
		       BYTES_TO_MB(type->high_gm_size),
		       type->fence, vgpu_edid_str(type->resolution),
		       type->weight);
}

static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);

static struct attribute *type_attrs[] = {
	&mdev_type_attr_available_instances.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_description.attr,
	NULL,
};

static struct attribute_group *intel_vgpu_type_groups[] = {
	[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};

static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
	int i, j;
	struct intel_vgpu_type *type;
	struct attribute_group *group;

	for (i = 0; i < gvt->num_types; i++) {
		type = &gvt->types[i];

		group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
		if (WARN_ON(!group))
			goto unwind;

		group->name = type->name;
		group->attrs = type_attrs;
		intel_vgpu_type_groups[i] = group;
	}

	return true;

unwind:
	for (j = 0; j < i; j++) {
		group = intel_vgpu_type_groups[j];
		kfree(group);
	}

	return false;
}

static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
{
	int i;
	struct attribute_group *group;

	for (i = 0; i < gvt->num_types; i++) {
		group = intel_vgpu_type_groups[i];
		kfree(group);
	}
}

static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
	hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p, *res = NULL;

	hash_for_each_possible(info->ptable, p, hnode, gfn) {
		if (gfn == p->gfn) {
			res = p;
			break;
		}
	}

	return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return;

	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))
		return;

	p->gfn = gfn;
	hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	if (p) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

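/*
 * mdev "create" callback: look up the requested vGPU type from the sysfs
 * kobject name and ask the GVT core to instantiate a vGPU of that type.
 */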
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;
	struct intel_vgpu_type *type;
	struct device *pdev;
	void *gvt;
	int ret;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type) {
		gvt_vgpu_err("failed to find type %s to create\n",
						kobject_name(kobj));
		ret = -EINVAL;
		goto out;
	}

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
		goto out;
	}

	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);

	vgpu->vdev.mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
	ret = 0;

out:
	return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	if (handle_valid(vgpu->handle))
		return -EBUSY;

	intel_gvt_ops->vgpu_destroy(vgpu);
	return 0;
}

static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.iommu_notifier);

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		unsigned long gfn, end_gfn;

		gfn = unmap->iova >> PAGE_SHIFT;
		end_gfn = gfn + unmap->size / PAGE_SIZE;

		while (gfn < end_gfn)
			gvt_cache_remove(vgpu, gfn++);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.group_notifier);

	/* the only action we care about */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
		vgpu->vdev.kvm = data;

		if (!data)
			schedule_work(&vgpu->vdev.release_work);
	}

	return NOTIFY_OK;
}

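/*
 * mdev "open" callback: register the IOMMU and group notifiers, then bind
 * the vGPU to the KVM instance published via VFIO_GROUP_NOTIFY_SET_KVM and
 * activate it; each step is unwound if a later one fails.
 */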
static int intel_vgpu_open(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long events;
	int ret;

	vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
	vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;

	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
				&vgpu->vdev.iommu_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
			ret);
		goto out;
	}

	events = VFIO_GROUP_NOTIFY_SET_KVM;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
				&vgpu->vdev.group_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
			ret);
		goto undo_iommu;
	}

	ret = kvmgt_guest_init(mdev);
	if (ret)
		goto undo_group;

	intel_gvt_ops->vgpu_activate(vgpu);

	atomic_set(&vgpu->vdev.released, 0);
	return ret;

undo_group:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);

undo_iommu:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
out:
	return ret;
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
	struct kvmgt_guest_info *info;
	int ret;

	if (!handle_valid(vgpu->handle))
		return;

	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
		return;

	intel_gvt_ops->vgpu_deactivate(vgpu);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);
	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);

	info = (struct kvmgt_guest_info *)vgpu->handle;
	kvmgt_guest_exit(info);

	vgpu->vdev.kvm = NULL;
	vgpu->handle = 0;
}

static void intel_vgpu_release(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	__intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
					vdev.release_work);

	__intel_vgpu_release(vgpu);
}

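/*
 * Read a BAR base address from the vGPU's virtual config space; for a
 * 64-bit memory BAR the high dword lives in the next BAR slot.
 */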
static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
	u32 start_lo, start_hi;
	u32 mem_type;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
						+ bar + 4));
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
	default:
		/* mem unknown type treated as 32-bit BAR */
		start_hi = 0;
		break;
	}

	return ((u64)start_hi << 32) | start_lo;
}

static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
			     void *buf, unsigned int count, bool is_write)
{
	uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
	int ret;

	if (is_write)
		ret = intel_gvt_ops->emulate_mmio_write(vgpu,
					bar_start + off, buf, count);
	else
		ret = intel_gvt_ops->emulate_mmio_read(vgpu,
					bar_start + off, buf, count);
	return ret;
}

static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
{
	return off >= vgpu_aperture_offset(vgpu) &&
	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
}

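/*
 * BAR2 (the GGTT aperture) is normally mmap'ed by userspace, but when the
 * region is accessed through read()/write() instead (e.g. QEMU started with
 * x-no-mmap=on), the access is emulated here by mapping the vGPU's slice of
 * the aperture write-combined and copying through it.
 */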
static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
		void *buf, unsigned long count, bool is_write)
{
	void *aperture_va;

	if (!intel_vgpu_in_aperture(vgpu, off) ||
	    !intel_vgpu_in_aperture(vgpu, off + count)) {
		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
		return -EINVAL;
	}

	aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
					ALIGN_DOWN(off, PAGE_SIZE),
					count + offset_in_page(off));
	if (!aperture_va)
		return -EIO;

	if (is_write)
		memcpy(aperture_va + offset_in_page(off), buf, count);
	else
		memcpy(buf, aperture_va + offset_in_page(off), count);

	io_mapping_unmap(aperture_va);

	return 0;
}

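/*
 * Common read/write path for the mdev file: the VFIO region index is packed
 * into the upper bits of *ppos (see the VFIO_PCI_OFFSET_* macros above) and
 * selects config-space emulation, BAR0 MMIO emulation, or the BAR2 aperture
 * path; other regions are not backed.
 */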
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
			size_t count, loff_t *ppos, bool is_write)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int ret = -EINVAL;

	if (index >= VFIO_PCI_NUM_REGIONS) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		if (is_write)
			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
						buf, count);
		else
			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
						buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
	default:
		gvt_vgpu_err("unsupported region: %u\n", index);
	}

	return ret == 0 ? count : ret;
}

static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
					false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t intel_vgpu_write(struct mdev_device *mdev,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val,
					sizeof(val), ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

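/*
 * mmap is only allowed for BAR2: the vGPU's aperture slice is mapped
 * directly into userspace with remap_pfn_range() after checking that the
 * request stays inside the slice.
 */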
static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
	unsigned int index;
	u64 virtaddr;
	unsigned long req_size, pgoff, req_start;
	pgprot_t pg_prot;
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index != VFIO_PCI_BAR2_REGION_INDEX)
		return -EINVAL;

	pg_prot = vma->vm_page_prot;
	virtaddr = vma->vm_start;
	req_size = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (!intel_vgpu_in_aperture(vgpu, req_start))
		return -EINVAL;
	if (req_start + req_size >
	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
		return -EINVAL;

	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}

static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
{
	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
		return 1;

	return 0;
}

static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, uint32_t flags,
			void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, uint32_t flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);
		}
		vgpu->vdev.msi_trigger = trigger;
	}

	return 0;
}

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
		unsigned int index, unsigned int start, unsigned int count,
		void *data)
{
	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
			unsigned int start, unsigned int count, uint32_t flags,
			void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = intel_vgpu_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = intel_vgpu_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vgpu, index, start, count, flags, data);
}

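/*
 * Device ioctl handler: implements VFIO_DEVICE_GET_INFO,
 * VFIO_DEVICE_GET_REGION_INFO (including a sparse-mmap capability for the
 * BAR2 aperture), VFIO_DEVICE_GET_IRQ_INFO, VFIO_DEVICE_SET_IRQS and
 * VFIO_DEVICE_RESET.
 */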
static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
			     unsigned long arg)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long minsz;

	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;
		info.flags |= VFIO_DEVICE_FLAGS_RESET;
		info.num_regions = VFIO_PCI_NUM_REGIONS;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		unsigned int i;
		int ret;
		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
		size_t size;
		int nr_areas = 1;
		int cap_type_id;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->gvt->device_info.cfg_space_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->cfg_space.bar[info.index].size;
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR1_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;
			break;
		case VFIO_PCI_BAR2_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = VFIO_REGION_INFO_FLAG_CAPS |
					VFIO_REGION_INFO_FLAG_MMAP |
					VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE;
			info.size = gvt_aperture_sz(vgpu->gvt);

			size = sizeof(*sparse) +
					(nr_areas * sizeof(*sparse->areas));
			sparse = kzalloc(size, GFP_KERNEL);
			if (!sparse)
				return -ENOMEM;

			sparse->nr_areas = nr_areas;
			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->areas[0].offset =
					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
			break;

		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;

			info.flags = 0;
			gvt_dbg_core("get region info bar:%d\n", info.index);
			break;

		case VFIO_PCI_ROM_REGION_INDEX:
		case VFIO_PCI_VGA_REGION_INDEX:
			gvt_dbg_core("get region info index:%d\n", info.index);
			break;
		default:
			{
				struct vfio_region_info_cap_type cap_type;

				if (info.index >= VFIO_PCI_NUM_REGIONS +
						vgpu->vdev.num_regions)
					return -EINVAL;
				info.index =
					array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vgpu->vdev.num_regions);

				i = info.index - VFIO_PCI_NUM_REGIONS;

				info.offset =
					VFIO_PCI_INDEX_TO_OFFSET(info.index);
				info.size = vgpu->vdev.region[i].size;
				info.flags = vgpu->vdev.region[i].flags;

				cap_type.type = vgpu->vdev.region[i].type;
				cap_type.subtype = vgpu->vdev.region[i].subtype;

				ret = vfio_info_add_capability(&caps,
						VFIO_REGION_INFO_CAP_TYPE,
						&cap_type);
				if (ret)
					return ret;
			}
		}

		if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
			switch (cap_type_id) {
			case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
				ret = vfio_info_add_capability(&caps,
					VFIO_REGION_INFO_CAP_SPARSE_MMAP,
					sparse);
				kfree(sparse);
				if (ret)
					return ret;
				break;
			default:
				return -EINVAL;
			}
		}

		if (caps.size) {
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX:
		case VFIO_PCI_MSI_IRQ_INDEX:
			break;
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = intel_vgpu_get_irq_count(vgpu, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);

			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
			if (ret) {
				gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
				return -EINVAL;
			}
			if (data_size) {
				data = memdup_user((void __user *)(arg + minsz),
						   data_size);
				if (IS_ERR(data))
					return PTR_ERR(data);
			}
		}

		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
					  hdr.start, hdr.count, data);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		intel_gvt_ops->vgpu_reset(vgpu);
		return 0;
	}

	return -ENOTTY;
}

static ssize_t
vgpu_id_show(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%d\n", vgpu->id);
	}
	return sprintf(buf, "\n");
}

static ssize_t
hw_id_show(struct device *dev, struct device_attribute *attr,
	   char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);

	if (mdev) {
		struct intel_vgpu *vgpu = (struct intel_vgpu *)
			mdev_get_drvdata(mdev);
		return sprintf(buf, "%u\n",
			       vgpu->shadow_ctx->hw_id);
	}
	return sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(vgpu_id);
static DEVICE_ATTR_RO(hw_id);

static struct attribute *intel_vgpu_attrs[] = {
	&dev_attr_vgpu_id.attr,
	&dev_attr_hw_id.attr,
	NULL
};

static const struct attribute_group intel_vgpu_group = {
	.name = "intel_vgpu",
	.attrs = intel_vgpu_attrs,
};

static const struct attribute_group *intel_vgpu_groups[] = {
	&intel_vgpu_group,
	NULL,
};

static const struct mdev_parent_ops intel_vgpu_ops = {
	.supported_type_groups = intel_vgpu_type_groups,
	.mdev_attr_groups = intel_vgpu_groups,
	.create = intel_vgpu_create,
	.remove = intel_vgpu_remove,

	.open = intel_vgpu_open,
	.release = intel_vgpu_release,

	.read = intel_vgpu_read,
	.write = intel_vgpu_write,
	.mmap = intel_vgpu_mmap,
	.ioctl = intel_vgpu_ioctl,
};

static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
	if (!intel_gvt_init_vgpu_type_groups(gvt))
		return -EFAULT;

	intel_gvt_ops = ops;

	return mdev_register_device(dev, &intel_vgpu_ops);
}

static void kvmgt_host_exit(struct device *dev, void *gvt)
{
	intel_gvt_cleanup_vgpu_type_groups(gvt);
	mdev_unregister_device(dev);
}

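/*
 * Guest page write-protection: GVT asks KVM to track writes to guest pages
 * it shadows (such as guest page-table pages); protected gfns are recorded
 * in info->ptable so the tracking can be torn down later.
 */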
static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_add(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}

static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return 0;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (!kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_del(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}

static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		const u8 *val, int len,
		struct kvm_page_track_notifier_node *node)
{
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
		intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
					(void *)val, len);
}

static void kvmgt_page_track_flush_slot(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		struct kvm_page_track_notifier_node *node)
{
	int i;
	gfn_t gfn;
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i < slot->npages; i++) {
		gfn = slot->base_gfn + i;
		if (kvmgt_gfn_is_write_protected(info, gfn)) {
			kvm_slot_page_track_remove_page(kvm, slot, gfn,
						KVM_PAGE_TRACK_WRITE);
			kvmgt_protect_table_del(info, gfn);
		}
	}
	spin_unlock(&kvm->mmu_lock);
}

static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
	struct intel_vgpu *itr;
	struct kvmgt_guest_info *info;
	int id;
	bool ret = false;

	mutex_lock(&vgpu->gvt->lock);
	for_each_active_vgpu(vgpu->gvt, itr, id) {
		if (!handle_valid(itr->handle))
			continue;

		info = (struct kvmgt_guest_info *)itr->handle;
		if (kvm && kvm == info->kvm) {
			ret = true;
			goto out;
		}
	}
out:
	mutex_unlock(&vgpu->gvt->lock);
	return ret;
}

static int kvmgt_guest_init(struct mdev_device *mdev)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;
	struct kvm *kvm;

	vgpu = mdev_get_drvdata(mdev);
	if (handle_valid(vgpu->handle))
		return -EEXIST;

	kvm = vgpu->vdev.kvm;
	if (!kvm || kvm->mm != current->mm) {
		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
		return -ESRCH;
	}

	if (__kvmgt_vgpu_exist(vgpu, kvm))
		return -EEXIST;

	info = vzalloc(sizeof(struct kvmgt_guest_info));
	if (!info)
		return -ENOMEM;

	vgpu->handle = (unsigned long)info;
	info->vgpu = vgpu;
	info->kvm = kvm;
	kvm_get_kvm(info->kvm);

	kvmgt_protect_table_init(info);
	gvt_cache_init(vgpu);

	info->track_node.track_write = kvmgt_page_track_write;
	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
	kvm_page_track_register_notifier(kvm, &info->track_node);

	return 0;
}

static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
	kvm_put_kvm(info->kvm);
	kvmgt_protect_table_destroy(info);
	gvt_cache_destroy(info->vgpu);
	vfree(info);

	return true;
}

static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
{
	/* nothing to do here */
	return 0;
}

static void kvmgt_detach_vgpu(unsigned long handle)
{
	/* nothing to do here */
}

static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;

	if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
		return 0;

	return -EFAULT;
}

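/*
 * Translate a guest frame number to a host iova the GPU can use for DMA:
 * consult the per-vGPU cache first, otherwise pin the page through VFIO,
 * map it with gvt_dma_map_iova() and remember the result.
 */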
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
	unsigned long iova, pfn;
	struct kvmgt_guest_info *info;
	struct device *dev;
	struct intel_vgpu *vgpu;
	int rc;

	if (!handle_valid(handle))
		return INTEL_GVT_INVALID_ADDR;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;
	iova = gvt_cache_find(info->vgpu, gfn);
	if (iova != INTEL_GVT_INVALID_ADDR)
		return iova;

	pfn = INTEL_GVT_INVALID_ADDR;
	dev = mdev_dev(info->vgpu->vdev.mdev);
	rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
	if (rc != 1) {
		gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
			gfn, rc);
		return INTEL_GVT_INVALID_ADDR;
	}
	/* transfer to host iova for GFX to use DMA */
	rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
	if (rc) {
		gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
		vfio_unpin_pages(dev, &gfn, 1);
		return INTEL_GVT_INVALID_ADDR;
	}

	gvt_cache_add(info->vgpu, gfn, iova);
	return iova;
}

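/*
 * Read or write guest physical memory via KVM.  This may be called from a
 * GVT kernel thread, in which case the guest's mm must be temporarily
 * adopted with use_mm() before kvm_read_guest()/kvm_write_guest() can walk
 * the user address space.
 */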
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len, bool write)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	int idx, ret;
	bool kthread = current->mm == NULL;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	if (kthread)
		use_mm(kvm->mm);

	idx = srcu_read_lock(&kvm->srcu);
	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
		      kvm_read_guest(kvm, gpa, buf, len);
	srcu_read_unlock(&kvm->srcu, idx);

	if (kthread)
		unuse_mm(kvm->mm);

	return ret;
}

static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, false);
}

static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, true);
}

static unsigned long kvmgt_virt_to_pfn(void *addr)
{
	return PFN_DOWN(__pa(addr));
}

struct intel_gvt_mpt kvmgt_mpt = {
	.host_init = kvmgt_host_init,
	.host_exit = kvmgt_host_exit,
	.attach_vgpu = kvmgt_attach_vgpu,
	.detach_vgpu = kvmgt_detach_vgpu,
	.inject_msi = kvmgt_inject_msi,
	.from_virt_to_mfn = kvmgt_virt_to_pfn,
	.set_wp_page = kvmgt_write_protect_add,
	.unset_wp_page = kvmgt_write_protect_remove,
	.read_gpa = kvmgt_read_gpa,
	.write_gpa = kvmgt_write_gpa,
	.gfn_to_mfn = kvmgt_gfn_to_pfn,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);

static int __init kvmgt_init(void)
{
	return 0;
}

static void __exit kvmgt_exit(void)
{
}

module_init(kvmgt_init);
module_exit(kvmgt_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Intel Corporation");