virt/kvm/coalesced_mmio.c
// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

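/*
 * A write is batchable only when (addr, len) lies entirely inside this
 * device's zone. For example, with a zone at 0x100 of size 0x10, an
 * access (0x108, 8) is accepted, (0x10c, 8) runs past the end of the
 * zone, and an access whose addr + len wraps around the guest physical
 * address space is caught by the overflow check.
 */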
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
                                   gpa_t addr, int len)
{
        /*
         * Is the access in a batchable area, i.e. is (addr, len)
         * fully contained in (zone->addr, zone->size)?
         */
        if (len < 0)
                return 0;
        if (addr + len < addr)
                /* addr + len wrapped around */
                return 0;
        if (addr < dev->zone.addr)
                return 0;
        if (addr + len > dev->zone.addr + dev->zone.size)
                return 0;
        return 1;
}

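/*
 * Worked example with a hypothetical ring size of 8: with first = 2 and
 * last = 1 there are (2 + 8 - 1 - 1) % 8 = 0 free entries, so the ring
 * is full (the slot at index 1 stays unused so that first == last can
 * only mean "empty"); with first = 2 and last = 5 there are
 * (2 + 8 - 5 - 1) % 8 = 4 free entries.
 */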
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
        struct kvm_coalesced_mmio_ring *ring;
        unsigned avail;

        /* Are we able to batch it? */

        /*
         * last is the first free entry; check that it does not catch up
         * with the first used entry. One entry is always left unused so
         * that a full ring can be told apart from an empty one.
         *
         * Add KVM_COALESCED_MMIO_MAX before subtracting so the
         * intermediate value cannot wrap through UINT_MAX: unless
         * KVM_COALESCED_MMIO_MAX happens to divide 2^32, a plain
         * first - last - 1 would compute the wrong residue whenever
         * first <= last.
         */
        ring = dev->kvm->coalesced_mmio_ring;
        avail = (ring->first + KVM_COALESCED_MMIO_MAX - ring->last - 1) %
                KVM_COALESCED_MMIO_MAX;
        if (avail == 0) {
                /* full */
                return 0;
        }

        return 1;
}

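/*
 * Producer side of the ring: the kernel appends entries at last under
 * ring_lock, while userspace consumes them at first; both indices live
 * in the shared coalesced_mmio_ring page.
 */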
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
                                struct kvm_io_device *this, gpa_t addr,
                                int len, const void *val)
{
        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
        struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

        if (!coalesced_mmio_in_range(dev, addr, len))
                return -EOPNOTSUPP;

        spin_lock(&dev->kvm->ring_lock);

        if (!coalesced_mmio_has_room(dev)) {
                spin_unlock(&dev->kvm->ring_lock);
                return -EOPNOTSUPP;
        }

        /* copy data into the first free entry of the ring */

        ring->coalesced_mmio[ring->last].phys_addr = addr;
        ring->coalesced_mmio[ring->last].len = len;
        memcpy(ring->coalesced_mmio[ring->last].data, val, len);

        /* make the entry visible to userspace before advancing last */
        smp_wmb();
        ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
        spin_unlock(&dev->kvm->ring_lock);
        return 0;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

        list_del(&dev->list);

        kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
        .write      = coalesced_mmio_write,
        .destructor = coalesced_mmio_destructor,
};

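/*
 * The ring page allocated here is shared with userspace: it is exposed
 * through mmap() of the vcpu fd at KVM_COALESCED_MMIO_PAGE_OFFSET *
 * PAGE_SIZE, which is how the entries queued by coalesced_mmio_write()
 * are consumed.
 */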
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
        struct page *page;
        int ret;

        ret = -ENOMEM;
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                goto out_err;

        ret = 0;
        kvm->coalesced_mmio_ring = page_address(page);

        /*
         * We're using this spinlock to sync access to the coalesced ring.
         * The list doesn't need its own lock since device registration and
         * unregistration should only happen when kvm->slots_lock is held.
         */
        spin_lock_init(&kvm->ring_lock);
        INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
        return ret;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
        if (kvm->coalesced_mmio_ring)
                free_page((unsigned long)kvm->coalesced_mmio_ring);
}

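/*
 * Called for the KVM_REGISTER_COALESCED_MMIO vm ioctl. A userspace
 * caller might register a zone like this (illustrative values):
 *
 *      struct kvm_coalesced_mmio_zone zone = {
 *              .addr = 0xfed00000,
 *              .size = 0x1000,
 *      };
 *      ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 */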
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
                                         struct kvm_coalesced_mmio_zone *zone)
{
        int ret;
        struct kvm_coalesced_mmio_dev *dev;

        dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
        dev->kvm = kvm;
        dev->zone = *zone;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
                                      zone->size, &dev->dev);
        if (ret < 0)
                goto out_free_dev;
        list_add_tail(&dev->list, &kvm->coalesced_zones);
        mutex_unlock(&kvm->slots_lock);

        return 0;

out_free_dev:
        mutex_unlock(&kvm->slots_lock);
        kfree(dev);

        return ret;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
                                           struct kvm_coalesced_mmio_zone *zone)
{
        struct kvm_coalesced_mmio_dev *dev, *tmp;

        mutex_lock(&kvm->slots_lock);

        list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
                if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
                        kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
                        kvm_iodevice_destructor(&dev->dev);
                }

        mutex_unlock(&kvm->slots_lock);

        return 0;
}
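
/*
 * Consumer sketch (hypothetical helper names): after KVM_RUN returns,
 * userspace drains the ring it obtained by mmap()ing the vcpu fd at
 * KVM_COALESCED_MMIO_PAGE_OFFSET * PAGE_SIZE:
 *
 *      while (ring->first != ring->last) {
 *              struct kvm_coalesced_mmio *ent =
 *                      &ring->coalesced_mmio[ring->first];
 *              handle_mmio_write(ent->phys_addr, ent->data, ent->len);
 *              ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *      }
 */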