/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	struct kvm_coalesced_mmio_zone *zone;
	int i;

	/* Is the access inside a batchable area? */

	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/* (addr, len) must be fully contained in
		 * (zone->addr, zone->size)
		 */

		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}

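/*
 * Free-slot accounting for the ring below.  ring->first indexes the
 * oldest used entry, ring->last the first free one, and one slot is
 * always left unused so that an empty ring (first == last) can be told
 * apart from a full one.  A worked example with an illustrative
 * power-of-two size of 8 (the real bound is KVM_COALESCED_MMIO_MAX):
 *
 *	first = 5, last = 4: avail = (5 - 4 - 1) % 8 = 0           -> full
 *	first = 1, last = 5: entries 1..4 in use, so
 *	                     avail = (1 - 5 - 1) % 8 = (-5u) % 8 = 3 free
 */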
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Are we able to batch it?
	 *
	 * last is the first free entry; check that it does not catch
	 * up with the first used entry.  There is always one unused
	 * entry in the buffer.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->lock);

	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->lock);
		return -EOPNOTSUPP;
	}

	/* copy data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);

	/* publish the entry before advancing last, so the userspace
	 * consumer never observes the new index with stale entry data
	 */
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->lock);
	return 0;
}

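/*
 * For illustration, a minimal sketch of the userspace consumer side
 * (not part of this file; handle_mmio is a hypothetical handler).  The
 * ring page is mapped from the vcpu fd at offset
 * KVM_COALESCED_MMIO_PAGE_OFFSET * PAGE_SIZE, and entries are drained
 * by advancing ring->first, mirroring the producer above:
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent =
 *			&ring->coalesced_mmio[ring->first];
 *		handle_mmio(ent->phys_addr, ent->data, ent->len);
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 *
 * A real consumer also needs the memory barriers matching the
 * producer's smp_wmb() above.
 */
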
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

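/*
 * Note that no .read handler is provided: the device only claims writes
 * that fall inside a registered zone, so reads of those addresses still
 * take the normal MMIO exit path.
 */
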
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;
	struct page *page;
	int ret;

	ret = -ENOMEM;
	/* this page is shared with userspace, which maps it from the
	 * vcpu fd and consumes the ring entries directly
	 */
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;
	kvm->coalesced_mmio_ring = page_address(page);

	ret = -ENOMEM;
	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		goto out_free_page;
	spin_lock_init(&dev->lock);
	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0)
		goto out_free_dev;

	return ret;

out_free_dev:
	kvm->coalesced_mmio_dev = NULL;
	kfree(dev);
out_free_page:
	kvm->coalesced_mmio_ring = NULL;
	__free_page(page);
out_err:
	return ret;
}

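/*
 * Only the ring page is released here; the device struct itself is
 * freed through coalesced_mmio_destructor() when the VM's MMIO bus is
 * torn down.
 */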
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

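/*
 * A minimal usage sketch (illustrative values): userspace registers a
 * zone on the VM fd, e.g. the classic VGA window, with
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xa0000,
 *		.size = 0x10000,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 *
 * after which guest writes into that range are batched in the ring
 * instead of exiting to userspace on every access.
 */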
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -ENXIO;

	mutex_lock(&kvm->slots_lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		mutex_unlock(&kvm->slots_lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	mutex_unlock(&kvm->slots_lock);
	return 0;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	int i;
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;

	if (dev == NULL)
		return -ENXIO;

	mutex_lock(&kvm->slots_lock);

	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/* unregister every zone fully contained in
		 * (zone->addr, zone->size), compacting the array by
		 * moving the last zone into the freed slot
		 */

		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
			dev->nb_zones--;
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}