hw/mem/memory-device.c

/*
 * Memory Device Interface
 *
 * Copyright ProfitBricks GmbH 2012
 * Copyright (C) 2014 Red Hat Inc
 * Copyright (c) 2018 Red Hat Inc
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/mem/memory-device.h"
#include "hw/qdev.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "qemu/range.h"
#include "hw/virtio/vhost.h"
#include "sysemu/kvm.h"
#include "trace.h"

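/* GCompareFunc: order memory devices by their assigned start address. */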
static gint memory_device_addr_sort(gconstpointer a, gconstpointer b)
{
    const MemoryDeviceState *md_a = MEMORY_DEVICE(a);
    const MemoryDeviceState *md_b = MEMORY_DEVICE(b);
    const MemoryDeviceClass *mdc_a = MEMORY_DEVICE_GET_CLASS(a);
    const MemoryDeviceClass *mdc_b = MEMORY_DEVICE_GET_CLASS(b);
    const uint64_t addr_a = mdc_a->get_addr(md_a);
    const uint64_t addr_b = mdc_b->get_addr(md_b);

    if (addr_a > addr_b) {
        return 1;
    } else if (addr_a < addr_b) {
        return -1;
    }
    return 0;
}

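/*
 * Recursively collect all realized memory devices below @obj into the
 * GSList pointed to by @opaque, sorted by start address.
 */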
static int memory_device_build_list(Object *obj, void *opaque)
{
    GSList **list = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        DeviceState *dev = DEVICE(obj);
        if (dev->realized) { /* only realized memory devices matter */
            *list = g_slist_insert_sorted(*list, dev, memory_device_addr_sort);
        }
    }

    object_child_foreach(obj, memory_device_build_list, opaque);
    return 0;
}

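/*
 * Recursively sum up the region sizes of all realized memory devices
 * below @obj; @opaque points to the uint64_t accumulator.
 */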
static int memory_device_used_region_size(Object *obj, void *opaque)
{
    uint64_t *size = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        const DeviceState *dev = DEVICE(obj);
        const MemoryDeviceState *md = MEMORY_DEVICE(obj);

        if (dev->realized) {
            *size += memory_device_get_region_size(md, &error_abort);
        }
    }

    object_child_foreach(obj, memory_device_used_region_size, opaque);
    return 0;
}

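/*
 * Check whether a memory device of @size bytes can still be plugged:
 * KVM and vhost must have a free memory slot, and the sum of all plugged
 * regions plus @size must not exceed the memory reserved for memory
 * devices (maxram_size - ram_size).
 */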
static void memory_device_check_addable(MachineState *ms, uint64_t size,
                                        Error **errp)
{
    uint64_t used_region_size = 0;

    /* we will need a new memory slot for kvm and vhost */
    if (kvm_enabled() && !kvm_has_free_slot(ms)) {
        error_setg(errp, "hypervisor has no free memory slots left");
        return;
    }
    if (!vhost_has_free_slot()) {
        error_setg(errp, "a used vhost backend has no free memory slots left");
        return;
    }

    /* will we exceed the total amount of memory specified */
    memory_device_used_region_size(OBJECT(ms), &used_region_size);
    if (used_region_size + size < used_region_size ||
        used_region_size + size > ms->maxram_size - ms->ram_size) {
        error_setg(errp, "not enough space, currently 0x%" PRIx64
                   " in use of total space for memory devices 0x" RAM_ADDR_FMT,
                   used_region_size, ms->maxram_size - ms->ram_size);
        return;
    }
}

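/*
 * Find a free address range of @size bytes (aligned to @align) inside the
 * machine's device memory region. If @hint is non-NULL, only that exact
 * address is considered; otherwise the first sufficiently large gap between
 * already plugged memory devices is used.
 */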
static uint64_t memory_device_get_free_addr(MachineState *ms,
                                            const uint64_t *hint,
                                            uint64_t align, uint64_t size,
                                            Error **errp)
{
    GSList *list = NULL, *item;
    Range as, new = range_empty;

    if (!ms->device_memory) {
        error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
                   "supported by the machine");
        return 0;
    }

    if (!memory_region_size(&ms->device_memory->mr)) {
        error_setg(errp, "memory devices (e.g. for memory hotplug) are not "
                   "enabled, please specify the maxmem option");
        return 0;
    }
    range_init_nofail(&as, ms->device_memory->base,
                      memory_region_size(&ms->device_memory->mr));

    /* start of address space indicates the maximum alignment we expect */
    if (!QEMU_IS_ALIGNED(range_lob(&as), align)) {
        error_setg(errp, "the alignment (0x%" PRIx64 ") is not supported",
                   align);
        return 0;
    }

    memory_device_check_addable(ms, size, errp);
    if (*errp) {
        return 0;
    }

    if (hint && !QEMU_IS_ALIGNED(*hint, align)) {
        error_setg(errp, "address must be aligned to 0x%" PRIx64 " bytes",
                   align);
        return 0;
    }

    if (!QEMU_IS_ALIGNED(size, align)) {
        error_setg(errp, "backend memory size must be multiple of 0x%"
                   PRIx64, align);
        return 0;
    }

    if (hint) {
        if (range_init(&new, *hint, size) || !range_contains_range(&as, &new)) {
            error_setg(errp, "can't add memory device [0x%" PRIx64 ":0x%" PRIx64
                       "], usable range for memory devices [0x%" PRIx64 ":0x%"
                       PRIx64 "]", *hint, size, range_lob(&as),
                       range_size(&as));
            return 0;
        }
    } else {
        if (range_init(&new, range_lob(&as), size)) {
            error_setg(errp, "can't add memory device, device too big");
            return 0;
        }
    }

    /* find address range that will fit new memory device */
    object_child_foreach(OBJECT(ms), memory_device_build_list, &list);
    for (item = list; item; item = g_slist_next(item)) {
        const MemoryDeviceState *md = item->data;
        const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(OBJECT(md));
        uint64_t next_addr;
        Range tmp;

        range_init_nofail(&tmp, mdc->get_addr(md),
                          memory_device_get_region_size(md, &error_abort));

        if (range_overlaps_range(&tmp, &new)) {
            if (hint) {
                const DeviceState *d = DEVICE(md);
                error_setg(errp, "address range conflicts with memory device"
                           " id='%s'", d->id ? d->id : "(unnamed)");
                goto out;
            }

            next_addr = QEMU_ALIGN_UP(range_upb(&tmp) + 1, align);
            if (!next_addr || range_init(&new, next_addr, range_size(&new))) {
                range_make_empty(&new);
                break;
            }
        }
    }

    if (!range_contains_range(&as, &new)) {
        error_setg(errp, "could not find position in guest address space for "
                   "memory device - memory fragmented due to alignments");
        goto out;
    }
out:
    g_slist_free(list);
    return range_lob(&new);
}

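/*
 * QMP helper: return a list describing all realized memory devices,
 * ordered by address (implements query-memory-devices).
 */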
MemoryDeviceInfoList *qmp_memory_device_list(void)
{
    GSList *devices = NULL, *item;
    MemoryDeviceInfoList *list = NULL, *prev = NULL;

    object_child_foreach(qdev_get_machine(), memory_device_build_list,
                         &devices);

    for (item = devices; item; item = g_slist_next(item)) {
        const MemoryDeviceState *md = MEMORY_DEVICE(item->data);
        const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(item->data);
        MemoryDeviceInfoList *elem = g_new0(MemoryDeviceInfoList, 1);
        MemoryDeviceInfo *info = g_new0(MemoryDeviceInfo, 1);

        mdc->fill_device_info(md, info);

        elem->value = info;
        elem->next = NULL;
        if (prev) {
            prev->next = elem;
        } else {
            list = elem;
        }
        prev = elem;
    }

    g_slist_free(devices);

    return list;
}

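/*
 * Recursively sum up the plugged size of all realized memory devices
 * below @obj; @opaque points to the uint64_t accumulator.
 */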
static int memory_device_plugged_size(Object *obj, void *opaque)
{
    uint64_t *size = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        const DeviceState *dev = DEVICE(obj);
        const MemoryDeviceState *md = MEMORY_DEVICE(obj);
        const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(obj);

        if (dev->realized) {
            *size += mdc->get_plugged_size(md, &error_abort);
        }
    }

    object_child_foreach(obj, memory_device_plugged_size, opaque);
    return 0;
}

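/* Total amount of memory currently plugged via memory devices, in bytes. */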
uint64_t get_plugged_memory_size(void)
{
    uint64_t size = 0;

    memory_device_plugged_size(qdev_get_machine(), &size);

    return size;
}

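/*
 * Pre-plug handler: resolve the device's memory region, pick (or verify)
 * an address inside the device memory region and store it via set_addr().
 * Must succeed before memory_device_plug() is called.
 */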
void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms,
                            const uint64_t *legacy_align, Error **errp)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    Error *local_err = NULL;
    uint64_t addr, align;
    MemoryRegion *mr;

    mr = mdc->get_memory_region(md, &local_err);
    if (local_err) {
        goto out;
    }

    align = legacy_align ? *legacy_align : memory_region_get_alignment(mr);
    addr = mdc->get_addr(md);
    addr = memory_device_get_free_addr(ms, !addr ? NULL : &addr, align,
                                       memory_region_size(mr), &local_err);
    if (local_err) {
        goto out;
    }
    mdc->set_addr(md, addr, &local_err);
    if (!local_err) {
        trace_memory_device_pre_plug(DEVICE(md)->id ? DEVICE(md)->id : "",
                                     addr);
    }
out:
    error_propagate(errp, local_err);
}

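/*
 * Plug handler: map the device's memory region into the machine's device
 * memory region at the address assigned during pre-plug.
 */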
void memory_device_plug(MemoryDeviceState *md, MachineState *ms)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    const uint64_t addr = mdc->get_addr(md);
    MemoryRegion *mr;

    /*
     * We expect that a previous call to memory_device_pre_plug() succeeded, so
     * it can't fail at this point.
     */
    mr = mdc->get_memory_region(md, &error_abort);
    g_assert(ms->device_memory);

    memory_region_add_subregion(&ms->device_memory->mr,
                                addr - ms->device_memory->base, mr);
    trace_memory_device_plug(DEVICE(md)->id ? DEVICE(md)->id : "", addr);
}

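/*
 * Unplug handler: remove the device's memory region from the machine's
 * device memory region again.
 */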
void memory_device_unplug(MemoryDeviceState *md, MachineState *ms)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    MemoryRegion *mr;

    /*
     * We expect that a previous call to memory_device_pre_plug() succeeded, so
     * it can't fail at this point.
     */
    mr = mdc->get_memory_region(md, &error_abort);
    g_assert(ms->device_memory);

    memory_region_del_subregion(&ms->device_memory->mr, mr);
    trace_memory_device_unplug(DEVICE(md)->id ? DEVICE(md)->id : "",
                               mdc->get_addr(md));
}

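/*
 * Size of the memory region backing @md, or 0 (with @errp set) if the
 * region cannot be resolved.
 */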
uint64_t memory_device_get_region_size(const MemoryDeviceState *md,
                                       Error **errp)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    MemoryRegion *mr;

    /* dropping const here is fine as we don't touch the memory region */
    mr = mdc->get_memory_region((MemoryDeviceState *)md, errp);
    if (!mr) {
        return 0;
    }

    return memory_region_size(mr);
}

static const TypeInfo memory_device_info = {
    .name = TYPE_MEMORY_DEVICE,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(MemoryDeviceClass),
};

static void memory_device_register_types(void)
{
    type_register_static(&memory_device_info);
}

type_init(memory_device_register_types)