/*
 * Virtio MEM device
 *
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Authors:
 *  David Hildenbrand <david@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/units.h"
#include "sysemu/numa.h"
#include "sysemu/sysemu.h"
#include "sysemu/reset.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-mem.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "exec/ram_addr.h"
#include "migration/misc.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include CONFIG_DEVICES
#include "trace.h"

/*
 * Let's not allow blocks smaller than 1 MiB, for example, to keep the tracking
 * bitmap small.
 */
#define VIRTIO_MEM_MIN_BLOCK_SIZE ((uint32_t)(1 * MiB))

#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc64__)
#define VIRTIO_MEM_DEFAULT_THP_SIZE ((uint32_t)(2 * MiB))
#else
        /* fallback to 1 MiB (e.g., the THP size on s390x) */
#define VIRTIO_MEM_DEFAULT_THP_SIZE VIRTIO_MEM_MIN_BLOCK_SIZE
#endif

/*
 * We want to have a reasonable default block size such that
 * 1. We avoid splitting THPs when unplugging memory, which degrades
 *    performance.
 * 2. We avoid placing THPs for plugged blocks that also cover unplugged
 *    blocks.
 *
 * The actual THP size might differ between Linux kernels, so we try to probe
 * it. In the future (if we ever run into issues regarding 2.), we might want
 * to disable THP in case we fail to properly probe the THP size, or if the
 * block size is configured smaller than the THP size.
 */
static uint32_t thp_size;

#define HPAGE_PMD_SIZE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size"
static uint32_t virtio_mem_thp_size(void)
{
    gchar *content = NULL;
    const char *endptr;
    uint64_t tmp;

    if (thp_size) {
        return thp_size;
    }

    /*
     * Try to probe the actual THP size, falling back to (sane but possibly
     * incorrect) default sizes.
     */
    if (g_file_get_contents(HPAGE_PMD_SIZE_PATH, &content, NULL, NULL) &&
        !qemu_strtou64(content, &endptr, 0, &tmp) &&
        (!endptr || *endptr == '\n')) {
        /*
         * Sanity-check the value: if it's too big (e.g., aarch64 with 64k base
         * pages) or weird, fall back to something smaller.
         */
        if (!tmp || !is_power_of_2(tmp) || tmp > 16 * MiB) {
            warn_report("Read unsupported THP size: %" PRIx64, tmp);
        } else {
            thp_size = tmp;
        }
    }

    if (!thp_size) {
        thp_size = VIRTIO_MEM_DEFAULT_THP_SIZE;
        warn_report("Could not detect THP size, falling back to %" PRIx64
                    " MiB.", thp_size / MiB);
    }

    g_free(content);
    return thp_size;
}
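
/*
 * Example (illustrative, not from a real run): on a typical x86-64 host,
 * the sysfs file contains the THP size in bytes followed by a newline,
 * e.g., "2097152\n". qemu_strtou64() then yields tmp == 2 * MiB, which
 * passes the sanity checks (non-zero, power of two, <= 16 MiB), so
 * thp_size becomes 2 MiB. Otherwise, we end up with
 * VIRTIO_MEM_DEFAULT_THP_SIZE.
 */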

static uint64_t virtio_mem_default_block_size(RAMBlock *rb)
{
    const uint64_t page_size = qemu_ram_pagesize(rb);

    /* We can have hugetlbfs with a page size smaller than the THP size. */
    if (page_size == qemu_real_host_page_size) {
        return MAX(page_size, virtio_mem_thp_size());
    }
    return MAX(page_size, VIRTIO_MEM_MIN_BLOCK_SIZE);
}
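
/*
 * Examples (illustrative): with an ordinary 4 KiB base-page backend, the
 * default block size is MAX(4 KiB, thp_size), typically 2 MiB. With a
 * 2 MiB hugetlbfs backend, it is MAX(2 MiB, 1 MiB) == 2 MiB; with a
 * (hypothetical) 64 KiB hugetlbfs backend, it is MAX(64 KiB, 1 MiB) ==
 * 1 MiB, as the THP size does not apply there.
 */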

/*
 * Size the usable region bigger than the requested size, if possible. Esp.
 * Linux guests will only add (aligned) memory blocks if they fully fit into
 * the usable region, but plug+online only a subset of the pages. The memory
 * block size corresponds mostly to the section size.
 *
 * This allows, e.g., adding 20MB with a section size of 128MB on x86_64, and
 * a section size of 1GB on arm64 (as long as the start address is properly
 * aligned, similar to ordinary DIMMs).
 *
 * We can change this at any time, and maybe even make it configurable if
 * necessary (as the section size can change). But it's more likely that the
 * section size will rather get smaller and not bigger over time.
 */
#if defined(TARGET_X86_64) || defined(TARGET_I386)
#define VIRTIO_MEM_USABLE_EXTENT (2 * (128 * MiB))
#else
#error VIRTIO_MEM_USABLE_EXTENT not defined
#endif
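
/*
 * Example (illustrative): with requested-size=20M and a 4 GiB memdev, the
 * usable region covers 20 MiB + 256 MiB = 276 MiB (capped at the memdev
 * size, aligned up to the block size in virtio_mem_resize_usable_region()).
 * A Linux guest with 128 MiB memory blocks can thus add two aligned blocks
 * and plug+online only the requested 20 MiB within them.
 */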

static bool virtio_mem_is_busy(void)
{
    /*
     * Postcopy cannot handle concurrent discards and we don't want to migrate
     * pages on-demand with stale content when plugging new blocks.
     *
     * For precopy, we don't want unplugged blocks in our migration stream, and
     * when plugging new blocks, the page content might differ between source
     * and destination (observable by the guest when not initializing pages
     * after plugging them) until we're running on the destination (as we didn't
     * migrate these blocks when they were unplugged).
     */
    return migration_in_incoming_postcopy() || !migration_is_idle();
}

typedef int (*virtio_mem_range_cb)(const VirtIOMEM *vmem, void *arg,
                                   uint64_t offset, uint64_t size);

static int virtio_mem_for_each_unplugged_range(const VirtIOMEM *vmem, void *arg,
                                               virtio_mem_range_cb cb)
{
    unsigned long first_zero_bit, last_zero_bit;
    uint64_t offset, size;
    int ret = 0;

    first_zero_bit = find_first_zero_bit(vmem->bitmap, vmem->bitmap_size);
    while (first_zero_bit < vmem->bitmap_size) {
        offset = first_zero_bit * vmem->block_size;
        last_zero_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size,
                                      first_zero_bit + 1) - 1;
        size = (last_zero_bit - first_zero_bit + 1) * vmem->block_size;

        ret = cb(vmem, arg, offset, size);
        if (ret) {
            break;
        }
        first_zero_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size,
                                            last_zero_bit + 2);
    }
    return ret;
}
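
/*
 * Example (illustrative): with block_size=2MiB, bitmap_size=4 and the
 * bitmap 0b1001 (blocks 0 and 3 plugged), the callback runs exactly once,
 * with offset=2MiB and size=4MiB, covering the zero-bit run [1..2].
 * Restarting the search at last_zero_bit + 2 is safe because
 * last_zero_bit + 1 is known to be set (or out of range).
 */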

/*
 * Adjust the memory section to cover the intersection with the given range.
 *
 * Returns false if the intersection is empty, otherwise returns true.
 */
static bool virtio_mem_intersect_memory_section(MemoryRegionSection *s,
                                                uint64_t offset, uint64_t size)
{
    uint64_t start = MAX(s->offset_within_region, offset);
    uint64_t end = MIN(s->offset_within_region + int128_get64(s->size),
                       offset + size);

    if (end <= start) {
        return false;
    }

    s->offset_within_address_space += start - s->offset_within_region;
    s->offset_within_region = start;
    s->size = int128_make64(end - start);
    return true;
}
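
/*
 * Example (illustrative): a section covering region offsets [0, 256MiB)
 * intersected with offset=128MiB, size=256MiB is narrowed to
 * [128MiB, 256MiB): offset_within_region and offset_within_address_space
 * both advance by 128 MiB, and the size shrinks to 128 MiB.
 */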

typedef int (*virtio_mem_section_cb)(MemoryRegionSection *s, void *arg);

static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem,
                                               MemoryRegionSection *s,
                                               void *arg,
                                               virtio_mem_section_cb cb)
{
    unsigned long first_bit, last_bit;
    uint64_t offset, size;
    int ret = 0;

    first_bit = s->offset_within_region / vmem->block_size;
    first_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size, first_bit);
    while (first_bit < vmem->bitmap_size) {
        MemoryRegionSection tmp = *s;

        offset = first_bit * vmem->block_size;
        last_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size,
                                      first_bit + 1) - 1;
        size = (last_bit - first_bit + 1) * vmem->block_size;

        if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
            break;
        }
        ret = cb(&tmp, arg);
        if (ret) {
            break;
        }
        first_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size,
                                  last_bit + 2);
    }
    return ret;
}

static int virtio_mem_notify_populate_cb(MemoryRegionSection *s, void *arg)
{
    RamDiscardListener *rdl = arg;

    return rdl->notify_populate(rdl, s);
}

static int virtio_mem_notify_discard_cb(MemoryRegionSection *s, void *arg)
{
    RamDiscardListener *rdl = arg;

    rdl->notify_discard(rdl, s);
    return 0;
}

static void virtio_mem_notify_unplug(VirtIOMEM *vmem, uint64_t offset,
                                     uint64_t size)
{
    RamDiscardListener *rdl;

    QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
        MemoryRegionSection tmp = *rdl->section;

        if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
            continue;
        }
        rdl->notify_discard(rdl, &tmp);
    }
}

static int virtio_mem_notify_plug(VirtIOMEM *vmem, uint64_t offset,
                                  uint64_t size)
{
    RamDiscardListener *rdl, *rdl2;
    int ret = 0;

    QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
        MemoryRegionSection tmp = *rdl->section;

        if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
            continue;
        }
        ret = rdl->notify_populate(rdl, &tmp);
        if (ret) {
            break;
        }
    }

    if (ret) {
        /* Notify all already-notified listeners. */
        QLIST_FOREACH(rdl2, &vmem->rdl_list, next) {
            MemoryRegionSection tmp = *rdl2->section;

            if (rdl2 == rdl) {
                break;
            }
            if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
                continue;
            }
            rdl2->notify_discard(rdl2, &tmp);
        }
    }
    return ret;
}

static void virtio_mem_notify_unplug_all(VirtIOMEM *vmem)
{
    RamDiscardListener *rdl;

    if (!vmem->size) {
        return;
    }

    QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
        if (rdl->double_discard_supported) {
            rdl->notify_discard(rdl, rdl->section);
        } else {
            virtio_mem_for_each_plugged_section(vmem, rdl->section, rdl,
                                                virtio_mem_notify_discard_cb);
        }
    }
}

static bool virtio_mem_test_bitmap(const VirtIOMEM *vmem, uint64_t start_gpa,
                                   uint64_t size, bool plugged)
{
    const unsigned long first_bit = (start_gpa - vmem->addr) / vmem->block_size;
    const unsigned long last_bit = first_bit + (size / vmem->block_size) - 1;
    unsigned long found_bit;

    /* We fake a shorter bitmap to avoid searching too far. */
    if (plugged) {
        found_bit = find_next_zero_bit(vmem->bitmap, last_bit + 1, first_bit);
    } else {
        found_bit = find_next_bit(vmem->bitmap, last_bit + 1, first_bit);
    }
    return found_bit > last_bit;
}
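
/*
 * Example (illustrative): with addr=4GiB and block_size=2MiB, testing
 * start_gpa=4GiB + 4MiB, size=4MiB, plugged=true inspects bits [2..3]. If
 * both are set, the zero-bit search (capped at last_bit + 1) comes back
 * empty-handed, i.e., returns a value > last_bit, and the range counts as
 * fully plugged.
 */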

static void virtio_mem_set_bitmap(VirtIOMEM *vmem, uint64_t start_gpa,
                                  uint64_t size, bool plugged)
{
    const unsigned long bit = (start_gpa - vmem->addr) / vmem->block_size;
    const unsigned long nbits = size / vmem->block_size;

    if (plugged) {
        bitmap_set(vmem->bitmap, bit, nbits);
    } else {
        bitmap_clear(vmem->bitmap, bit, nbits);
    }
}

static void virtio_mem_send_response(VirtIOMEM *vmem, VirtQueueElement *elem,
                                     struct virtio_mem_resp *resp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(vmem);
    VirtQueue *vq = vmem->vq;

    trace_virtio_mem_send_response(le16_to_cpu(resp->type));
    iov_from_buf(elem->in_sg, elem->in_num, 0, resp, sizeof(*resp));

    virtqueue_push(vq, elem, sizeof(*resp));
    virtio_notify(vdev, vq);
}

static void virtio_mem_send_response_simple(VirtIOMEM *vmem,
                                            VirtQueueElement *elem,
                                            uint16_t type)
{
    struct virtio_mem_resp resp = {
        .type = cpu_to_le16(type),
    };

    virtio_mem_send_response(vmem, elem, &resp);
}

static bool virtio_mem_valid_range(const VirtIOMEM *vmem, uint64_t gpa,
                                   uint64_t size)
{
    if (!QEMU_IS_ALIGNED(gpa, vmem->block_size)) {
        return false;
    }
    if (gpa + size < gpa || !size) {
        return false;
    }
    if (gpa < vmem->addr || gpa >= vmem->addr + vmem->usable_region_size) {
        return false;
    }
    if (gpa + size > vmem->addr + vmem->usable_region_size) {
        return false;
    }
    return true;
}
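
/*
 * Example (illustrative): with addr=4GiB, block_size=2MiB and
 * usable_region_size=276MiB, the range gpa=4GiB + 2MiB, size=4MiB is
 * valid. In contrast, gpa=4GiB + 1MiB (unaligned), size=0, or any range
 * reaching beyond 4GiB + 276MiB is rejected.
 */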

static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa,
                                      uint64_t size, bool plug)
{
    const uint64_t offset = start_gpa - vmem->addr;
    RAMBlock *rb = vmem->memdev->mr.ram_block;

    if (virtio_mem_is_busy()) {
        return -EBUSY;
    }

    if (!plug) {
        if (ram_block_discard_range(rb, offset, size)) {
            return -EBUSY;
        }
        virtio_mem_notify_unplug(vmem, offset, size);
    } else if (virtio_mem_notify_plug(vmem, offset, size)) {
        /* A mapping attempt could have resulted in memory getting populated. */
        ram_block_discard_range(vmem->memdev->mr.ram_block, offset, size);
        return -EBUSY;
    }
    virtio_mem_set_bitmap(vmem, start_gpa, size, plug);
    return 0;
}
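
/*
 * Example (illustrative): unplugging 4 MiB at start_gpa=4GiB + 2MiB on a
 * device with addr=4GiB first discards the backing pages via
 * ram_block_discard_range(rb, 2MiB, 4MiB), then notifies the registered
 * RamDiscardListeners (e.g., VFIO) about the discard, and finally clears
 * the two corresponding bits in the bitmap.
 */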

static int virtio_mem_state_change_request(VirtIOMEM *vmem, uint64_t gpa,
                                           uint16_t nb_blocks, bool plug)
{
    const uint64_t size = nb_blocks * vmem->block_size;
    int ret;

    if (!virtio_mem_valid_range(vmem, gpa, size)) {
        return VIRTIO_MEM_RESP_ERROR;
    }

    if (plug && (vmem->size + size > vmem->requested_size)) {
        return VIRTIO_MEM_RESP_NACK;
    }

    /* test if really all blocks are in the opposite state */
    if (!virtio_mem_test_bitmap(vmem, gpa, size, !plug)) {
        return VIRTIO_MEM_RESP_ERROR;
    }

    ret = virtio_mem_set_block_state(vmem, gpa, size, plug);
    if (ret) {
        return VIRTIO_MEM_RESP_BUSY;
    }
    if (plug) {
        vmem->size += size;
    } else {
        vmem->size -= size;
    }
    notifier_list_notify(&vmem->size_change_notifiers, &vmem->size);
    return VIRTIO_MEM_RESP_ACK;
}

static void virtio_mem_plug_request(VirtIOMEM *vmem, VirtQueueElement *elem,
                                    struct virtio_mem_req *req)
{
    const uint64_t gpa = le64_to_cpu(req->u.plug.addr);
    const uint16_t nb_blocks = le16_to_cpu(req->u.plug.nb_blocks);
    uint16_t type;

    trace_virtio_mem_plug_request(gpa, nb_blocks);
    type = virtio_mem_state_change_request(vmem, gpa, nb_blocks, true);
    virtio_mem_send_response_simple(vmem, elem, type);
}

static void virtio_mem_unplug_request(VirtIOMEM *vmem, VirtQueueElement *elem,
                                      struct virtio_mem_req *req)
{
    const uint64_t gpa = le64_to_cpu(req->u.unplug.addr);
    const uint16_t nb_blocks = le16_to_cpu(req->u.unplug.nb_blocks);
    uint16_t type;

    trace_virtio_mem_unplug_request(gpa, nb_blocks);
    type = virtio_mem_state_change_request(vmem, gpa, nb_blocks, false);
    virtio_mem_send_response_simple(vmem, elem, type);
}

static void virtio_mem_resize_usable_region(VirtIOMEM *vmem,
                                            uint64_t requested_size,
                                            bool can_shrink)
{
    uint64_t newsize = MIN(memory_region_size(&vmem->memdev->mr),
                           requested_size + VIRTIO_MEM_USABLE_EXTENT);

    /* The usable region size always has to be a multiple of the block size. */
    newsize = QEMU_ALIGN_UP(newsize, vmem->block_size);

    if (!requested_size) {
        newsize = 0;
    }

    if (newsize < vmem->usable_region_size && !can_shrink) {
        return;
    }

    trace_virtio_mem_resized_usable_region(vmem->usable_region_size, newsize);
    vmem->usable_region_size = newsize;
}
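
/*
 * Example (illustrative): growing requested_size from 0 to 20 MiB on a
 * 4 GiB memdev with a 2 MiB block size yields
 * newsize = MIN(4GiB, 20MiB + 256MiB) = 276MiB, which is already
 * block-aligned. Shrinking back to 0 only takes effect with
 * can_shrink=true (e.g., on unplug-all); otherwise the region stays put.
 */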

static int virtio_mem_unplug_all(VirtIOMEM *vmem)
{
    RAMBlock *rb = vmem->memdev->mr.ram_block;

    if (virtio_mem_is_busy()) {
        return -EBUSY;
    }

    if (ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb))) {
        return -EBUSY;
    }
    virtio_mem_notify_unplug_all(vmem);

    bitmap_clear(vmem->bitmap, 0, vmem->bitmap_size);
    if (vmem->size) {
        vmem->size = 0;
        notifier_list_notify(&vmem->size_change_notifiers, &vmem->size);
    }
    trace_virtio_mem_unplugged_all();
    virtio_mem_resize_usable_region(vmem, vmem->requested_size, true);
    return 0;
}

static void virtio_mem_unplug_all_request(VirtIOMEM *vmem,
                                          VirtQueueElement *elem)
{
    trace_virtio_mem_unplug_all_request();
    if (virtio_mem_unplug_all(vmem)) {
        virtio_mem_send_response_simple(vmem, elem, VIRTIO_MEM_RESP_BUSY);
    } else {
        virtio_mem_send_response_simple(vmem, elem, VIRTIO_MEM_RESP_ACK);
    }
}

static void virtio_mem_state_request(VirtIOMEM *vmem, VirtQueueElement *elem,
                                     struct virtio_mem_req *req)
{
    const uint16_t nb_blocks = le16_to_cpu(req->u.state.nb_blocks);
    const uint64_t gpa = le64_to_cpu(req->u.state.addr);
    const uint64_t size = nb_blocks * vmem->block_size;
    struct virtio_mem_resp resp = {
        .type = cpu_to_le16(VIRTIO_MEM_RESP_ACK),
    };

    trace_virtio_mem_state_request(gpa, nb_blocks);
    if (!virtio_mem_valid_range(vmem, gpa, size)) {
        virtio_mem_send_response_simple(vmem, elem, VIRTIO_MEM_RESP_ERROR);
        return;
    }

    if (virtio_mem_test_bitmap(vmem, gpa, size, true)) {
        resp.u.state.state = cpu_to_le16(VIRTIO_MEM_STATE_PLUGGED);
    } else if (virtio_mem_test_bitmap(vmem, gpa, size, false)) {
        resp.u.state.state = cpu_to_le16(VIRTIO_MEM_STATE_UNPLUGGED);
    } else {
        resp.u.state.state = cpu_to_le16(VIRTIO_MEM_STATE_MIXED);
    }
    trace_virtio_mem_state_response(le16_to_cpu(resp.u.state.state));
    virtio_mem_send_response(vmem, elem, &resp);
}

static void virtio_mem_handle_request(VirtIODevice *vdev, VirtQueue *vq)
{
    const int len = sizeof(struct virtio_mem_req);
    VirtIOMEM *vmem = VIRTIO_MEM(vdev);
    VirtQueueElement *elem;
    struct virtio_mem_req req;
    uint16_t type;

    while (true) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            return;
        }

        if (iov_to_buf(elem->out_sg, elem->out_num, 0, &req, len) < len) {
            virtio_error(vdev, "virtio-mem protocol violation: invalid request"
                         " size: %d", len);
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            return;
        }

        if (iov_size(elem->in_sg, elem->in_num) <
            sizeof(struct virtio_mem_resp)) {
            virtio_error(vdev, "virtio-mem protocol violation: not enough space"
                         " for response: %zu",
                         iov_size(elem->in_sg, elem->in_num));
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            return;
        }

        type = le16_to_cpu(req.type);
        switch (type) {
        case VIRTIO_MEM_REQ_PLUG:
            virtio_mem_plug_request(vmem, elem, &req);
            break;
        case VIRTIO_MEM_REQ_UNPLUG:
            virtio_mem_unplug_request(vmem, elem, &req);
            break;
        case VIRTIO_MEM_REQ_UNPLUG_ALL:
            virtio_mem_unplug_all_request(vmem, elem);
            break;
        case VIRTIO_MEM_REQ_STATE:
            virtio_mem_state_request(vmem, elem, &req);
            break;
        default:
            virtio_error(vdev, "virtio-mem protocol violation: unknown request"
                         " type: %d", type);
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            return;
        }

        g_free(elem);
    }
}
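
/*
 * Example (illustrative request flow): the guest places a struct
 * virtio_mem_req (e.g., type=VIRTIO_MEM_REQ_PLUG, u.plug.addr=4GiB,
 * u.plug.nb_blocks=2) into the out sg-list and reserves room for a struct
 * virtio_mem_resp in the in sg-list. Malformed descriptors are not
 * answered; instead, the device is marked broken via virtio_error().
 */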

static void virtio_mem_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VirtIOMEM *vmem = VIRTIO_MEM(vdev);
    struct virtio_mem_config *config = (void *) config_data;

    config->block_size = cpu_to_le64(vmem->block_size);
    config->node_id = cpu_to_le16(vmem->node);
    config->requested_size = cpu_to_le64(vmem->requested_size);
    config->plugged_size = cpu_to_le64(vmem->size);
    config->addr = cpu_to_le64(vmem->addr);
    config->region_size = cpu_to_le64(memory_region_size(&vmem->memdev->mr));
    config->usable_region_size = cpu_to_le64(vmem->usable_region_size);
}
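
/*
 * Example (illustrative): a device with a 4 GiB memdev, addr=4GiB,
 * block-size=2MiB and requested-size=1G exposes region_size=4GiB and
 * usable_region_size=1GiB + 256MiB, alongside the currently plugged size.
 * All fields are converted to little endian, as virtio-1 mandates for
 * config space.
 */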

static uint64_t virtio_mem_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());

    if (ms->numa_state) {
#if defined(CONFIG_ACPI)
        virtio_add_feature(&features, VIRTIO_MEM_F_ACPI_PXM);
#endif
    }
    return features;
}

static void virtio_mem_system_reset(void *opaque)
{
    VirtIOMEM *vmem = VIRTIO_MEM(opaque);

    /*
     * During usual resets, we will unplug all memory and shrink the usable
     * region size. This is, however, not possible in all scenarios. Then,
     * the guest has to deal with this manually (VIRTIO_MEM_REQ_UNPLUG_ALL).
     */
    virtio_mem_unplug_all(vmem);
}

static void virtio_mem_device_realize(DeviceState *dev, Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    int nb_numa_nodes = ms->numa_state ? ms->numa_state->num_nodes : 0;
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOMEM *vmem = VIRTIO_MEM(dev);
    uint64_t page_size;
    RAMBlock *rb;
    int ret;

    if (!vmem->memdev) {
        error_setg(errp, "'%s' property is not set", VIRTIO_MEM_MEMDEV_PROP);
        return;
    } else if (host_memory_backend_is_mapped(vmem->memdev)) {
        error_setg(errp, "'%s' property specifies a busy memdev: %s",
                   VIRTIO_MEM_MEMDEV_PROP,
                   object_get_canonical_path_component(OBJECT(vmem->memdev)));
        return;
    } else if (!memory_region_is_ram(&vmem->memdev->mr) ||
               memory_region_is_rom(&vmem->memdev->mr) ||
               !vmem->memdev->mr.ram_block) {
        error_setg(errp, "'%s' property specifies an unsupported memdev",
                   VIRTIO_MEM_MEMDEV_PROP);
        return;
    }

    if ((nb_numa_nodes && vmem->node >= nb_numa_nodes) ||
        (!nb_numa_nodes && vmem->node)) {
        error_setg(errp, "'%s' property has value '%" PRIu32 "', which exceeds"
                   " the number of numa nodes: %d", VIRTIO_MEM_NODE_PROP,
                   vmem->node, nb_numa_nodes ? nb_numa_nodes : 1);
        return;
    }

    if (enable_mlock) {
        error_setg(errp, "Incompatible with mlock");
        return;
    }

    rb = vmem->memdev->mr.ram_block;
    page_size = qemu_ram_pagesize(rb);

    /*
     * If the block size wasn't configured by the user, use a sane default. This
     * allows using hugetlbfs backends of any page size without manual
     * intervention.
     */
    if (!vmem->block_size) {
        vmem->block_size = virtio_mem_default_block_size(rb);
    }

    if (vmem->block_size < page_size) {
        error_setg(errp, "'%s' property has to be at least the page size (0x%"
                   PRIx64 ")", VIRTIO_MEM_BLOCK_SIZE_PROP, page_size);
        return;
    } else if (vmem->block_size < virtio_mem_default_block_size(rb)) {
        warn_report("'%s' property is smaller than the default block size (%"
                    PRIx64 " MiB)", VIRTIO_MEM_BLOCK_SIZE_PROP,
                    virtio_mem_default_block_size(rb) / MiB);
    } else if (!QEMU_IS_ALIGNED(vmem->requested_size, vmem->block_size)) {
        error_setg(errp, "'%s' property has to be a multiple of '%s' (0x%"
                   PRIx64 ")", VIRTIO_MEM_REQUESTED_SIZE_PROP,
                   VIRTIO_MEM_BLOCK_SIZE_PROP, vmem->block_size);
        return;
    } else if (!QEMU_IS_ALIGNED(vmem->addr, vmem->block_size)) {
        error_setg(errp, "'%s' property has to be a multiple of '%s' (0x%"
                   PRIx64 ")", VIRTIO_MEM_ADDR_PROP, VIRTIO_MEM_BLOCK_SIZE_PROP,
                   vmem->block_size);
        return;
    } else if (!QEMU_IS_ALIGNED(memory_region_size(&vmem->memdev->mr),
                                vmem->block_size)) {
        error_setg(errp, "'%s' property memdev size has to be a multiple of"
                   " '%s' (0x%" PRIx64 ")", VIRTIO_MEM_MEMDEV_PROP,
                   VIRTIO_MEM_BLOCK_SIZE_PROP, vmem->block_size);
        return;
    }

    if (ram_block_discard_require(true)) {
        error_setg(errp, "Discarding RAM is disabled");
        return;
    }

    ret = ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb));
    if (ret) {
        error_setg_errno(errp, -ret, "Unexpected error discarding RAM");
        ram_block_discard_require(false);
        return;
    }

    virtio_mem_resize_usable_region(vmem, vmem->requested_size, true);

    vmem->bitmap_size = memory_region_size(&vmem->memdev->mr) /
                        vmem->block_size;
    vmem->bitmap = bitmap_new(vmem->bitmap_size);

    virtio_init(vdev, TYPE_VIRTIO_MEM, VIRTIO_ID_MEM,
                sizeof(struct virtio_mem_config));
    vmem->vq = virtio_add_queue(vdev, 128, virtio_mem_handle_request);

    host_memory_backend_set_mapped(vmem->memdev, true);
    vmstate_register_ram(&vmem->memdev->mr, DEVICE(vmem));
    qemu_register_reset(virtio_mem_system_reset, vmem);
    precopy_add_notifier(&vmem->precopy_notifier);

    /*
     * Set ourselves as RamDiscardManager before the plug handler maps the
     * memory region and exposes it via an address space.
     */
    memory_region_set_ram_discard_manager(&vmem->memdev->mr,
                                          RAM_DISCARD_MANAGER(vmem));
}

static void virtio_mem_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOMEM *vmem = VIRTIO_MEM(dev);

    /*
     * The unplug handler unmapped the memory region; it cannot be
     * found via an address space anymore. Unset ourselves.
     */
    memory_region_set_ram_discard_manager(&vmem->memdev->mr, NULL);
    precopy_remove_notifier(&vmem->precopy_notifier);
    qemu_unregister_reset(virtio_mem_system_reset, vmem);
    vmstate_unregister_ram(&vmem->memdev->mr, DEVICE(vmem));
    host_memory_backend_set_mapped(vmem->memdev, false);
    virtio_del_queue(vdev, 0);
    virtio_cleanup(vdev);
    g_free(vmem->bitmap);
    ram_block_discard_require(false);
}

static int virtio_mem_discard_range_cb(const VirtIOMEM *vmem, void *arg,
                                       uint64_t offset, uint64_t size)
{
    RAMBlock *rb = vmem->memdev->mr.ram_block;

    return ram_block_discard_range(rb, offset, size) ? -EINVAL : 0;
}

static int virtio_mem_restore_unplugged(VirtIOMEM *vmem)
{
    /* Make sure all memory is really discarded after migration. */
    return virtio_mem_for_each_unplugged_range(vmem, NULL,
                                               virtio_mem_discard_range_cb);
}
910b2576
DH
792static int virtio_mem_post_load(void *opaque, int version_id)
793{
2044969f
DH
794 VirtIOMEM *vmem = VIRTIO_MEM(opaque);
795 RamDiscardListener *rdl;
796 int ret;
797
798 /*
799 * We started out with all memory discarded and our memory region is mapped
800 * into an address space. Replay, now that we updated the bitmap.
801 */
802 QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
803 ret = virtio_mem_for_each_plugged_section(vmem, rdl->section, rdl,
804 virtio_mem_notify_populate_cb);
805 if (ret) {
806 return ret;
807 }
808 }
809
910b2576
DH
810 if (migration_in_incoming_postcopy()) {
811 return 0;
812 }
813
2044969f 814 return virtio_mem_restore_unplugged(vmem);
910b2576
DH
815}

typedef struct VirtIOMEMMigSanityChecks {
    VirtIOMEM *parent;
    uint64_t addr;
    uint64_t region_size;
    uint64_t block_size;
    uint32_t node;
} VirtIOMEMMigSanityChecks;

static int virtio_mem_mig_sanity_checks_pre_save(void *opaque)
{
    VirtIOMEMMigSanityChecks *tmp = opaque;
    VirtIOMEM *vmem = tmp->parent;

    tmp->addr = vmem->addr;
    tmp->region_size = memory_region_size(&vmem->memdev->mr);
    tmp->block_size = vmem->block_size;
    tmp->node = vmem->node;
    return 0;
}

static int virtio_mem_mig_sanity_checks_post_load(void *opaque, int version_id)
{
    VirtIOMEMMigSanityChecks *tmp = opaque;
    VirtIOMEM *vmem = tmp->parent;
    const uint64_t new_region_size = memory_region_size(&vmem->memdev->mr);

    if (tmp->addr != vmem->addr) {
        error_report("Property '%s' changed from 0x%" PRIx64 " to 0x%" PRIx64,
                     VIRTIO_MEM_ADDR_PROP, tmp->addr, vmem->addr);
        return -EINVAL;
    }
    /*
     * Note: Preparation for resizeable memory regions. The maximum size
     * of the memory region must not change during migration.
     */
    if (tmp->region_size != new_region_size) {
        error_report("Property '%s' size changed from 0x%" PRIx64 " to 0x%"
                     PRIx64, VIRTIO_MEM_MEMDEV_PROP, tmp->region_size,
                     new_region_size);
        return -EINVAL;
    }
    if (tmp->block_size != vmem->block_size) {
        error_report("Property '%s' changed from 0x%" PRIx64 " to 0x%" PRIx64,
                     VIRTIO_MEM_BLOCK_SIZE_PROP, tmp->block_size,
                     vmem->block_size);
        return -EINVAL;
    }
    if (tmp->node != vmem->node) {
        error_report("Property '%s' changed from %" PRIu32 " to %" PRIu32,
                     VIRTIO_MEM_NODE_PROP, tmp->node, vmem->node);
        return -EINVAL;
    }
    return 0;
}

static const VMStateDescription vmstate_virtio_mem_sanity_checks = {
    .name = "virtio-mem-device/sanity-checks",
    .pre_save = virtio_mem_mig_sanity_checks_pre_save,
    .post_load = virtio_mem_mig_sanity_checks_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(addr, VirtIOMEMMigSanityChecks),
        VMSTATE_UINT64(region_size, VirtIOMEMMigSanityChecks),
        VMSTATE_UINT64(block_size, VirtIOMEMMigSanityChecks),
        VMSTATE_UINT32(node, VirtIOMEMMigSanityChecks),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_virtio_mem_device = {
    .name = "virtio-mem-device",
    .minimum_version_id = 1,
    .version_id = 1,
    .priority = MIG_PRI_VIRTIO_MEM,
    .post_load = virtio_mem_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_WITH_TMP(VirtIOMEM, VirtIOMEMMigSanityChecks,
                         vmstate_virtio_mem_sanity_checks),
        VMSTATE_UINT64(usable_region_size, VirtIOMEM),
        VMSTATE_UINT64(size, VirtIOMEM),
        VMSTATE_UINT64(requested_size, VirtIOMEM),
        VMSTATE_BITMAP(bitmap, VirtIOMEM, 0, bitmap_size),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_mem = {
    .name = "virtio-mem",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static void virtio_mem_fill_device_info(const VirtIOMEM *vmem,
                                        VirtioMEMDeviceInfo *vi)
{
    vi->memaddr = vmem->addr;
    vi->node = vmem->node;
    vi->requested_size = vmem->requested_size;
    vi->size = vmem->size;
    vi->max_size = memory_region_size(&vmem->memdev->mr);
    vi->block_size = vmem->block_size;
    vi->memdev = object_get_canonical_path(OBJECT(vmem->memdev));
}

static MemoryRegion *virtio_mem_get_memory_region(VirtIOMEM *vmem, Error **errp)
{
    if (!vmem->memdev) {
        error_setg(errp, "'%s' property must be set", VIRTIO_MEM_MEMDEV_PROP);
        return NULL;
    }

    return &vmem->memdev->mr;
}

static void virtio_mem_add_size_change_notifier(VirtIOMEM *vmem,
                                                Notifier *notifier)
{
    notifier_list_add(&vmem->size_change_notifiers, notifier);
}

static void virtio_mem_remove_size_change_notifier(VirtIOMEM *vmem,
                                                   Notifier *notifier)
{
    notifier_remove(notifier);
}

static void virtio_mem_get_size(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    const VirtIOMEM *vmem = VIRTIO_MEM(obj);
    uint64_t value = vmem->size;

    visit_type_size(v, name, &value, errp);
}

static void virtio_mem_get_requested_size(Object *obj, Visitor *v,
                                          const char *name, void *opaque,
                                          Error **errp)
{
    const VirtIOMEM *vmem = VIRTIO_MEM(obj);
    uint64_t value = vmem->requested_size;

    visit_type_size(v, name, &value, errp);
}

static void virtio_mem_set_requested_size(Object *obj, Visitor *v,
                                          const char *name, void *opaque,
                                          Error **errp)
{
    VirtIOMEM *vmem = VIRTIO_MEM(obj);
    Error *err = NULL;
    uint64_t value;

    visit_type_size(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    /*
     * The block size and memory backend are not fixed until the device is
     * realized. realize() will verify these properties then.
     */
    if (DEVICE(obj)->realized) {
        if (!QEMU_IS_ALIGNED(value, vmem->block_size)) {
            error_setg(errp, "'%s' has to be a multiple of '%s' (0x%" PRIx64
                       ")", name, VIRTIO_MEM_BLOCK_SIZE_PROP,
                       vmem->block_size);
            return;
        } else if (value > memory_region_size(&vmem->memdev->mr)) {
            error_setg(errp, "'%s' cannot exceed the memory backend size"
                       " (0x%" PRIx64 ")", name,
                       memory_region_size(&vmem->memdev->mr));
            return;
        }

        if (value != vmem->requested_size) {
            virtio_mem_resize_usable_region(vmem, value, false);
            vmem->requested_size = value;
        }
        /*
         * Trigger a config update so the guest gets notified. We trigger
         * even if the size didn't change (especially helpful for debugging).
         */
        virtio_notify_config(VIRTIO_DEVICE(vmem));
    } else {
        vmem->requested_size = value;
    }
}

static void virtio_mem_get_block_size(Object *obj, Visitor *v, const char *name,
                                      void *opaque, Error **errp)
{
    const VirtIOMEM *vmem = VIRTIO_MEM(obj);
    uint64_t value = vmem->block_size;

    /*
     * If not configured by the user (and we're not realized yet), use the
     * default block size we would use with the current memory backend.
     */
    if (!value) {
        if (vmem->memdev && memory_region_is_ram(&vmem->memdev->mr)) {
            value = virtio_mem_default_block_size(vmem->memdev->mr.ram_block);
        } else {
            value = virtio_mem_thp_size();
        }
    }

    visit_type_size(v, name, &value, errp);
}

static void virtio_mem_set_block_size(Object *obj, Visitor *v, const char *name,
                                      void *opaque, Error **errp)
{
    VirtIOMEM *vmem = VIRTIO_MEM(obj);
    Error *err = NULL;
    uint64_t value;

    if (DEVICE(obj)->realized) {
        error_setg(errp, "'%s' cannot be changed", name);
        return;
    }

    visit_type_size(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < VIRTIO_MEM_MIN_BLOCK_SIZE) {
        error_setg(errp, "'%s' property has to be at least 0x%" PRIx32, name,
                   VIRTIO_MEM_MIN_BLOCK_SIZE);
        return;
    } else if (!is_power_of_2(value)) {
        error_setg(errp, "'%s' property has to be a power of two", name);
        return;
    }
    vmem->block_size = value;
}

static int virtio_mem_precopy_exclude_range_cb(const VirtIOMEM *vmem, void *arg,
                                               uint64_t offset, uint64_t size)
{
    void * const host = qemu_ram_get_host_addr(vmem->memdev->mr.ram_block);

    qemu_guest_free_page_hint(host + offset, size);
    return 0;
}

static void virtio_mem_precopy_exclude_unplugged(VirtIOMEM *vmem)
{
    virtio_mem_for_each_unplugged_range(vmem, NULL,
                                        virtio_mem_precopy_exclude_range_cb);
}

static int virtio_mem_precopy_notify(NotifierWithReturn *n, void *data)
{
    VirtIOMEM *vmem = container_of(n, VirtIOMEM, precopy_notifier);
    PrecopyNotifyData *pnd = data;

    switch (pnd->reason) {
    case PRECOPY_NOTIFY_AFTER_BITMAP_SYNC:
        virtio_mem_precopy_exclude_unplugged(vmem);
        break;
    default:
        break;
    }

    return 0;
}

static void virtio_mem_instance_init(Object *obj)
{
    VirtIOMEM *vmem = VIRTIO_MEM(obj);

    notifier_list_init(&vmem->size_change_notifiers);
    vmem->precopy_notifier.notify = virtio_mem_precopy_notify;
    QLIST_INIT(&vmem->rdl_list);

    object_property_add(obj, VIRTIO_MEM_SIZE_PROP, "size", virtio_mem_get_size,
                        NULL, NULL, NULL);
    object_property_add(obj, VIRTIO_MEM_REQUESTED_SIZE_PROP, "size",
                        virtio_mem_get_requested_size,
                        virtio_mem_set_requested_size, NULL, NULL);
    object_property_add(obj, VIRTIO_MEM_BLOCK_SIZE_PROP, "size",
                        virtio_mem_get_block_size, virtio_mem_set_block_size,
                        NULL, NULL);
}

static Property virtio_mem_properties[] = {
    DEFINE_PROP_UINT64(VIRTIO_MEM_ADDR_PROP, VirtIOMEM, addr, 0),
    DEFINE_PROP_UINT32(VIRTIO_MEM_NODE_PROP, VirtIOMEM, node, 0),
    DEFINE_PROP_LINK(VIRTIO_MEM_MEMDEV_PROP, VirtIOMEM, memdev,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_END_OF_LIST(),
};

static uint64_t virtio_mem_rdm_get_min_granularity(const RamDiscardManager *rdm,
                                                   const MemoryRegion *mr)
{
    const VirtIOMEM *vmem = VIRTIO_MEM(rdm);

    g_assert(mr == &vmem->memdev->mr);
    return vmem->block_size;
}

static bool virtio_mem_rdm_is_populated(const RamDiscardManager *rdm,
                                        const MemoryRegionSection *s)
{
    const VirtIOMEM *vmem = VIRTIO_MEM(rdm);
    uint64_t start_gpa = vmem->addr + s->offset_within_region;
    uint64_t end_gpa = start_gpa + int128_get64(s->size);

    g_assert(s->mr == &vmem->memdev->mr);

    start_gpa = QEMU_ALIGN_DOWN(start_gpa, vmem->block_size);
    end_gpa = QEMU_ALIGN_UP(end_gpa, vmem->block_size);

    if (!virtio_mem_valid_range(vmem, start_gpa, end_gpa - start_gpa)) {
        return false;
    }

    return virtio_mem_test_bitmap(vmem, start_gpa, end_gpa - start_gpa, true);
}
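
/*
 * Example (illustrative): with block_size=2MiB, a section covering
 * [addr + 1MiB, addr + 3MiB) is expanded to the enclosing block-aligned
 * range [addr, addr + 4MiB); the result is "populated" only if both
 * blocks are plugged in the bitmap.
 */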

struct VirtIOMEMReplayData {
    void *fn;
    void *opaque;
};

static int virtio_mem_rdm_replay_populated_cb(MemoryRegionSection *s, void *arg)
{
    struct VirtIOMEMReplayData *data = arg;

    return ((ReplayRamPopulate)data->fn)(s, data->opaque);
}

static int virtio_mem_rdm_replay_populated(const RamDiscardManager *rdm,
                                           MemoryRegionSection *s,
                                           ReplayRamPopulate replay_fn,
                                           void *opaque)
{
    const VirtIOMEM *vmem = VIRTIO_MEM(rdm);
    struct VirtIOMEMReplayData data = {
        .fn = replay_fn,
        .opaque = opaque,
    };

    g_assert(s->mr == &vmem->memdev->mr);
    return virtio_mem_for_each_plugged_section(vmem, s, &data,
                                               virtio_mem_rdm_replay_populated_cb);
}

static void virtio_mem_rdm_register_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl,
                                             MemoryRegionSection *s)
{
    VirtIOMEM *vmem = VIRTIO_MEM(rdm);
    int ret;

    g_assert(s->mr == &vmem->memdev->mr);
    rdl->section = memory_region_section_new_copy(s);

    QLIST_INSERT_HEAD(&vmem->rdl_list, rdl, next);
    ret = virtio_mem_for_each_plugged_section(vmem, rdl->section, rdl,
                                              virtio_mem_notify_populate_cb);
    if (ret) {
        error_report("%s: Replaying plugged ranges failed: %s", __func__,
                     strerror(-ret));
    }
}

static void virtio_mem_rdm_unregister_listener(RamDiscardManager *rdm,
                                               RamDiscardListener *rdl)
{
    VirtIOMEM *vmem = VIRTIO_MEM(rdm);

    g_assert(rdl->section->mr == &vmem->memdev->mr);
    if (vmem->size) {
        if (rdl->double_discard_supported) {
            rdl->notify_discard(rdl, rdl->section);
        } else {
            virtio_mem_for_each_plugged_section(vmem, rdl->section, rdl,
                                                virtio_mem_notify_discard_cb);
        }
    }

    memory_region_section_free_copy(rdl->section);
    rdl->section = NULL;
    QLIST_REMOVE(rdl, next);
}

static void virtio_mem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOMEMClass *vmc = VIRTIO_MEM_CLASS(klass);
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_CLASS(klass);

    device_class_set_props(dc, virtio_mem_properties);
    dc->vmsd = &vmstate_virtio_mem;

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    vdc->realize = virtio_mem_device_realize;
    vdc->unrealize = virtio_mem_device_unrealize;
    vdc->get_config = virtio_mem_get_config;
    vdc->get_features = virtio_mem_get_features;
    vdc->vmsd = &vmstate_virtio_mem_device;

    vmc->fill_device_info = virtio_mem_fill_device_info;
    vmc->get_memory_region = virtio_mem_get_memory_region;
    vmc->add_size_change_notifier = virtio_mem_add_size_change_notifier;
    vmc->remove_size_change_notifier = virtio_mem_remove_size_change_notifier;

    rdmc->get_min_granularity = virtio_mem_rdm_get_min_granularity;
    rdmc->is_populated = virtio_mem_rdm_is_populated;
    rdmc->replay_populated = virtio_mem_rdm_replay_populated;
    rdmc->register_listener = virtio_mem_rdm_register_listener;
    rdmc->unregister_listener = virtio_mem_rdm_unregister_listener;
}

static const TypeInfo virtio_mem_info = {
    .name = TYPE_VIRTIO_MEM,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOMEM),
    .instance_init = virtio_mem_instance_init,
    .class_init = virtio_mem_class_init,
    .class_size = sizeof(VirtIOMEMClass),
    .interfaces = (InterfaceInfo[]) {
        { TYPE_RAM_DISCARD_MANAGER },
        { }
    },
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_mem_info);
}

type_init(virtio_register_types)