/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"
#include "qemu/rcu_queue.h"
#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif
#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#elif defined(TARGET_TRICORE)
#define QEMU_ARCH QEMU_ARCH_TRICORE
#endif
const uint32_t arch_type = QEMU_ARCH;
static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);

static uint64_t bitmap_sync_count;
/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */
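/*
 * These flags travel in the low bits of the 64-bit page address on the
 * wire: a target page address is always TARGET_PAGE_SIZE aligned, so the
 * low bits are free to carry metadata.  The receiver splits them apart
 * again; a minimal sketch of what ram_load() does below:
 *
 *     uint64_t addr  = qemu_get_be64(f);
 *     int      flags = addr & ~TARGET_PAGE_MASK;
 *     addr          &= TARGET_PAGE_MASK;
 */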
static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf", true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};
static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    /* buffer_find_nonzero_offset() returns size when no nonzero byte
     * is found, i.e. when the whole range is zero. */
    return buffer_find_nonzero_offset(p, size) == size;
}
/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;

/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;
static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_lock(&XBZRLE.lock);
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_unlock(&XBZRLE.lock);
}
/*
 * called from qmp_migrate_set_cache_size in main thread, possibly while
 * a migration is in progress.
 * A running migration may be using the cache and might finish during this
 * call, hence changes to the cache are protected by XBZRLE.lock().
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}
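/*
 * A worked example of the rounding above: a QMP request for a 5 MiB
 * cache (5242880 bytes) is rounded down by pow2floor() to 4 MiB
 * (4194304 bytes), which is also the value reported back to the caller.
 */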
/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    double xbzrle_cache_miss_rate;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;
static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}
uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

double xbzrle_mig_cache_miss_rate(void)
{
    return acct_info.xbzrle_cache_miss_rate;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}
/* This is the last block that we have visited searching for dirty pages
 */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;
struct CompressParam {
    /* To be done */
};
typedef struct CompressParam CompressParam;

static CompressParam *comp_param;
static QemuThread *compress_threads;
static bool quit_comp_thread;

static void *do_data_compress(void *opaque)
{
    while (!quit_comp_thread) {
        /* To be done */
    }

    return NULL;
}
static inline void terminate_compression_threads(void)
{
    quit_comp_thread = true;
    /* To be done */
}
void migrate_compress_threads_join(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(compress_threads + i);
    }
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}
void migrate_compress_threads_create(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    quit_comp_thread = false;
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    for (i = 0; i < thread_count; i++) {
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}
/**
 * save_page_header: Write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns: Number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
{
    size_t size;

    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}
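/*
 * Sketch of the resulting wire layout (first page of a block, so
 * RAM_SAVE_FLAG_CONTINUE is clear):
 *
 *   8 bytes  be64  offset within the block, flags in the low bits
 *   1 byte         length of the block idstr
 *   n bytes        the idstr itself, e.g. "pc.ram" (not NUL-terminated)
 *
 * Subsequent pages of the same block set RAM_SAVE_FLAG_CONTINUE and
 * skip the idstr, saving 1 + strlen(idstr) bytes per page.
 */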
/* Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    if (ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
                 bitmap_sync_count);
}

#define ENCODING_FLAG_XBZRLE 0x1
/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @f: QEMUFile where to send the data
 * @current_data:
 * @current_addr:
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage,
                            uint64_t *bytes_transferred)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
        acct_info.xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             bitmap_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_xbzrle;
    *bytes_transferred += bytes_xbzrle;

    return 1;
}
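/*
 * XBZRLE = XOR based zero run length encoding: the page is XORed with
 * its cached previous version, so unchanged bytes become zeros, and the
 * result is run-length encoded.  If a guest counter somewhere in the
 * page ticked from 1 to 2, the encoded form is essentially "skip n zero
 * bytes, emit the few changed bytes, skip the rest" -- a handful of
 * bytes instead of TARGET_PAGE_SIZE (the example is illustrative only).
 */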
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}
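/*
 * Note the bulk-stage shortcut above: during the first pass over RAM
 * every page is dirty by construction, so instead of scanning the
 * bitmap the code just takes the next page (nr + 1) and skips the
 * find_next_bit() walk entirely.
 */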
static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~migration_bitmap[k];
                migration_bitmap[k] |= src[k];
                new_dirty &= src[k];
                migration_dirty_pages += ctpopl(new_dirty);
                src[k] = 0;
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                cpu_physical_memory_reset_dirty(start + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
                migration_bitmap_set_dirty(start + addr);
            }
        }
    }
}
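/*
 * Worked example of the word-at-a-time path above: with
 *   migration_bitmap[k] = 0b0011 and src[k] = 0b0110,
 * new_dirty = ~0b0011 & 0b0110 = 0b0100, so exactly one page (the one
 * that was clean before this sync) is added to migration_dirty_pages,
 * and the bitmap word becomes 0b0111.
 */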
/* Fix me: there are too many global variables used in migration process. */
static int64_t start_time;
static int64_t bytes_xfer_prev;
static int64_t num_dirty_pages_period;

static void migration_bitmap_sync_init(void)
{
    start_time = 0;
    bytes_xfer_prev = 0;
    num_dirty_pages_period = 0;
}
/* Called with iothread lock held, to protect ram_list.dirty_memory[] */
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    int64_t end_time;
    int64_t bytes_xfer_now;
    static uint64_t xbzrle_cache_miss_prev;
    static uint64_t iterations_prev;

    bitmap_sync_count++;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
    }
    rcu_read_unlock();

    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes is 50% more than the approx.
               amount of bytes that just got transferred since the last time we
               were in this routine. If that happens >N times (for now N==4)
               we turn on the throttle down logic */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
               (num_dirty_pages_period * TARGET_PAGE_SIZE >
                   (bytes_xfer_now - bytes_xfer_prev)/2) &&
               (dirty_rate_high_cnt++ > 4)) {
                    trace_migration_throttle();
                    mig_throttle_on = true;
                    dirty_rate_high_cnt = 0;
            }
            bytes_xfer_prev = bytes_xfer_now;
        } else {
            mig_throttle_on = false;
        }
        if (migrate_use_xbzrle()) {
            if (iterations_prev != 0) {
                acct_info.xbzrle_cache_miss_rate =
                   (double)(acct_info.xbzrle_cache_miss -
                            xbzrle_cache_miss_prev) /
                   (acct_info.iterations - iterations_prev);
            }
            iterations_prev = acct_info.iterations;
            xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
        s->dirty_sync_count = bitmap_sync_count;
    }
}
/**
 * ram_save_page: Send the given page to the stream
 *
 * Returns: Number of pages written.
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                         bool last_stage, uint64_t *bytes_transferred)
{
    int pages = -1;
    uint64_t bytes_xmit;
    ram_addr_t current_addr;
    MemoryRegion *mr = block->mr;
    uint8_t *p;
    int ret;
    bool send_async = true;

    p = memory_region_get_ram_ptr(mr) + offset;

    /* In doubt, send page as normal */
    bytes_xmit = 0;
    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        *bytes_transferred += bytes_xmit;
        pages = 1;
    }

    XBZRLE_cache_lock();

    current_addr = block->offset + offset;

    if (block == last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                acct_info.dup_pages++;
            }
        }
    } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        acct_info.dup_pages++;
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_COMPRESS);
        qemu_put_byte(f, 0);
        *bytes_transferred += 1;
        pages = 1;
        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
         * page would be stale
         */
        xbzrle_cache_zero_page(current_addr);
    } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
        pages = save_xbzrle_page(f, &p, current_addr, block,
                                 offset, last_stage, bytes_transferred);
        if (!last_stage) {
            /* Can't send this cached data async, since the cache page
             * might get updated before it gets to the wire
             */
            send_async = false;
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_PAGE);
        if (send_async) {
            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
        } else {
            qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
        }
        *bytes_transferred += TARGET_PAGE_SIZE;
        pages = 1;
        acct_info.norm_pages++;
    }

    XBZRLE_cache_unlock();

    return pages;
}
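/*
 * Decision order above, summarized: a transport hook
 * (ram_control_save_page, used by RDMA) wins if present; otherwise an
 * all-zero page is sent as a 1-byte RAM_SAVE_FLAG_COMPRESS record;
 * otherwise XBZRLE is tried once the bulk stage is over; and a full
 * RAM_SAVE_FLAG_PAGE copy is the fallback.
 */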
/**
 * ram_save_compressed_page: compress the given page and send it to the stream
 *
 * Returns: Number of pages written.
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block,
                                    ram_addr_t offset, bool last_stage,
                                    uint64_t *bytes_transferred)
{
    int pages = -1;

    /* To be done */

    return pages;
}
/**
 * ram_find_and_save_block: Finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns:  The number of pages written
 *           0 means no dirty pages
 *
 * @f: QEMUFile where to send the data
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
                                   uint64_t *bytes_transferred)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int pages = 0;
    MemoryRegion *mr;

    if (!block) {
        block = QLIST_FIRST_RCU(&ram_list.blocks);
    }

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->used_length) {
            offset = 0;
            block = QLIST_NEXT_RCU(block, next);
            if (!block) {
                block = QLIST_FIRST_RCU(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            if (migrate_use_compression()) {
                pages = ram_save_compressed_page(f, block, offset, last_stage,
                                                 bytes_transferred);
            } else {
                pages = ram_save_page(f, block, offset, last_stage,
                                      bytes_transferred);
            }

            /* if page is unmodified, continue to the next */
            if (pages > 0) {
                last_sent_block = block;
                break;
            }
        }
    }

    last_seen_block = block;
    last_offset = offset;

    return pages;
}
static uint64_t bytes_transferred;

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}
static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}
uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
        total += block->used_length;
    rcu_read_unlock();
    return total;
}
void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}
static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
    XBZRLE_cache_unlock();
}
static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}
#define MAX_WAIT 50 /* ms, half buffered_file limit */


/* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * long-running RCU critical section.  When rcu-reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */

static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */

    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;
    bitmap_sync_count = 0;
    migration_bitmap_sync_init();

    if (migrate_use_xbzrle()) {
        XBZRLE_cache_lock();
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            XBZRLE_cache_unlock();
            error_report("Error creating cache");
            return -1;
        }
        XBZRLE_cache_unlock();

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            error_report("Error allocating encoded_buf");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            error_report("Error allocating current_buf");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    /* iothread lock needed for ram_list.dirty_memory[] */
    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    rcu_read_lock();
    bytes_transferred = 0;
    reset_ram_globals();

    ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
    migration_bitmap = bitmap_new(ram_bitmap_pages);
    bitmap_set(migration_bitmap, 0, ram_bitmap_pages);

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_ramlist();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->used_length);
    }

    rcu_read_unlock();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
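/*
 * The setup stage therefore emits, in order: the total RAM size with
 * RAM_SAVE_FLAG_MEM_SIZE set in its low bits, then one (idstr length,
 * idstr, used_length) triple per RAM block, then RAM_SAVE_FLAG_EOS.
 * The RAM_SAVE_FLAG_MEM_SIZE case in ram_load() below parses exactly
 * this layout.
 */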
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int pages_sent = 0;

    rcu_read_lock();
    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    /* Read version before ram_list.blocks */
    smp_rmb();

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int pages;

        pages = ram_find_and_save_block(f, false, &bytes_transferred);
        /* no more pages to send */
        if (pages == 0) {
            break;
        }
        pages_sent += pages;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check each some
           iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }
    rcu_read_unlock();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return pages_sent;
}
/* Called with iothread lock */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    rcu_read_lock();

    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int pages;

        pages = ram_find_and_save_block(f, true, &bytes_transferred);
        /* no more blocks to send */
        if (pages == 0) {
            break;
        }
    }

    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    rcu_read_unlock();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        rcu_read_lock();
        migration_bitmap_sync();
        rcu_read_unlock();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}
/* Must be called from within a rcu critical section.
 * Returns a pointer from within the RCU-protected ram_list.
 */
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block || block->max_length <= offset) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id)) &&
            block->max_length > offset) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    error_report("Can't find block %s!", id);
    return NULL;
}
/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        ret = -EINVAL;
    }

    /* This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    rcu_read_lock();
    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                uint8_t len;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
                    if (!strncmp(id, block->idstr, sizeof(id))) {
                        if (length != block->used_length) {
                            Error *local_err = NULL;

                            ret = qemu_ram_resize(block->offset, length, &local_err);
                            if (local_err) {
                                error_report_err(local_err);
                            }
                        }
                        break;
                    }
                }

                if (!block) {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;
        case RAM_SAVE_FLAG_COMPRESS:
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;
        case RAM_SAVE_FLAG_PAGE:
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;
        case RAM_SAVE_FLAG_XBZRLE:
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, flags);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    rcu_read_unlock();
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}
static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
}
struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[9];
static int soundhw_count;
void isa_register_soundhw(const char *name, const char *descr,
                          int (*init_isa)(ISABus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 1;
    soundhw[soundhw_count].init.init_isa = init_isa;
    soundhw_count++;
}
void pci_register_soundhw(const char *name, const char *descr,
                          int (*init_pci)(PCIBus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 0;
    soundhw[soundhw_count].init.init_pci = init_pci;
    soundhw_count++;
}
void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

        if (soundhw_count) {
            printf("Valid sound card names (comma separated):\n");
            for (c = soundhw; c->name; ++c) {
                printf("%-11s %s\n", c->name, c->descr);
            }
            printf("\n-soundhw all will enable all of the above\n");
        } else {
            printf("Machine has no user-selectable audio hardware "
                   "(it may or may not have always-present audio hardware).\n");
        }
        exit(!is_help_option(optarg));
    } else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
        } else {
            p = optarg;
            while (*p) {
                e = strchr(p, ',');
                l = !e ? strlen(p) : (size_t) (e - p);

                for (c = soundhw; c->name; ++c) {
                    if (!strncmp(c->name, p, l) && !c->name[l]) {
                        c->enabled = 1;
                        break;
                    }
                }

                if (!c->name) {
                    if (l > 80) {
                        error_report("Unknown sound card name (too big to show)");
                    } else {
                        error_report("Unknown sound card name `%.*s'",
                                     (int) l, p);
                    }
                    bad_card = 1;
                }
                p += l + (e != NULL);
            }

            if (bad_card) {
                goto show_valid_cards;
            }
        }
    }
}
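/*
 * Example invocations this parser accepts (the card names are
 * illustrative; the real set depends on the target): "-soundhw help"
 * lists the valid names and exits, "-soundhw all" enables every
 * registered card, and "-soundhw sb16,adlib" enables a comma-separated
 * subset.
 */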
void audio_init(void)
{
    struct soundhw *c;
    ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
    PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (!isa_bus) {
                    error_report("ISA bus not available for %s", c->name);
                    exit(1);
                }
                c->init.init_isa(isa_bus);
            } else {
                if (!pci_bus) {
                    error_report("PCI bus not available for %s", c->name);
                    exit(1);
                }
                c->init.init_pci(pci_bus);
            }
        }
    }
}
int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
    return 0;
}
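/*
 * Example: "550e8400-e29b-41d4-a716-446655440000" is exactly 36
 * characters and scans into 16 bytes, so it parses; anything that is
 * not 8-4-4-4-12 hex digits fails the sscanf count check and returns -1.
 */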
void do_acpitable_option(const QemuOpts *opts)
{
#ifdef TARGET_I386
    Error *err = NULL;

    acpi_table_add(opts, &err);
    if (err) {
        error_report("Wrong acpi table provided: %s",
                     error_get_pretty(err));
        exit(1);
    }
#endif
}
void do_smbios_option(QemuOpts *opts)
{
#ifdef TARGET_I386
    smbios_entry_add(opts);
#endif
}
void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}
int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}
TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = g_strdup(TARGET_NAME);

    return info;
}
/* Stub function that gets run on the vcpu when it's brought out of the
   VM to run inside qemu via async_run_on_cpu()*/
static void mig_sleep_cpu(void *opq)
{
    qemu_mutex_unlock_iothread();
    g_usleep(30*1000);
    qemu_mutex_lock_iothread();
}
/* To reduce the dirty rate explicitly disallow the VCPUs from spending
   much time in the VM. The migration thread will try to catch up.
   Workload will experience a performance drop.
*/
static void mig_throttle_guest_down(void)
{
    CPUState *cpu;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
    }
    qemu_mutex_unlock_iothread();
}
static void check_guest_throttling(void)
{
    static int64_t t0;
    int64_t        t1;

    if (!mig_throttle_on) {
        return;
    }

    if (!t0) {
        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        return;
    }

    t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    /* If it has been more than 40 ms since the last time the guest
     * was throttled then do it again.
     */
    if (40 < (t1-t0)/1000000) {
        mig_throttle_guest_down();
        t0 = t1;
    }
}
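/*
 * Net effect of the numbers above (assuming the 30 ms sleep in
 * mig_sleep_cpu): while throttled, each vcpu is put to sleep for 30 ms
 * out of every ~40 ms window, i.e. roughly 75% of its time, which is
 * what lets the migration thread catch up with the guest's dirty rate.
 */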