/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <sys/types.h>

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qmp-commands.h"
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"
#include "qemu/rcu_queue.h"
#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif
#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#elif defined(TARGET_TRICORE)
#define QEMU_ARCH QEMU_ARCH_TRICORE
#endif
const uint32_t arch_type = QEMU_ARCH;
static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);

static uint64_t bitmap_sync_count;
/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
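
/*
 * These flags travel in the low bits of the 64-bit word that also carries
 * the page offset on the wire (the bits below TARGET_PAGE_MASK).
 * Hypothetical illustration, not a line of this file: a page at block
 * offset 0x200000 sent as an XBZRLE update goes out as
 * qemu_put_be64(f, 0x200000 | RAM_SAVE_FLAG_XBZRLE), and ram_load()
 * splits it apart again with "flags = addr & ~TARGET_PAGE_MASK" and
 * "addr &= TARGET_PAGE_MASK".
 */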
static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf", true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};
static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_find_nonzero_offset(p, size) == size;
}
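
/*
 * buffer_find_nonzero_offset() reports how far into the buffer it got
 * before seeing a non-zero byte, so a return value equal to "size" means
 * the whole range was zero.
 */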
/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;

/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;
static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_lock(&XBZRLE.lock);
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_unlock(&XBZRLE.lock);
}
/*
 * called from qmp_migrate_set_cache_size in main thread, possibly while
 * a migration is in progress.
 * A running migration may be using the cache and might finish during this
 * call, hence changes to the cache are protected by XBZRLE.lock().
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}
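
/*
 * Usage sketch (assuming the QMP plumbing named in the comment above): a
 * "migrate-set-cache-size" request of 3 MiB lands here, is rounded down by
 * pow2floor() to 2 MiB, and that rounded figure is what the caller gets
 * back as the effective cache size.
 */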
/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    double xbzrle_cache_miss_rate;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;
static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}
uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

double xbzrle_mig_cache_miss_rate(void)
{
    return acct_info.xbzrle_cache_miss_rate;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}
/* This is the last block that we have visited searching for dirty pages */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;
struct CompressParam {
    bool start;
    bool done;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;
};
typedef struct CompressParam CompressParam;

struct DecompressParam {
    QemuMutex mutex;
    QemuCond cond;
    uint8_t *compbuf;
};
typedef struct DecompressParam DecompressParam;
static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex *comp_done_lock;
static QemuCond *comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };
static bool quit_comp_thread;
static bool quit_decomp_thread;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static uint8_t *compressed_data_buf;

static int do_compress_ram_page(CompressParam *param);
static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;

    while (!quit_comp_thread) {
        qemu_mutex_lock(&param->mutex);
        /* Re-check quit_comp_thread in case terminate_compression_threads
         * was called just before qemu_mutex_lock(&param->mutex) and after
         * while (!quit_comp_thread); re-checking it here makes sure the
         * compression thread terminates as expected.
         */
        while (!param->start && !quit_comp_thread) {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
        if (!quit_comp_thread) {
            do_compress_ram_page(param);
        }
        param->start = false;
        qemu_mutex_unlock(&param->mutex);

        qemu_mutex_lock(comp_done_lock);
        param->done = true;
        qemu_cond_signal(comp_done_cond);
        qemu_mutex_unlock(comp_done_lock);
    }

    return NULL;
}
static inline void terminate_compression_threads(void)
{
    int idx, thread_count;

    thread_count = migrate_compress_threads();
    quit_comp_thread = true;
    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        qemu_cond_signal(&comp_param[idx].cond);
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}
void migrate_compress_threads_join(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(compress_threads + i);
        qemu_fclose(comp_param[i].file);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
    }
    qemu_mutex_destroy(comp_done_lock);
    qemu_cond_destroy(comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    g_free(comp_done_cond);
    g_free(comp_done_lock);
    compress_threads = NULL;
    comp_param = NULL;
    comp_done_cond = NULL;
    comp_done_lock = NULL;
}
void migrate_compress_threads_create(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    quit_comp_thread = false;
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    comp_done_cond = g_new0(QemuCond, 1);
    comp_done_lock = g_new0(QemuMutex, 1);
    qemu_cond_init(comp_done_cond);
    qemu_mutex_init(comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}
/**
 * save_page_header: Write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns: Number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
{
    size_t size = 8;

    qemu_put_be64(f, offset);

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}
/* Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    if (ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
                 bitmap_sync_count);
}
#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage,
                            uint64_t *bytes_transferred)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
        acct_info.xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             bitmap_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_xbzrle;
    *bytes_transferred += bytes_xbzrle;

    return 1;
}
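
/*
 * On the wire an XBZRLE page is therefore:
 *   page header (offset | RAM_SAVE_FLAG_XBZRLE, plus idstr on a new block)
 *   1 byte  ENCODING_FLAG_XBZRLE
 *   2 bytes encoded_len (big endian)
 *   encoded_len bytes of encoded data
 * which is where the "+ encoded_len + 1 + 2" accounting above comes from.
 */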
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}
static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~migration_bitmap[k];
                migration_bitmap[k] |= src[k];
                new_dirty &= src[k];
                migration_dirty_pages += ctpopl(new_dirty);
                src[k] = 0;
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                cpu_physical_memory_reset_dirty(start + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
                migration_bitmap_set_dirty(start + addr);
            }
        }
    }
}
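
/*
 * Two paths above: when the range starts word-aligned in the bitmap, whole
 * longs from ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION] are OR-ed into
 * migration_bitmap and the newly-set bits are counted with ctpopl();
 * otherwise the range is walked page by page through
 * cpu_physical_memory_get_dirty()/_reset_dirty().
 */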
/* Fix me: there are too many global variables used in migration process. */
static int64_t start_time;
static int64_t bytes_xfer_prev;
static int64_t num_dirty_pages_period;

static void migration_bitmap_sync_init(void)
{
    start_time = 0;
    bytes_xfer_prev = 0;
    num_dirty_pages_period = 0;
}
/* Called with iothread lock held, to protect ram_list.dirty_memory[] */
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    int64_t end_time;
    int64_t bytes_xfer_now;
    static uint64_t xbzrle_cache_miss_prev;
    static uint64_t iterations_prev;

    bitmap_sync_count++;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
    }
    rcu_read_unlock();

    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes is 50% more than the approx.
               amount of bytes that just got transferred since the last time we
               were in this routine. If that happens >N times (for now N==4)
               we turn on the throttle down logic */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                 (bytes_xfer_now - bytes_xfer_prev) / 2) &&
                (dirty_rate_high_cnt++ > 4)) {
                trace_migration_throttle();
                mig_throttle_on = true;
                dirty_rate_high_cnt = 0;
            }
            bytes_xfer_prev = bytes_xfer_now;
        } else {
            mig_throttle_on = false;
        }
        if (migrate_use_xbzrle()) {
            if (iterations_prev != 0) {
                acct_info.xbzrle_cache_miss_rate =
                    (double)(acct_info.xbzrle_cache_miss -
                             xbzrle_cache_miss_prev) /
                    (acct_info.iterations - iterations_prev);
            }
            iterations_prev = acct_info.iterations;
            xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
        s->dirty_sync_count = bitmap_sync_count;
    }
}
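
/*
 * A worked illustration of the heuristic above (numbers invented for the
 * example): if ~1 GiB worth of pages was dirtied during the last period
 * while only 1.5 GiB was transferred, then 1 GiB > 0.75 GiB and the period
 * counts as "dirtying too fast"; once that has happened more than four
 * periods in a row, mig_throttle_on is switched on and the counter resets.
 */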
/**
 * save_zero_page: Send the zero page to the stream
 *
 * Returns: Number of pages written.
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @p: pointer to the page
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                          uint8_t *p, uint64_t *bytes_transferred)
{
    int pages = -1;

    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        acct_info.dup_pages++;
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_COMPRESS);
        qemu_put_byte(f, 0);
        *bytes_transferred += 1;
        pages = 1;
    }

    return pages;
}
/**
 * ram_save_page: Send the given page to the stream
 *
 * Returns: Number of pages written.
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                         bool last_stage, uint64_t *bytes_transferred)
{
    int pages = -1;
    uint64_t bytes_xmit;
    ram_addr_t current_addr;
    MemoryRegion *mr = block->mr;
    uint8_t *p;
    int ret;
    bool send_async = true;

    p = memory_region_get_ram_ptr(mr) + offset;

    /* In doubt sent page as normal */
    bytes_xmit = 0;
    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        *bytes_transferred += bytes_xmit;
        pages = 1;
    }

    XBZRLE_cache_lock();

    current_addr = block->offset + offset;

    if (block == last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                acct_info.dup_pages++;
            }
        }
    } else {
        pages = save_zero_page(f, block, offset, p, bytes_transferred);
        if (pages > 0) {
            /* Must let xbzrle know, otherwise a previous (now 0'd) cached
             * page would be stale
             */
            xbzrle_cache_zero_page(current_addr);
        } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
            pages = save_xbzrle_page(f, &p, current_addr, block,
                                     offset, last_stage, bytes_transferred);
            if (!last_stage) {
                /* Can't send this cached data async, since the cache page
                 * might get updated before it gets to the wire
                 */
                send_async = false;
            }
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_PAGE);
        if (send_async) {
            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
        } else {
            qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
        }
        *bytes_transferred += TARGET_PAGE_SIZE;
        pages = 1;
        acct_info.norm_pages++;
    }

    XBZRLE_cache_unlock();

    return pages;
}
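
/*
 * Order of preference in ram_save_page(): a control/RDMA hook if it
 * claimed the page, then the zero-page test, then an XBZRLE delta once
 * the bulk stage is over, and finally the plain RAM_SAVE_FLAG_PAGE copy.
 */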
static int do_compress_ram_page(CompressParam *param)
{
    int bytes_sent, blen;
    uint8_t *p;
    RAMBlock *block = param->block;
    ram_addr_t offset = param->offset;

    p = memory_region_get_ram_ptr(block->mr) + (offset & TARGET_PAGE_MASK);

    bytes_sent = save_page_header(param->file, block, offset |
                                  RAM_SAVE_FLAG_COMPRESS_PAGE);
    blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE,
                                     migrate_compress_level());
    bytes_sent += blen;

    return bytes_sent;
}
static inline void start_compression(CompressParam *param)
{
    param->done = false;
    qemu_mutex_lock(&param->mutex);
    param->start = true;
    qemu_cond_signal(&param->cond);
    qemu_mutex_unlock(&param->mutex);
}
static uint64_t bytes_transferred;

static void flush_compressed_data(QEMUFile *f)
{
    int idx, len, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_compress_threads();
    for (idx = 0; idx < thread_count; idx++) {
        if (!comp_param[idx].done) {
            qemu_mutex_lock(comp_done_lock);
            while (!comp_param[idx].done && !quit_comp_thread) {
                qemu_cond_wait(comp_done_cond, comp_done_lock);
            }
            qemu_mutex_unlock(comp_done_lock);
        }
        if (!quit_comp_thread) {
            len = qemu_put_qemu_file(f, comp_param[idx].file);
            bytes_transferred += len;
        }
    }
}
static inline void set_compress_params(CompressParam *param, RAMBlock *block,
                                       ram_addr_t offset)
{
    param->block = block;
    param->offset = offset;
}
static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
                                           ram_addr_t offset,
                                           uint64_t *bytes_transferred)
{
    int idx, thread_count, bytes_xmit = -1, pages = -1;

    thread_count = migrate_compress_threads();
    qemu_mutex_lock(comp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (comp_param[idx].done) {
                bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
                set_compress_params(&comp_param[idx], block, offset);
                start_compression(&comp_param[idx]);
                pages = 1;
                acct_info.norm_pages++;
                *bytes_transferred += bytes_xmit;
                break;
            }
        }
        if (pages > 0) {
            break;
        } else {
            qemu_cond_wait(comp_done_cond, comp_done_lock);
        }
    }
    qemu_mutex_unlock(comp_done_lock);

    return pages;
}
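
/*
 * Per-page flow above: find an idle worker (done == true), drain whatever
 * its private buffer file already holds onto the real stream, hand it the
 * new (block, offset) and kick it; if every worker is busy, wait on
 * comp_done_cond until one signals completion.
 */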
/**
 * ram_save_compressed_page: compress the given page and send it to the stream
 *
 * Returns: Number of pages written.
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block,
                                    ram_addr_t offset, bool last_stage,
                                    uint64_t *bytes_transferred)
{
    int pages = -1;
    uint64_t bytes_xmit;
    MemoryRegion *mr = block->mr;
    uint8_t *p;
    int ret;

    p = memory_region_get_ram_ptr(mr) + offset;

    bytes_xmit = 0;
    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        *bytes_transferred += bytes_xmit;
        pages = 1;
    }
    if (block == last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                acct_info.dup_pages++;
            }
        }
    } else {
        /* When starting the process of a new block, the first page of
         * the block should be sent out before other pages in the same
         * block, and all the pages in last block should have been sent
         * out, keeping this order is important, because the 'cont' flag
         * is used to avoid resending the block name.
         */
        if (block != last_sent_block) {
            flush_compressed_data(f);
            pages = save_zero_page(f, block, offset, p, bytes_transferred);
            if (pages == -1) {
                set_compress_params(&comp_param[0], block, offset);
                /* Use the qemu thread to compress the data to make sure the
                 * first page is sent out before other pages
                 */
                bytes_xmit = do_compress_ram_page(&comp_param[0]);
                acct_info.norm_pages++;
                qemu_put_qemu_file(f, comp_param[0].file);
                *bytes_transferred += bytes_xmit;
                pages = 1;
            }
        } else {
            pages = save_zero_page(f, block, offset, p, bytes_transferred);
            if (pages == -1) {
                pages = compress_page_with_multi_thread(f, block, offset,
                                                        bytes_transferred);
            }
        }
    }

    return pages;
}
/**
 * ram_find_and_save_block: Finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns: The number of pages written
 *          0 means no dirty pages
 *
 * @f: QEMUFile where to send the data
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
                                   uint64_t *bytes_transferred)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int pages = 0;
    MemoryRegion *mr;

    if (!block) {
        block = QLIST_FIRST_RCU(&ram_list.blocks);
    }

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->used_length) {
            offset = 0;
            block = QLIST_NEXT_RCU(block, next);
            if (!block) {
                block = QLIST_FIRST_RCU(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            if (migrate_use_compression()) {
                pages = ram_save_compressed_page(f, block, offset, last_stage,
                                                 bytes_transferred);
            } else {
                pages = ram_save_page(f, block, offset, last_stage,
                                      bytes_transferred);
            }

            /* if page is unmodified, continue to the next */
            if (pages > 0) {
                last_sent_block = block;
                break;
            }
        }
    }

    last_seen_block = block;
    last_offset = offset;

    return pages;
}
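
/*
 * The scan resumes where the previous call stopped (last_seen_block /
 * last_offset) and wraps around the block list at most once per call;
 * completing a full round also clears ram_bulk_stage, after which the
 * dirty bitmap is consulted properly and XBZRLE becomes usable.
 */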
void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}
static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}
uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
        total += block->used_length;
    rcu_read_unlock();
    return total;
}
void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}
static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
    XBZRLE_cache_unlock();
}
static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}
#define MAX_WAIT 50 /* ms, half buffered_file limit */

/* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * a long-running RCU critical section.  When rcu-reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */

static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */

    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;
    bitmap_sync_count = 0;
    migration_bitmap_sync_init();

    if (migrate_use_xbzrle()) {
        XBZRLE_cache_lock();
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            XBZRLE_cache_unlock();
            error_report("Error creating cache");
            return -1;
        }
        XBZRLE_cache_unlock();

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            error_report("Error allocating encoded_buf");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            error_report("Error allocating current_buf");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    /* iothread lock needed for ram_list.dirty_memory[] */
    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    rcu_read_lock();
    bytes_transferred = 0;
    reset_ram_globals();

    ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
    migration_bitmap = bitmap_new(ram_bitmap_pages);
    bitmap_set(migration_bitmap, 0, ram_bitmap_pages);

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_ramlist();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->used_length);
    }

    rcu_read_unlock();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
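
/*
 * The setup section as written above is, on the wire:
 *   be64  total ram size, tagged with RAM_SAVE_FLAG_MEM_SIZE
 *   per block: one byte of idstr length, the idstr bytes, be64 used_length
 *   be64  RAM_SAVE_FLAG_EOS
 * which is exactly what the RAM_SAVE_FLAG_MEM_SIZE case of ram_load()
 * walks through on the destination.
 */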
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int pages_sent = 0;

    rcu_read_lock();
    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    /* Read version before ram_list.blocks */
    smp_rmb();

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int pages;

        pages = ram_find_and_save_block(f, false, &bytes_transferred);
        /* no more pages to send */
        if (pages == 0) {
            break;
        }
        pages_sent += pages;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check each some
           iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }
    flush_compressed_data(f);
    rcu_read_unlock();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return pages_sent;
}
/* Called with iothread lock */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    rcu_read_lock();

    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int pages;

        pages = ram_find_and_save_block(f, true, &bytes_transferred);
        /* no more blocks to send */
        if (pages == 0) {
            break;
        }
    }

    flush_compressed_data(f);
    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    rcu_read_unlock();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        rcu_read_lock();
        migration_bitmap_sync();
        rcu_read_unlock();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}
/* Must be called from within a rcu critical section.
 * Returns a pointer from within the RCU-protected ram_list.
 */
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block || block->max_length <= offset) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id)) &&
            block->max_length > offset) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    error_report("Can't find block %s!", id);
    return NULL;
}
/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}
static void *do_data_decompress(void *opaque)
{
    while (!quit_decomp_thread) {
        /* To be done */
    }

    return NULL;
}
void migrate_decompress_threads_create(void)
{
    int i, thread_count;

    thread_count = migrate_decompress_threads();
    decompress_threads = g_new0(QemuThread, thread_count);
    decomp_param = g_new0(DecompressParam, thread_count);
    compressed_data_buf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
    quit_decomp_thread = false;
    for (i = 0; i < thread_count; i++) {
        qemu_mutex_init(&decomp_param[i].mutex);
        qemu_cond_init(&decomp_param[i].cond);
        decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
        qemu_thread_create(decompress_threads + i, "decompress",
                           do_data_decompress, decomp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}
void migrate_decompress_threads_join(void)
{
    int i, thread_count;

    quit_decomp_thread = true;
    thread_count = migrate_decompress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(decompress_threads + i);
        qemu_mutex_destroy(&decomp_param[i].mutex);
        qemu_cond_destroy(&decomp_param[i].cond);
        g_free(decomp_param[i].compbuf);
    }
    g_free(decompress_threads);
    g_free(decomp_param);
    g_free(compressed_data_buf);
    decompress_threads = NULL;
    decomp_param = NULL;
    compressed_data_buf = NULL;
}
static void decompress_data_with_multi_threads(uint8_t *compbuf,
                                               void *host, int len)
{
    /* To be done */
}
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0;
    static uint64_t seq_iter;
    int len = 0;

    seq_iter++;

    if (version_id != 4) {
        ret = -EINVAL;
    }

    /* This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    rcu_read_lock();
    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                uint8_t len;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
                    if (!strncmp(id, block->idstr, sizeof(id))) {
                        if (length != block->used_length) {
                            Error *local_err = NULL;

                            ret = qemu_ram_resize(block->offset, length,
                                                  &local_err);
                            if (local_err) {
                                error_report_err(local_err);
                            }
                        }
                        break;
                    }
                }

                if (!block) {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;
        case RAM_SAVE_FLAG_COMPRESS:
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;
        case RAM_SAVE_FLAG_PAGE:
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;
        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Invalid RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }

            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            qemu_get_buffer(f, compressed_data_buf, len);
            decompress_data_with_multi_threads(compressed_data_buf, host, len);
            break;
        case RAM_SAVE_FLAG_XBZRLE:
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, flags);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    rcu_read_unlock();
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}
static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};
void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
}
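
/*
 * The section version registered here (4) is the same value ram_load()
 * insists on; bumping one without the other would make every incoming
 * migration fail the version_id check.
 */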
struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[9];
static int soundhw_count;
void isa_register_soundhw(const char *name, const char *descr,
                          int (*init_isa)(ISABus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 1;
    soundhw[soundhw_count].init.init_isa = init_isa;
    soundhw_count++;
}
void pci_register_soundhw(const char *name, const char *descr,
                          int (*init_pci)(PCIBus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 0;
    soundhw[soundhw_count].init.init_pci = init_pci;
    soundhw_count++;
}
void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

        if (soundhw_count) {
            printf("Valid sound card names (comma separated):\n");
            for (c = soundhw; c->name; ++c) {
                printf("%-11s %s\n", c->name, c->descr);
            }
            printf("\n-soundhw all will enable all of the above\n");
        } else {
            printf("Machine has no user-selectable audio hardware "
                   "(it may or may not have always-present audio hardware).\n");
        }
        exit(!is_help_option(optarg));
    } else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
        } else {
            p = optarg;
            while (*p) {
                e = strchr(p, ',');
                l = !e ? strlen(p) : (size_t) (e - p);

                for (c = soundhw; c->name; ++c) {
                    if (!strncmp(c->name, p, l) && !c->name[l]) {
                        c->enabled = 1;
                        break;
                    }
                }

                if (!c->name) {
                    if (l > 80) {
                        error_report("Unknown sound card name (too big to show)");
                    } else {
                        error_report("Unknown sound card name `%.*s'",
                                     (int) l, p);
                    }
                    bad_card = 1;
                }
                p += l + (e != NULL);
            }

            if (bad_card) {
                goto show_valid_cards;
            }
        }
    }
}
void audio_init(void)
{
    struct soundhw *c;
    ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
    PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (!isa_bus) {
                    error_report("ISA bus not available for %s", c->name);
                    exit(1);
                }
                c->init.init_isa(isa_bus);
            } else {
                if (!pci_bus) {
                    error_report("PCI bus not available for %s", c->name);
                    exit(1);
                }
                c->init.init_pci(pci_bus);
            }
        }
    }
}
int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
    return 0;
}
void do_acpitable_option(const QemuOpts *opts)
{
#ifdef TARGET_I386
    Error *err = NULL;

    acpi_table_add(opts, &err);
    if (err) {
        error_report("Wrong acpi table provided: %s",
                     error_get_pretty(err));
        error_free(err);
        exit(1);
    }
#endif
}
void do_smbios_option(QemuOpts *opts)
{
#ifdef TARGET_I386
    smbios_entry_add(opts);
#endif
}
void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}
int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}
TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = g_strdup(TARGET_NAME);

    return info;
}
/* Stub function that gets run on the vcpu when it's brought out of the
   VM to run inside qemu via async_run_on_cpu() */
static void mig_sleep_cpu(void *opq)
{
    qemu_mutex_unlock_iothread();
    g_usleep(30*1000);
    qemu_mutex_lock_iothread();
}
/* To reduce the dirty rate explicitly disallow the VCPUs from spending
   much time in the VM. The migration thread will try to catch up.
   Workload will experience a performance drop.
*/
static void mig_throttle_guest_down(void)
{
    CPUState *cpu;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
    }
    qemu_mutex_unlock_iothread();
}
static void check_guest_throttling(void)
{
    static int64_t t0;
    int64_t t1;

    if (!mig_throttle_on) {
        return;
    }

    if (!t0) {
        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        return;
    }

    t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    /* If it has been more than 40 ms since the last time the guest
     * was throttled then do it again.
     */
    if (40 < (t1-t0)/1000000) {
        mig_throttle_guest_down();
        t0 = t1;
    }
}