4 * Copyright (c) 2003-2008 Fabrice Bellard
5 * Copyright (c) 2011-2015 Red Hat Inc
8 * Juan Quintela <quintela@redhat.com>
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
29 #include "qemu/osdep.h"
30 #include "qemu/cutils.h"
31 #include "qemu/bitops.h"
32 #include "qemu/bitmap.h"
33 #include "qemu/main-loop.h"
36 #include "migration.h"
37 #include "migration/register.h"
38 #include "migration/misc.h"
39 #include "qemu-file.h"
40 #include "postcopy-ram.h"
41 #include "page_cache.h"
42 #include "qemu/error-report.h"
43 #include "qapi/error.h"
44 #include "qapi/qapi-types-migration.h"
45 #include "qapi/qapi-events-migration.h"
46 #include "qapi/qmp/qerror.h"
48 #include "exec/ram_addr.h"
49 #include "exec/target_page.h"
50 #include "qemu/rcu_queue.h"
51 #include "migration/colo.h"
53 #include "sysemu/cpu-throttle.h"
57 #include "sysemu/runstate.h"
59 #include "hw/boards.h" /* for machine_dump_guest_core() */
61 #if defined(__linux__)
62 #include "qemu/userfaultfd.h"
63 #endif /* defined(__linux__) */
65 /***********************************************************/
66 /* ram save/restore */
68 /* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
69 * worked for pages that where filled with the same char. We switched
70 * it to only search for the zero value. And to avoid confusion with
71 * RAM_SSAVE_FLAG_COMPRESS_PAGE just rename it.
74 #define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
75 #define RAM_SAVE_FLAG_ZERO 0x02
76 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
77 #define RAM_SAVE_FLAG_PAGE 0x08
78 #define RAM_SAVE_FLAG_EOS 0x10
79 #define RAM_SAVE_FLAG_CONTINUE 0x20
80 #define RAM_SAVE_FLAG_XBZRLE 0x40
81 /* 0x80 is reserved in migration.h start with 0x100 next */
82 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
84 XBZRLECacheStats xbzrle_counters
;
86 /* struct contains XBZRLE cache and a static page
87 used by the compression */
89 /* buffer used for XBZRLE encoding */
91 /* buffer for storing page content */
93 /* Cache for XBZRLE, Protected by lock. */
96 /* it will store a page full of zeros */
97 uint8_t *zero_target_page
;
98 /* buffer used for XBZRLE decoding */
102 static void XBZRLE_cache_lock(void)
104 if (migrate_use_xbzrle()) {
105 qemu_mutex_lock(&XBZRLE
.lock
);
109 static void XBZRLE_cache_unlock(void)
111 if (migrate_use_xbzrle()) {
112 qemu_mutex_unlock(&XBZRLE
.lock
);
117 * xbzrle_cache_resize: resize the xbzrle cache
119 * This function is called from migrate_params_apply in main
120 * thread, possibly while a migration is in progress. A running
121 * migration may be using the cache and might finish during this call,
122 * hence changes to the cache are protected by XBZRLE.lock().
124 * Returns 0 for success or -1 for error
126 * @new_size: new cache size
127 * @errp: set *errp if the check failed, with reason
129 int xbzrle_cache_resize(uint64_t new_size
, Error
**errp
)
131 PageCache
*new_cache
;
134 /* Check for truncation */
135 if (new_size
!= (size_t)new_size
) {
136 error_setg(errp
, QERR_INVALID_PARAMETER_VALUE
, "cache size",
137 "exceeding address space");
141 if (new_size
== migrate_xbzrle_cache_size()) {
148 if (XBZRLE
.cache
!= NULL
) {
149 new_cache
= cache_init(new_size
, TARGET_PAGE_SIZE
, errp
);
155 cache_fini(XBZRLE
.cache
);
156 XBZRLE
.cache
= new_cache
;
159 XBZRLE_cache_unlock();
163 bool ramblock_is_ignored(RAMBlock
*block
)
165 return !qemu_ram_is_migratable(block
) ||
166 (migrate_ignore_shared() && qemu_ram_is_shared(block
));
169 #undef RAMBLOCK_FOREACH
171 int foreach_not_ignored_block(RAMBlockIterFunc func
, void *opaque
)
176 RCU_READ_LOCK_GUARD();
178 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
179 ret
= func(block
, opaque
);
187 static void ramblock_recv_map_init(void)
191 RAMBLOCK_FOREACH_NOT_IGNORED(rb
) {
192 assert(!rb
->receivedmap
);
193 rb
->receivedmap
= bitmap_new(rb
->max_length
>> qemu_target_page_bits());
197 int ramblock_recv_bitmap_test(RAMBlock
*rb
, void *host_addr
)
199 return test_bit(ramblock_recv_bitmap_offset(host_addr
, rb
),
203 bool ramblock_recv_bitmap_test_byte_offset(RAMBlock
*rb
, uint64_t byte_offset
)
205 return test_bit(byte_offset
>> TARGET_PAGE_BITS
, rb
->receivedmap
);
208 void ramblock_recv_bitmap_set(RAMBlock
*rb
, void *host_addr
)
210 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr
, rb
), rb
->receivedmap
);
213 void ramblock_recv_bitmap_set_range(RAMBlock
*rb
, void *host_addr
,
216 bitmap_set_atomic(rb
->receivedmap
,
217 ramblock_recv_bitmap_offset(host_addr
, rb
),
221 #define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)
224 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
226 * Returns >0 if success with sent bytes, or <0 if error.
228 int64_t ramblock_recv_bitmap_send(QEMUFile
*file
,
229 const char *block_name
)
231 RAMBlock
*block
= qemu_ram_block_by_name(block_name
);
232 unsigned long *le_bitmap
, nbits
;
236 error_report("%s: invalid block name: %s", __func__
, block_name
);
240 nbits
= block
->postcopy_length
>> TARGET_PAGE_BITS
;
243 * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
244 * machines we may need 4 more bytes for padding (see below
245 * comment). So extend it a bit before hand.
247 le_bitmap
= bitmap_new(nbits
+ BITS_PER_LONG
);
250 * Always use little endian when sending the bitmap. This is
251 * required that when source and destination VMs are not using the
252 * same endianness. (Note: big endian won't work.)
254 bitmap_to_le(le_bitmap
, block
->receivedmap
, nbits
);
256 /* Size of the bitmap, in bytes */
257 size
= DIV_ROUND_UP(nbits
, 8);
260 * size is always aligned to 8 bytes for 64bit machines, but it
261 * may not be true for 32bit machines. We need this padding to
262 * make sure the migration can survive even between 32bit and
265 size
= ROUND_UP(size
, 8);
267 qemu_put_be64(file
, size
);
268 qemu_put_buffer(file
, (const uint8_t *)le_bitmap
, size
);
270 * Mark as an end, in case the middle part is screwed up due to
271 * some "mysterious" reason.
273 qemu_put_be64(file
, RAMBLOCK_RECV_BITMAP_ENDING
);
278 if (qemu_file_get_error(file
)) {
279 return qemu_file_get_error(file
);
282 return size
+ sizeof(size
);
286 * An outstanding page request, on the source, having been received
289 struct RAMSrcPageRequest
{
294 QSIMPLEQ_ENTRY(RAMSrcPageRequest
) next_req
;
297 /* State of RAM for migration */
299 /* QEMUFile used for this migration */
301 /* UFFD file descriptor, used in 'write-tracking' migration */
303 /* Last block that we have visited searching for dirty pages */
304 RAMBlock
*last_seen_block
;
305 /* Last block from where we have sent data */
306 RAMBlock
*last_sent_block
;
307 /* Last dirty target page we have sent */
308 ram_addr_t last_page
;
309 /* last ram version we have seen */
310 uint32_t last_version
;
311 /* How many times we have dirty too many pages */
312 int dirty_rate_high_cnt
;
313 /* these variables are used for bitmap sync */
314 /* last time we did a full bitmap_sync */
315 int64_t time_last_bitmap_sync
;
316 /* bytes transferred at start_time */
317 uint64_t bytes_xfer_prev
;
318 /* number of dirty pages since start_time */
319 uint64_t num_dirty_pages_period
;
320 /* xbzrle misses since the beginning of the period */
321 uint64_t xbzrle_cache_miss_prev
;
322 /* Amount of xbzrle pages since the beginning of the period */
323 uint64_t xbzrle_pages_prev
;
324 /* Amount of xbzrle encoded bytes since the beginning of the period */
325 uint64_t xbzrle_bytes_prev
;
326 /* Start using XBZRLE (e.g., after the first round). */
328 /* Are we on the last stage of migration */
330 /* compression statistics since the beginning of the period */
331 /* amount of count that no free thread to compress data */
332 uint64_t compress_thread_busy_prev
;
333 /* amount bytes after compression */
334 uint64_t compressed_size_prev
;
335 /* amount of compressed pages */
336 uint64_t compress_pages_prev
;
338 /* total handled target pages at the beginning of period */
339 uint64_t target_page_count_prev
;
340 /* total handled target pages since start */
341 uint64_t target_page_count
;
342 /* number of dirty bits in the bitmap */
343 uint64_t migration_dirty_pages
;
344 /* Protects modification of the bitmap and migration dirty pages */
345 QemuMutex bitmap_mutex
;
346 /* The RAMBlock used in the last src_page_requests */
347 RAMBlock
*last_req_rb
;
348 /* Queue of outstanding page requests from the destination */
349 QemuMutex src_page_req_mutex
;
350 QSIMPLEQ_HEAD(, RAMSrcPageRequest
) src_page_requests
;
352 typedef struct RAMState RAMState
;
354 static RAMState
*ram_state
;
356 static NotifierWithReturnList precopy_notifier_list
;
358 void precopy_infrastructure_init(void)
360 notifier_with_return_list_init(&precopy_notifier_list
);
363 void precopy_add_notifier(NotifierWithReturn
*n
)
365 notifier_with_return_list_add(&precopy_notifier_list
, n
);
368 void precopy_remove_notifier(NotifierWithReturn
*n
)
370 notifier_with_return_remove(n
);
373 int precopy_notify(PrecopyNotifyReason reason
, Error
**errp
)
375 PrecopyNotifyData pnd
;
379 return notifier_with_return_list_notify(&precopy_notifier_list
, &pnd
);
382 uint64_t ram_bytes_remaining(void)
384 return ram_state
? (ram_state
->migration_dirty_pages
* TARGET_PAGE_SIZE
) :
388 MigrationStats ram_counters
;
390 /* used by the search for pages to send */
391 struct PageSearchStatus
{
392 /* Current block being searched */
394 /* Current page to search from */
396 /* Set once we wrap around */
399 typedef struct PageSearchStatus PageSearchStatus
;
401 CompressionStats compression_counters
;
403 struct CompressParam
{
413 /* internally used fields */
417 typedef struct CompressParam CompressParam
;
419 struct DecompressParam
{
429 typedef struct DecompressParam DecompressParam
;
431 static CompressParam
*comp_param
;
432 static QemuThread
*compress_threads
;
433 /* comp_done_cond is used to wake up the migration thread when
434 * one of the compression threads has finished the compression.
435 * comp_done_lock is used to co-work with comp_done_cond.
437 static QemuMutex comp_done_lock
;
438 static QemuCond comp_done_cond
;
439 /* The empty QEMUFileOps will be used by file in CompressParam */
440 static const QEMUFileOps empty_ops
= { };
442 static QEMUFile
*decomp_file
;
443 static DecompressParam
*decomp_param
;
444 static QemuThread
*decompress_threads
;
445 static QemuMutex decomp_done_lock
;
446 static QemuCond decomp_done_cond
;
448 static bool do_compress_ram_page(QEMUFile
*f
, z_stream
*stream
, RAMBlock
*block
,
449 ram_addr_t offset
, uint8_t *source_buf
);
451 static void *do_data_compress(void *opaque
)
453 CompressParam
*param
= opaque
;
458 qemu_mutex_lock(¶m
->mutex
);
459 while (!param
->quit
) {
461 block
= param
->block
;
462 offset
= param
->offset
;
464 qemu_mutex_unlock(¶m
->mutex
);
466 zero_page
= do_compress_ram_page(param
->file
, ¶m
->stream
,
467 block
, offset
, param
->originbuf
);
469 qemu_mutex_lock(&comp_done_lock
);
471 param
->zero_page
= zero_page
;
472 qemu_cond_signal(&comp_done_cond
);
473 qemu_mutex_unlock(&comp_done_lock
);
475 qemu_mutex_lock(¶m
->mutex
);
477 qemu_cond_wait(¶m
->cond
, ¶m
->mutex
);
480 qemu_mutex_unlock(¶m
->mutex
);
485 static void compress_threads_save_cleanup(void)
489 if (!migrate_use_compression() || !comp_param
) {
493 thread_count
= migrate_compress_threads();
494 for (i
= 0; i
< thread_count
; i
++) {
496 * we use it as a indicator which shows if the thread is
497 * properly init'd or not
499 if (!comp_param
[i
].file
) {
503 qemu_mutex_lock(&comp_param
[i
].mutex
);
504 comp_param
[i
].quit
= true;
505 qemu_cond_signal(&comp_param
[i
].cond
);
506 qemu_mutex_unlock(&comp_param
[i
].mutex
);
508 qemu_thread_join(compress_threads
+ i
);
509 qemu_mutex_destroy(&comp_param
[i
].mutex
);
510 qemu_cond_destroy(&comp_param
[i
].cond
);
511 deflateEnd(&comp_param
[i
].stream
);
512 g_free(comp_param
[i
].originbuf
);
513 qemu_fclose(comp_param
[i
].file
);
514 comp_param
[i
].file
= NULL
;
516 qemu_mutex_destroy(&comp_done_lock
);
517 qemu_cond_destroy(&comp_done_cond
);
518 g_free(compress_threads
);
520 compress_threads
= NULL
;
524 static int compress_threads_save_setup(void)
528 if (!migrate_use_compression()) {
531 thread_count
= migrate_compress_threads();
532 compress_threads
= g_new0(QemuThread
, thread_count
);
533 comp_param
= g_new0(CompressParam
, thread_count
);
534 qemu_cond_init(&comp_done_cond
);
535 qemu_mutex_init(&comp_done_lock
);
536 for (i
= 0; i
< thread_count
; i
++) {
537 comp_param
[i
].originbuf
= g_try_malloc(TARGET_PAGE_SIZE
);
538 if (!comp_param
[i
].originbuf
) {
542 if (deflateInit(&comp_param
[i
].stream
,
543 migrate_compress_level()) != Z_OK
) {
544 g_free(comp_param
[i
].originbuf
);
548 /* comp_param[i].file is just used as a dummy buffer to save data,
549 * set its ops to empty.
551 comp_param
[i
].file
= qemu_fopen_ops(NULL
, &empty_ops
, false);
552 comp_param
[i
].done
= true;
553 comp_param
[i
].quit
= false;
554 qemu_mutex_init(&comp_param
[i
].mutex
);
555 qemu_cond_init(&comp_param
[i
].cond
);
556 qemu_thread_create(compress_threads
+ i
, "compress",
557 do_data_compress
, comp_param
+ i
,
558 QEMU_THREAD_JOINABLE
);
563 compress_threads_save_cleanup();
568 * save_page_header: write page header to wire
570 * If this is the 1st block, it also writes the block identification
572 * Returns the number of bytes written
574 * @f: QEMUFile where to send the data
575 * @block: block that contains the page we want to send
576 * @offset: offset inside the block for the page
577 * in the lower bits, it contains flags
579 static size_t save_page_header(RAMState
*rs
, QEMUFile
*f
, RAMBlock
*block
,
584 if (block
== rs
->last_sent_block
) {
585 offset
|= RAM_SAVE_FLAG_CONTINUE
;
587 qemu_put_be64(f
, offset
);
590 if (!(offset
& RAM_SAVE_FLAG_CONTINUE
)) {
591 len
= strlen(block
->idstr
);
592 qemu_put_byte(f
, len
);
593 qemu_put_buffer(f
, (uint8_t *)block
->idstr
, len
);
595 rs
->last_sent_block
= block
;
601 * mig_throttle_guest_down: throttle down the guest
603 * Reduce amount of guest cpu execution to hopefully slow down memory
604 * writes. If guest dirty memory rate is reduced below the rate at
605 * which we can transfer pages to the destination then we should be
606 * able to complete migration. Some workloads dirty memory way too
607 * fast and will not effectively converge, even with auto-converge.
609 static void mig_throttle_guest_down(uint64_t bytes_dirty_period
,
610 uint64_t bytes_dirty_threshold
)
612 MigrationState
*s
= migrate_get_current();
613 uint64_t pct_initial
= s
->parameters
.cpu_throttle_initial
;
614 uint64_t pct_increment
= s
->parameters
.cpu_throttle_increment
;
615 bool pct_tailslow
= s
->parameters
.cpu_throttle_tailslow
;
616 int pct_max
= s
->parameters
.max_cpu_throttle
;
618 uint64_t throttle_now
= cpu_throttle_get_percentage();
619 uint64_t cpu_now
, cpu_ideal
, throttle_inc
;
621 /* We have not started throttling yet. Let's start it. */
622 if (!cpu_throttle_active()) {
623 cpu_throttle_set(pct_initial
);
625 /* Throttling already on, just increase the rate */
627 throttle_inc
= pct_increment
;
629 /* Compute the ideal CPU percentage used by Guest, which may
630 * make the dirty rate match the dirty rate threshold. */
631 cpu_now
= 100 - throttle_now
;
632 cpu_ideal
= cpu_now
* (bytes_dirty_threshold
* 1.0 /
634 throttle_inc
= MIN(cpu_now
- cpu_ideal
, pct_increment
);
636 cpu_throttle_set(MIN(throttle_now
+ throttle_inc
, pct_max
));
640 void mig_throttle_counter_reset(void)
642 RAMState
*rs
= ram_state
;
644 rs
->time_last_bitmap_sync
= qemu_clock_get_ms(QEMU_CLOCK_REALTIME
);
645 rs
->num_dirty_pages_period
= 0;
646 rs
->bytes_xfer_prev
= ram_counters
.transferred
;
650 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
652 * @rs: current RAM state
653 * @current_addr: address for the zero page
655 * Update the xbzrle cache to reflect a page that's been sent as all 0.
656 * The important thing is that a stale (not-yet-0'd) page be replaced
658 * As a bonus, if the page wasn't in the cache it gets added so that
659 * when a small write is made into the 0'd page it gets XBZRLE sent.
661 static void xbzrle_cache_zero_page(RAMState
*rs
, ram_addr_t current_addr
)
663 if (!rs
->xbzrle_enabled
) {
667 /* We don't care if this fails to allocate a new cache page
668 * as long as it updated an old one */
669 cache_insert(XBZRLE
.cache
, current_addr
, XBZRLE
.zero_target_page
,
670 ram_counters
.dirty_sync_count
);
673 #define ENCODING_FLAG_XBZRLE 0x1
676 * save_xbzrle_page: compress and send current page
678 * Returns: 1 means that we wrote the page
679 * 0 means that page is identical to the one already sent
680 * -1 means that xbzrle would be longer than normal
682 * @rs: current RAM state
683 * @current_data: pointer to the address of the page contents
684 * @current_addr: addr of the page
685 * @block: block that contains the page we want to send
686 * @offset: offset inside the block for the page
688 static int save_xbzrle_page(RAMState
*rs
, uint8_t **current_data
,
689 ram_addr_t current_addr
, RAMBlock
*block
,
692 int encoded_len
= 0, bytes_xbzrle
;
693 uint8_t *prev_cached_page
;
695 if (!cache_is_cached(XBZRLE
.cache
, current_addr
,
696 ram_counters
.dirty_sync_count
)) {
697 xbzrle_counters
.cache_miss
++;
698 if (!rs
->last_stage
) {
699 if (cache_insert(XBZRLE
.cache
, current_addr
, *current_data
,
700 ram_counters
.dirty_sync_count
) == -1) {
703 /* update *current_data when the page has been
704 inserted into cache */
705 *current_data
= get_cached_data(XBZRLE
.cache
, current_addr
);
712 * Reaching here means the page has hit the xbzrle cache, no matter what
713 * encoding result it is (normal encoding, overflow or skipping the page),
714 * count the page as encoded. This is used to calculate the encoding rate.
716 * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
717 * 2nd page turns out to be skipped (i.e. no new bytes written to the
718 * page), the overall encoding rate will be 8KB / 2KB = 4, which has the
719 * skipped page included. In this way, the encoding rate can tell if the
720 * guest page is good for xbzrle encoding.
722 xbzrle_counters
.pages
++;
723 prev_cached_page
= get_cached_data(XBZRLE
.cache
, current_addr
);
725 /* save current buffer into memory */
726 memcpy(XBZRLE
.current_buf
, *current_data
, TARGET_PAGE_SIZE
);
728 /* XBZRLE encoding (if there is no overflow) */
729 encoded_len
= xbzrle_encode_buffer(prev_cached_page
, XBZRLE
.current_buf
,
730 TARGET_PAGE_SIZE
, XBZRLE
.encoded_buf
,
734 * Update the cache contents, so that it corresponds to the data
735 * sent, in all cases except where we skip the page.
737 if (!rs
->last_stage
&& encoded_len
!= 0) {
738 memcpy(prev_cached_page
, XBZRLE
.current_buf
, TARGET_PAGE_SIZE
);
740 * In the case where we couldn't compress, ensure that the caller
741 * sends the data from the cache, since the guest might have
742 * changed the RAM since we copied it.
744 *current_data
= prev_cached_page
;
747 if (encoded_len
== 0) {
748 trace_save_xbzrle_page_skipping();
750 } else if (encoded_len
== -1) {
751 trace_save_xbzrle_page_overflow();
752 xbzrle_counters
.overflow
++;
753 xbzrle_counters
.bytes
+= TARGET_PAGE_SIZE
;
757 /* Send XBZRLE based compressed page */
758 bytes_xbzrle
= save_page_header(rs
, rs
->f
, block
,
759 offset
| RAM_SAVE_FLAG_XBZRLE
);
760 qemu_put_byte(rs
->f
, ENCODING_FLAG_XBZRLE
);
761 qemu_put_be16(rs
->f
, encoded_len
);
762 qemu_put_buffer(rs
->f
, XBZRLE
.encoded_buf
, encoded_len
);
763 bytes_xbzrle
+= encoded_len
+ 1 + 2;
765 * Like compressed_size (please see update_compress_thread_counts),
766 * the xbzrle encoded bytes don't count the 8 byte header with
767 * RAM_SAVE_FLAG_CONTINUE.
769 xbzrle_counters
.bytes
+= bytes_xbzrle
- 8;
770 ram_counters
.transferred
+= bytes_xbzrle
;
776 * migration_bitmap_find_dirty: find the next dirty page from start
778 * Returns the page offset within memory region of the start of a dirty page
780 * @rs: current RAM state
781 * @rb: RAMBlock where to search for dirty pages
782 * @start: page where we start the search
785 unsigned long migration_bitmap_find_dirty(RAMState
*rs
, RAMBlock
*rb
,
788 unsigned long size
= rb
->used_length
>> TARGET_PAGE_BITS
;
789 unsigned long *bitmap
= rb
->bmap
;
791 if (ramblock_is_ignored(rb
)) {
795 return find_next_bit(bitmap
, size
, start
);
798 static void migration_clear_memory_region_dirty_bitmap(RAMBlock
*rb
,
804 if (!rb
->clear_bmap
|| !clear_bmap_test_and_clear(rb
, page
)) {
808 shift
= rb
->clear_bmap_shift
;
810 * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
811 * can make things easier sometimes since then start address
812 * of the small chunk will always be 64 pages aligned so the
813 * bitmap will always be aligned to unsigned long. We should
814 * even be able to remove this restriction but I'm simply
819 size
= 1ULL << (TARGET_PAGE_BITS
+ shift
);
820 start
= QEMU_ALIGN_DOWN((ram_addr_t
)page
<< TARGET_PAGE_BITS
, size
);
821 trace_migration_bitmap_clear_dirty(rb
->idstr
, start
, size
, page
);
822 memory_region_clear_dirty_bitmap(rb
->mr
, start
, size
);
826 migration_clear_memory_region_dirty_bitmap_range(RAMBlock
*rb
,
828 unsigned long npages
)
830 unsigned long i
, chunk_pages
= 1UL << rb
->clear_bmap_shift
;
831 unsigned long chunk_start
= QEMU_ALIGN_DOWN(start
, chunk_pages
);
832 unsigned long chunk_end
= QEMU_ALIGN_UP(start
+ npages
, chunk_pages
);
835 * Clear pages from start to start + npages - 1, so the end boundary is
838 for (i
= chunk_start
; i
< chunk_end
; i
+= chunk_pages
) {
839 migration_clear_memory_region_dirty_bitmap(rb
, i
);
844 * colo_bitmap_find_diry:find contiguous dirty pages from start
846 * Returns the page offset within memory region of the start of the contiguout
849 * @rs: current RAM state
850 * @rb: RAMBlock where to search for dirty pages
851 * @start: page where we start the search
852 * @num: the number of contiguous dirty pages
855 unsigned long colo_bitmap_find_dirty(RAMState
*rs
, RAMBlock
*rb
,
856 unsigned long start
, unsigned long *num
)
858 unsigned long size
= rb
->used_length
>> TARGET_PAGE_BITS
;
859 unsigned long *bitmap
= rb
->bmap
;
860 unsigned long first
, next
;
864 if (ramblock_is_ignored(rb
)) {
868 first
= find_next_bit(bitmap
, size
, start
);
872 next
= find_next_zero_bit(bitmap
, size
, first
+ 1);
873 assert(next
>= first
);
878 static inline bool migration_bitmap_clear_dirty(RAMState
*rs
,
885 * Clear dirty bitmap if needed. This _must_ be called before we
886 * send any of the page in the chunk because we need to make sure
887 * we can capture further page content changes when we sync dirty
888 * log the next time. So as long as we are going to send any of
889 * the page in the chunk we clear the remote dirty bitmap for all.
890 * Clearing it earlier won't be a problem, but too late will.
892 migration_clear_memory_region_dirty_bitmap(rb
, page
);
894 ret
= test_and_clear_bit(page
, rb
->bmap
);
896 rs
->migration_dirty_pages
--;
902 static void dirty_bitmap_clear_section(MemoryRegionSection
*section
,
905 const hwaddr offset
= section
->offset_within_region
;
906 const hwaddr size
= int128_get64(section
->size
);
907 const unsigned long start
= offset
>> TARGET_PAGE_BITS
;
908 const unsigned long npages
= size
>> TARGET_PAGE_BITS
;
909 RAMBlock
*rb
= section
->mr
->ram_block
;
910 uint64_t *cleared_bits
= opaque
;
913 * We don't grab ram_state->bitmap_mutex because we expect to run
914 * only when starting migration or during postcopy recovery where
915 * we don't have concurrent access.
917 if (!migration_in_postcopy() && !migrate_background_snapshot()) {
918 migration_clear_memory_region_dirty_bitmap_range(rb
, start
, npages
);
920 *cleared_bits
+= bitmap_count_one_with_offset(rb
->bmap
, start
, npages
);
921 bitmap_clear(rb
->bmap
, start
, npages
);
925 * Exclude all dirty pages from migration that fall into a discarded range as
926 * managed by a RamDiscardManager responsible for the mapped memory region of
927 * the RAMBlock. Clear the corresponding bits in the dirty bitmaps.
929 * Discarded pages ("logically unplugged") have undefined content and must
930 * not get migrated, because even reading these pages for migration might
931 * result in undesired behavior.
933 * Returns the number of cleared bits in the RAMBlock dirty bitmap.
935 * Note: The result is only stable while migrating (precopy/postcopy).
937 static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock
*rb
)
939 uint64_t cleared_bits
= 0;
941 if (rb
->mr
&& rb
->bmap
&& memory_region_has_ram_discard_manager(rb
->mr
)) {
942 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(rb
->mr
);
943 MemoryRegionSection section
= {
945 .offset_within_region
= 0,
946 .size
= int128_make64(qemu_ram_get_used_length(rb
)),
949 ram_discard_manager_replay_discarded(rdm
, §ion
,
950 dirty_bitmap_clear_section
,
957 * Check if a host-page aligned page falls into a discarded range as managed by
958 * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
960 * Note: The result is only stable while migrating (precopy/postcopy).
962 bool ramblock_page_is_discarded(RAMBlock
*rb
, ram_addr_t start
)
964 if (rb
->mr
&& memory_region_has_ram_discard_manager(rb
->mr
)) {
965 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(rb
->mr
);
966 MemoryRegionSection section
= {
968 .offset_within_region
= start
,
969 .size
= int128_make64(qemu_ram_pagesize(rb
)),
972 return !ram_discard_manager_is_populated(rdm
, §ion
);
977 /* Called with RCU critical section */
978 static void ramblock_sync_dirty_bitmap(RAMState
*rs
, RAMBlock
*rb
)
980 uint64_t new_dirty_pages
=
981 cpu_physical_memory_sync_dirty_bitmap(rb
, 0, rb
->used_length
);
983 rs
->migration_dirty_pages
+= new_dirty_pages
;
984 rs
->num_dirty_pages_period
+= new_dirty_pages
;
988 * ram_pagesize_summary: calculate all the pagesizes of a VM
990 * Returns a summary bitmap of the page sizes of all RAMBlocks
992 * For VMs with just normal pages this is equivalent to the host page
993 * size. If it's got some huge pages then it's the OR of all the
994 * different page sizes.
996 uint64_t ram_pagesize_summary(void)
999 uint64_t summary
= 0;
1001 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1002 summary
|= block
->page_size
;
1008 uint64_t ram_get_total_transferred_pages(void)
1010 return ram_counters
.normal
+ ram_counters
.duplicate
+
1011 compression_counters
.pages
+ xbzrle_counters
.pages
;
1014 static void migration_update_rates(RAMState
*rs
, int64_t end_time
)
1016 uint64_t page_count
= rs
->target_page_count
- rs
->target_page_count_prev
;
1017 double compressed_size
;
1019 /* calculate period counters */
1020 ram_counters
.dirty_pages_rate
= rs
->num_dirty_pages_period
* 1000
1021 / (end_time
- rs
->time_last_bitmap_sync
);
1027 if (migrate_use_xbzrle()) {
1028 double encoded_size
, unencoded_size
;
1030 xbzrle_counters
.cache_miss_rate
= (double)(xbzrle_counters
.cache_miss
-
1031 rs
->xbzrle_cache_miss_prev
) / page_count
;
1032 rs
->xbzrle_cache_miss_prev
= xbzrle_counters
.cache_miss
;
1033 unencoded_size
= (xbzrle_counters
.pages
- rs
->xbzrle_pages_prev
) *
1035 encoded_size
= xbzrle_counters
.bytes
- rs
->xbzrle_bytes_prev
;
1036 if (xbzrle_counters
.pages
== rs
->xbzrle_pages_prev
|| !encoded_size
) {
1037 xbzrle_counters
.encoding_rate
= 0;
1039 xbzrle_counters
.encoding_rate
= unencoded_size
/ encoded_size
;
1041 rs
->xbzrle_pages_prev
= xbzrle_counters
.pages
;
1042 rs
->xbzrle_bytes_prev
= xbzrle_counters
.bytes
;
1045 if (migrate_use_compression()) {
1046 compression_counters
.busy_rate
= (double)(compression_counters
.busy
-
1047 rs
->compress_thread_busy_prev
) / page_count
;
1048 rs
->compress_thread_busy_prev
= compression_counters
.busy
;
1050 compressed_size
= compression_counters
.compressed_size
-
1051 rs
->compressed_size_prev
;
1052 if (compressed_size
) {
1053 double uncompressed_size
= (compression_counters
.pages
-
1054 rs
->compress_pages_prev
) * TARGET_PAGE_SIZE
;
1056 /* Compression-Ratio = Uncompressed-size / Compressed-size */
1057 compression_counters
.compression_rate
=
1058 uncompressed_size
/ compressed_size
;
1060 rs
->compress_pages_prev
= compression_counters
.pages
;
1061 rs
->compressed_size_prev
= compression_counters
.compressed_size
;
1066 static void migration_trigger_throttle(RAMState
*rs
)
1068 MigrationState
*s
= migrate_get_current();
1069 uint64_t threshold
= s
->parameters
.throttle_trigger_threshold
;
1071 uint64_t bytes_xfer_period
= ram_counters
.transferred
- rs
->bytes_xfer_prev
;
1072 uint64_t bytes_dirty_period
= rs
->num_dirty_pages_period
* TARGET_PAGE_SIZE
;
1073 uint64_t bytes_dirty_threshold
= bytes_xfer_period
* threshold
/ 100;
1075 /* During block migration the auto-converge logic incorrectly detects
1076 * that ram migration makes no progress. Avoid this by disabling the
1077 * throttling logic during the bulk phase of block migration. */
1078 if (migrate_auto_converge() && !blk_mig_bulk_active()) {
1079 /* The following detection logic can be refined later. For now:
1080 Check to see if the ratio between dirtied bytes and the approx.
1081 amount of bytes that just got transferred since the last time
1082 we were in this routine reaches the threshold. If that happens
1083 twice, start or increase throttling. */
1085 if ((bytes_dirty_period
> bytes_dirty_threshold
) &&
1086 (++rs
->dirty_rate_high_cnt
>= 2)) {
1087 trace_migration_throttle();
1088 rs
->dirty_rate_high_cnt
= 0;
1089 mig_throttle_guest_down(bytes_dirty_period
,
1090 bytes_dirty_threshold
);
1095 static void migration_bitmap_sync(RAMState
*rs
)
1100 ram_counters
.dirty_sync_count
++;
1102 if (!rs
->time_last_bitmap_sync
) {
1103 rs
->time_last_bitmap_sync
= qemu_clock_get_ms(QEMU_CLOCK_REALTIME
);
1106 trace_migration_bitmap_sync_start();
1107 memory_global_dirty_log_sync();
1109 qemu_mutex_lock(&rs
->bitmap_mutex
);
1110 WITH_RCU_READ_LOCK_GUARD() {
1111 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1112 ramblock_sync_dirty_bitmap(rs
, block
);
1114 ram_counters
.remaining
= ram_bytes_remaining();
1116 qemu_mutex_unlock(&rs
->bitmap_mutex
);
1118 memory_global_after_dirty_log_sync();
1119 trace_migration_bitmap_sync_end(rs
->num_dirty_pages_period
);
1121 end_time
= qemu_clock_get_ms(QEMU_CLOCK_REALTIME
);
1123 /* more than 1 second = 1000 millisecons */
1124 if (end_time
> rs
->time_last_bitmap_sync
+ 1000) {
1125 migration_trigger_throttle(rs
);
1127 migration_update_rates(rs
, end_time
);
1129 rs
->target_page_count_prev
= rs
->target_page_count
;
1131 /* reset period counters */
1132 rs
->time_last_bitmap_sync
= end_time
;
1133 rs
->num_dirty_pages_period
= 0;
1134 rs
->bytes_xfer_prev
= ram_counters
.transferred
;
1136 if (migrate_use_events()) {
1137 qapi_event_send_migration_pass(ram_counters
.dirty_sync_count
);
1141 static void migration_bitmap_sync_precopy(RAMState
*rs
)
1143 Error
*local_err
= NULL
;
1146 * The current notifier usage is just an optimization to migration, so we
1147 * don't stop the normal migration process in the error case.
1149 if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC
, &local_err
)) {
1150 error_report_err(local_err
);
1154 migration_bitmap_sync(rs
);
1156 if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC
, &local_err
)) {
1157 error_report_err(local_err
);
1161 static void ram_release_page(const char *rbname
, uint64_t offset
)
1163 if (!migrate_release_ram() || !migration_in_postcopy()) {
1167 ram_discard_range(rbname
, offset
, TARGET_PAGE_SIZE
);
1171 * save_zero_page_to_file: send the zero page to the file
1173 * Returns the size of data written to the file, 0 means the page is not
1176 * @rs: current RAM state
1177 * @file: the file where the data is saved
1178 * @block: block that contains the page we want to send
1179 * @offset: offset inside the block for the page
1181 static int save_zero_page_to_file(RAMState
*rs
, QEMUFile
*file
,
1182 RAMBlock
*block
, ram_addr_t offset
)
1184 uint8_t *p
= block
->host
+ offset
;
1187 if (buffer_is_zero(p
, TARGET_PAGE_SIZE
)) {
1188 len
+= save_page_header(rs
, file
, block
, offset
| RAM_SAVE_FLAG_ZERO
);
1189 qemu_put_byte(file
, 0);
1191 ram_release_page(block
->idstr
, offset
);
1197 * save_zero_page: send the zero page to the stream
1199 * Returns the number of pages written.
1201 * @rs: current RAM state
1202 * @block: block that contains the page we want to send
1203 * @offset: offset inside the block for the page
1205 static int save_zero_page(RAMState
*rs
, RAMBlock
*block
, ram_addr_t offset
)
1207 int len
= save_zero_page_to_file(rs
, rs
->f
, block
, offset
);
1210 ram_counters
.duplicate
++;
1211 ram_counters
.transferred
+= len
;
1218 * @pages: the number of pages written by the control path,
1220 * > 0 - number of pages written
1222 * Return true if the pages has been saved, otherwise false is returned.
1224 static bool control_save_page(RAMState
*rs
, RAMBlock
*block
, ram_addr_t offset
,
1227 uint64_t bytes_xmit
= 0;
1231 ret
= ram_control_save_page(rs
->f
, block
->offset
, offset
, TARGET_PAGE_SIZE
,
1233 if (ret
== RAM_SAVE_CONTROL_NOT_SUPP
) {
1238 ram_counters
.transferred
+= bytes_xmit
;
1242 if (ret
== RAM_SAVE_CONTROL_DELAYED
) {
1246 if (bytes_xmit
> 0) {
1247 ram_counters
.normal
++;
1248 } else if (bytes_xmit
== 0) {
1249 ram_counters
.duplicate
++;
1256 * directly send the page to the stream
1258 * Returns the number of pages written.
1260 * @rs: current RAM state
1261 * @block: block that contains the page we want to send
1262 * @offset: offset inside the block for the page
1263 * @buf: the page to be sent
1264 * @async: send to page asyncly
1266 static int save_normal_page(RAMState
*rs
, RAMBlock
*block
, ram_addr_t offset
,
1267 uint8_t *buf
, bool async
)
1269 ram_counters
.transferred
+= save_page_header(rs
, rs
->f
, block
,
1270 offset
| RAM_SAVE_FLAG_PAGE
);
1272 qemu_put_buffer_async(rs
->f
, buf
, TARGET_PAGE_SIZE
,
1273 migrate_release_ram() &
1274 migration_in_postcopy());
1276 qemu_put_buffer(rs
->f
, buf
, TARGET_PAGE_SIZE
);
1278 ram_counters
.transferred
+= TARGET_PAGE_SIZE
;
1279 ram_counters
.normal
++;
1284 * ram_save_page: send the given page to the stream
1286 * Returns the number of pages written.
1288 * >=0 - Number of pages written - this might legally be 0
1289 * if xbzrle noticed the page was the same.
1291 * @rs: current RAM state
1292 * @block: block that contains the page we want to send
1293 * @offset: offset inside the block for the page
1295 static int ram_save_page(RAMState
*rs
, PageSearchStatus
*pss
)
1299 bool send_async
= true;
1300 RAMBlock
*block
= pss
->block
;
1301 ram_addr_t offset
= ((ram_addr_t
)pss
->page
) << TARGET_PAGE_BITS
;
1302 ram_addr_t current_addr
= block
->offset
+ offset
;
1304 p
= block
->host
+ offset
;
1305 trace_ram_save_page(block
->idstr
, (uint64_t)offset
, p
);
1307 XBZRLE_cache_lock();
1308 if (rs
->xbzrle_enabled
&& !migration_in_postcopy()) {
1309 pages
= save_xbzrle_page(rs
, &p
, current_addr
, block
,
1311 if (!rs
->last_stage
) {
1312 /* Can't send this cached data async, since the cache page
1313 * might get updated before it gets to the wire
1319 /* XBZRLE overflow or normal page */
1321 pages
= save_normal_page(rs
, block
, offset
, p
, send_async
);
1324 XBZRLE_cache_unlock();
1329 static int ram_save_multifd_page(RAMState
*rs
, RAMBlock
*block
,
1332 if (multifd_queue_page(rs
->f
, block
, offset
) < 0) {
1335 ram_counters
.normal
++;
1340 static bool do_compress_ram_page(QEMUFile
*f
, z_stream
*stream
, RAMBlock
*block
,
1341 ram_addr_t offset
, uint8_t *source_buf
)
1343 RAMState
*rs
= ram_state
;
1344 uint8_t *p
= block
->host
+ offset
;
1347 if (save_zero_page_to_file(rs
, f
, block
, offset
)) {
1351 save_page_header(rs
, f
, block
, offset
| RAM_SAVE_FLAG_COMPRESS_PAGE
);
1354 * copy it to a internal buffer to avoid it being modified by VM
1355 * so that we can catch up the error during compression and
1358 memcpy(source_buf
, p
, TARGET_PAGE_SIZE
);
1359 ret
= qemu_put_compression_data(f
, stream
, source_buf
, TARGET_PAGE_SIZE
);
1361 qemu_file_set_error(migrate_get_current()->to_dst_file
, ret
);
1362 error_report("compressed data failed!");
1368 update_compress_thread_counts(const CompressParam
*param
, int bytes_xmit
)
1370 ram_counters
.transferred
+= bytes_xmit
;
1372 if (param
->zero_page
) {
1373 ram_counters
.duplicate
++;
1377 /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
1378 compression_counters
.compressed_size
+= bytes_xmit
- 8;
1379 compression_counters
.pages
++;
1382 static bool save_page_use_compression(RAMState
*rs
);
1384 static void flush_compressed_data(RAMState
*rs
)
1386 int idx
, len
, thread_count
;
1388 if (!save_page_use_compression(rs
)) {
1391 thread_count
= migrate_compress_threads();
1393 qemu_mutex_lock(&comp_done_lock
);
1394 for (idx
= 0; idx
< thread_count
; idx
++) {
1395 while (!comp_param
[idx
].done
) {
1396 qemu_cond_wait(&comp_done_cond
, &comp_done_lock
);
1399 qemu_mutex_unlock(&comp_done_lock
);
1401 for (idx
= 0; idx
< thread_count
; idx
++) {
1402 qemu_mutex_lock(&comp_param
[idx
].mutex
);
1403 if (!comp_param
[idx
].quit
) {
1404 len
= qemu_put_qemu_file(rs
->f
, comp_param
[idx
].file
);
1406 * it's safe to fetch zero_page without holding comp_done_lock
1407 * as there is no further request submitted to the thread,
1408 * i.e, the thread should be waiting for a request at this point.
1410 update_compress_thread_counts(&comp_param
[idx
], len
);
1412 qemu_mutex_unlock(&comp_param
[idx
].mutex
);
1416 static inline void set_compress_params(CompressParam
*param
, RAMBlock
*block
,
1419 param
->block
= block
;
1420 param
->offset
= offset
;
1423 static int compress_page_with_multi_thread(RAMState
*rs
, RAMBlock
*block
,
1426 int idx
, thread_count
, bytes_xmit
= -1, pages
= -1;
1427 bool wait
= migrate_compress_wait_thread();
1429 thread_count
= migrate_compress_threads();
1430 qemu_mutex_lock(&comp_done_lock
);
1432 for (idx
= 0; idx
< thread_count
; idx
++) {
1433 if (comp_param
[idx
].done
) {
1434 comp_param
[idx
].done
= false;
1435 bytes_xmit
= qemu_put_qemu_file(rs
->f
, comp_param
[idx
].file
);
1436 qemu_mutex_lock(&comp_param
[idx
].mutex
);
1437 set_compress_params(&comp_param
[idx
], block
, offset
);
1438 qemu_cond_signal(&comp_param
[idx
].cond
);
1439 qemu_mutex_unlock(&comp_param
[idx
].mutex
);
1441 update_compress_thread_counts(&comp_param
[idx
], bytes_xmit
);
1447 * wait for the free thread if the user specifies 'compress-wait-thread',
1448 * otherwise we will post the page out in the main thread as normal page.
1450 if (pages
< 0 && wait
) {
1451 qemu_cond_wait(&comp_done_cond
, &comp_done_lock
);
1454 qemu_mutex_unlock(&comp_done_lock
);
1460 * find_dirty_block: find the next dirty page and update any state
1461 * associated with the search process.
1463 * Returns true if a page is found
1465 * @rs: current RAM state
1466 * @pss: data about the state of the current dirty page scan
1467 * @again: set to false if the search has scanned the whole of RAM
1469 static bool find_dirty_block(RAMState
*rs
, PageSearchStatus
*pss
, bool *again
)
1471 pss
->page
= migration_bitmap_find_dirty(rs
, pss
->block
, pss
->page
);
1472 if (pss
->complete_round
&& pss
->block
== rs
->last_seen_block
&&
1473 pss
->page
>= rs
->last_page
) {
1475 * We've been once around the RAM and haven't found anything.
1481 if (!offset_in_ramblock(pss
->block
,
1482 ((ram_addr_t
)pss
->page
) << TARGET_PAGE_BITS
)) {
1483 /* Didn't find anything in this RAM Block */
1485 pss
->block
= QLIST_NEXT_RCU(pss
->block
, next
);
1488 * If memory migration starts over, we will meet a dirtied page
1489 * which may still exists in compression threads's ring, so we
1490 * should flush the compressed data to make sure the new page
1491 * is not overwritten by the old one in the destination.
1493 * Also If xbzrle is on, stop using the data compression at this
1494 * point. In theory, xbzrle can do better than compression.
1496 flush_compressed_data(rs
);
1498 /* Hit the end of the list */
1499 pss
->block
= QLIST_FIRST_RCU(&ram_list
.blocks
);
1500 /* Flag that we've looped */
1501 pss
->complete_round
= true;
1502 /* After the first round, enable XBZRLE. */
1503 if (migrate_use_xbzrle()) {
1504 rs
->xbzrle_enabled
= true;
1507 /* Didn't find anything this time, but try again on the new block */
1511 /* Can go around again, but... */
1513 /* We've found something so probably don't need to */
1519 * unqueue_page: gets a page of the queue
1521 * Helper for 'get_queued_page' - gets a page off the queue
1523 * Returns the block of the page (or NULL if none available)
1525 * @rs: current RAM state
1526 * @offset: used to return the offset within the RAMBlock
1528 static RAMBlock
*unqueue_page(RAMState
*rs
, ram_addr_t
*offset
)
1530 RAMBlock
*block
= NULL
;
1532 if (QSIMPLEQ_EMPTY_ATOMIC(&rs
->src_page_requests
)) {
1536 QEMU_LOCK_GUARD(&rs
->src_page_req_mutex
);
1537 if (!QSIMPLEQ_EMPTY(&rs
->src_page_requests
)) {
1538 struct RAMSrcPageRequest
*entry
=
1539 QSIMPLEQ_FIRST(&rs
->src_page_requests
);
1541 *offset
= entry
->offset
;
1543 if (entry
->len
> TARGET_PAGE_SIZE
) {
1544 entry
->len
-= TARGET_PAGE_SIZE
;
1545 entry
->offset
+= TARGET_PAGE_SIZE
;
1547 memory_region_unref(block
->mr
);
1548 QSIMPLEQ_REMOVE_HEAD(&rs
->src_page_requests
, next_req
);
1550 migration_consume_urgent_request();
1557 #if defined(__linux__)
1559 * poll_fault_page: try to get next UFFD write fault page and, if pending fault
1560 * is found, return RAM block pointer and page offset
1562 * Returns pointer to the RAMBlock containing faulting page,
1563 * NULL if no write faults are pending
1565 * @rs: current RAM state
1566 * @offset: page offset from the beginning of the block
1568 static RAMBlock
*poll_fault_page(RAMState
*rs
, ram_addr_t
*offset
)
1570 struct uffd_msg uffd_msg
;
1575 if (!migrate_background_snapshot()) {
1579 res
= uffd_read_events(rs
->uffdio_fd
, &uffd_msg
, 1);
1584 page_address
= (void *)(uintptr_t) uffd_msg
.arg
.pagefault
.address
;
1585 block
= qemu_ram_block_from_host(page_address
, false, offset
);
1586 assert(block
&& (block
->flags
& RAM_UF_WRITEPROTECT
) != 0);
1591 * ram_save_release_protection: release UFFD write protection after
1592 * a range of pages has been saved
1594 * @rs: current RAM state
1595 * @pss: page-search-status structure
1596 * @start_page: index of the first page in the range relative to pss->block
1598 * Returns 0 on success, negative value in case of an error
1600 static int ram_save_release_protection(RAMState
*rs
, PageSearchStatus
*pss
,
1601 unsigned long start_page
)
1605 /* Check if page is from UFFD-managed region. */
1606 if (pss
->block
->flags
& RAM_UF_WRITEPROTECT
) {
1607 void *page_address
= pss
->block
->host
+ (start_page
<< TARGET_PAGE_BITS
);
1608 uint64_t run_length
= (pss
->page
- start_page
+ 1) << TARGET_PAGE_BITS
;
1610 /* Flush async buffers before un-protect. */
1612 /* Un-protect memory range. */
1613 res
= uffd_change_protection(rs
->uffdio_fd
, page_address
, run_length
,
1620 /* ram_write_tracking_available: check if kernel supports required UFFD features
1622 * Returns true if supports, false otherwise
1624 bool ram_write_tracking_available(void)
1626 uint64_t uffd_features
;
1629 res
= uffd_query_features(&uffd_features
);
1631 (uffd_features
& UFFD_FEATURE_PAGEFAULT_FLAG_WP
) != 0);
1634 /* ram_write_tracking_compatible: check if guest configuration is
1635 * compatible with 'write-tracking'
1637 * Returns true if compatible, false otherwise
1639 bool ram_write_tracking_compatible(void)
1641 const uint64_t uffd_ioctls_mask
= BIT(_UFFDIO_WRITEPROTECT
);
1646 /* Open UFFD file descriptor */
1647 uffd_fd
= uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP
, false);
1652 RCU_READ_LOCK_GUARD();
1654 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1655 uint64_t uffd_ioctls
;
1657 /* Nothing to do with read-only and MMIO-writable regions */
1658 if (block
->mr
->readonly
|| block
->mr
->rom_device
) {
1661 /* Try to register block memory via UFFD-IO to track writes */
1662 if (uffd_register_memory(uffd_fd
, block
->host
, block
->max_length
,
1663 UFFDIO_REGISTER_MODE_WP
, &uffd_ioctls
)) {
1666 if ((uffd_ioctls
& uffd_ioctls_mask
) != uffd_ioctls_mask
) {
1673 uffd_close_fd(uffd_fd
);
1677 static inline void populate_read_range(RAMBlock
*block
, ram_addr_t offset
,
1681 * We read one byte of each page; this will preallocate page tables if
1682 * required and populate the shared zeropage on MAP_PRIVATE anonymous memory
1683 * where no page was populated yet. This might require adaption when
1684 * supporting other mappings, like shmem.
1686 for (; offset
< size
; offset
+= block
->page_size
) {
1687 char tmp
= *((char *)block
->host
+ offset
);
1689 /* Don't optimize the read out */
1690 asm volatile("" : "+r" (tmp
));
1694 static inline int populate_read_section(MemoryRegionSection
*section
,
1697 const hwaddr size
= int128_get64(section
->size
);
1698 hwaddr offset
= section
->offset_within_region
;
1699 RAMBlock
*block
= section
->mr
->ram_block
;
1701 populate_read_range(block
, offset
, size
);
1706 * ram_block_populate_read: preallocate page tables and populate pages in the
1707 * RAM block by reading a byte of each page.
1709 * Since it's solely used for userfault_fd WP feature, here we just
1710 * hardcode page size to qemu_real_host_page_size.
1712 * @block: RAM block to populate
1714 static void ram_block_populate_read(RAMBlock
*rb
)
1717 * Skip populating all pages that fall into a discarded range as managed by
1718 * a RamDiscardManager responsible for the mapped memory region of the
1719 * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock
1720 * must not get populated automatically. We don't have to track
1721 * modifications via userfaultfd WP reliably, because these pages will
1722 * not be part of the migration stream either way -- see
1723 * ramblock_dirty_bitmap_exclude_discarded_pages().
1725 * Note: The result is only stable while migrating (precopy/postcopy).
1727 if (rb
->mr
&& memory_region_has_ram_discard_manager(rb
->mr
)) {
1728 RamDiscardManager
*rdm
= memory_region_get_ram_discard_manager(rb
->mr
);
1729 MemoryRegionSection section
= {
1731 .offset_within_region
= 0,
1732 .size
= rb
->mr
->size
,
1735 ram_discard_manager_replay_populated(rdm
, §ion
,
1736 populate_read_section
, NULL
);
1738 populate_read_range(rb
, 0, rb
->used_length
);
1743 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
1745 void ram_write_tracking_prepare(void)
1749 RCU_READ_LOCK_GUARD();
1751 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1752 /* Nothing to do with read-only and MMIO-writable regions */
1753 if (block
->mr
->readonly
|| block
->mr
->rom_device
) {
1758 * Populate pages of the RAM block before enabling userfault_fd
1761 * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with
1762 * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip
1763 * pages with pte_none() entries in page table.
1765 ram_block_populate_read(block
);
1770 * ram_write_tracking_start: start UFFD-WP memory tracking
1772 * Returns 0 for success or negative value in case of error
1774 int ram_write_tracking_start(void)
1777 RAMState
*rs
= ram_state
;
1780 /* Open UFFD file descriptor */
1781 uffd_fd
= uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP
, true);
1785 rs
->uffdio_fd
= uffd_fd
;
1787 RCU_READ_LOCK_GUARD();
1789 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1790 /* Nothing to do with read-only and MMIO-writable regions */
1791 if (block
->mr
->readonly
|| block
->mr
->rom_device
) {
1795 /* Register block memory with UFFD to track writes */
1796 if (uffd_register_memory(rs
->uffdio_fd
, block
->host
,
1797 block
->max_length
, UFFDIO_REGISTER_MODE_WP
, NULL
)) {
1800 /* Apply UFFD write protection to the block memory range */
1801 if (uffd_change_protection(rs
->uffdio_fd
, block
->host
,
1802 block
->max_length
, true, false)) {
1805 block
->flags
|= RAM_UF_WRITEPROTECT
;
1806 memory_region_ref(block
->mr
);
1808 trace_ram_write_tracking_ramblock_start(block
->idstr
, block
->page_size
,
1809 block
->host
, block
->max_length
);
1815 error_report("ram_write_tracking_start() failed: restoring initial memory state");
1817 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1818 if ((block
->flags
& RAM_UF_WRITEPROTECT
) == 0) {
1822 * In case some memory block failed to be write-protected
1823 * remove protection and unregister all succeeded RAM blocks
1825 uffd_change_protection(rs
->uffdio_fd
, block
->host
, block
->max_length
,
1827 uffd_unregister_memory(rs
->uffdio_fd
, block
->host
, block
->max_length
);
1828 /* Cleanup flags and remove reference */
1829 block
->flags
&= ~RAM_UF_WRITEPROTECT
;
1830 memory_region_unref(block
->mr
);
1833 uffd_close_fd(uffd_fd
);
1839 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
1841 void ram_write_tracking_stop(void)
1843 RAMState
*rs
= ram_state
;
1846 RCU_READ_LOCK_GUARD();
1848 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
1849 if ((block
->flags
& RAM_UF_WRITEPROTECT
) == 0) {
1852 /* Remove protection and unregister all affected RAM blocks */
1853 uffd_change_protection(rs
->uffdio_fd
, block
->host
, block
->max_length
,
1855 uffd_unregister_memory(rs
->uffdio_fd
, block
->host
, block
->max_length
);
1857 trace_ram_write_tracking_ramblock_stop(block
->idstr
, block
->page_size
,
1858 block
->host
, block
->max_length
);
1860 /* Cleanup flags and remove reference */
1861 block
->flags
&= ~RAM_UF_WRITEPROTECT
;
1862 memory_region_unref(block
->mr
);
1865 /* Finally close UFFD file descriptor */
1866 uffd_close_fd(rs
->uffdio_fd
);
1871 /* No target OS support, stubs just fail or ignore */
1873 static RAMBlock
*poll_fault_page(RAMState
*rs
, ram_addr_t
*offset
)
1881 static int ram_save_release_protection(RAMState
*rs
, PageSearchStatus
*pss
,
1882 unsigned long start_page
)
1891 bool ram_write_tracking_available(void)
1896 bool ram_write_tracking_compatible(void)
1902 int ram_write_tracking_start(void)
1908 void ram_write_tracking_stop(void)
1912 #endif /* defined(__linux__) */
1915 * get_queued_page: unqueue a page from the postcopy requests
1917 * Skips pages that are already sent (!dirty)
1919 * Returns true if a queued page is found
1921 * @rs: current RAM state
1922 * @pss: data about the state of the current dirty page scan
1924 static bool get_queued_page(RAMState
*rs
, PageSearchStatus
*pss
)
1931 block
= unqueue_page(rs
, &offset
);
1933 * We're sending this page, and since it's postcopy nothing else
1934 * will dirty it, and we must make sure it doesn't get sent again
1935 * even if this queue request was received after the background
1936 * search already sent it.
1941 page
= offset
>> TARGET_PAGE_BITS
;
1942 dirty
= test_bit(page
, block
->bmap
);
1944 trace_get_queued_page_not_dirty(block
->idstr
, (uint64_t)offset
,
1947 trace_get_queued_page(block
->idstr
, (uint64_t)offset
, page
);
1951 } while (block
&& !dirty
);
1955 * Poll write faults too if background snapshot is enabled; that's
1956 * when we have vcpus got blocked by the write protected pages.
1958 block
= poll_fault_page(rs
, &offset
);
1963 * We want the background search to continue from the queued page
1964 * since the guest is likely to want other pages near to the page
1965 * it just requested.
1968 pss
->page
= offset
>> TARGET_PAGE_BITS
;
1971 * This unqueued page would break the "one round" check, even is
1974 pss
->complete_round
= false;
1981 * migration_page_queue_free: drop any remaining pages in the ram
1984 * It should be empty at the end anyway, but in error cases there may
1985 * be some left. in case that there is any page left, we drop it.
1988 static void migration_page_queue_free(RAMState
*rs
)
1990 struct RAMSrcPageRequest
*mspr
, *next_mspr
;
1991 /* This queue generally should be empty - but in the case of a failed
1992 * migration might have some droppings in.
1994 RCU_READ_LOCK_GUARD();
1995 QSIMPLEQ_FOREACH_SAFE(mspr
, &rs
->src_page_requests
, next_req
, next_mspr
) {
1996 memory_region_unref(mspr
->rb
->mr
);
1997 QSIMPLEQ_REMOVE_HEAD(&rs
->src_page_requests
, next_req
);
2003 * ram_save_queue_pages: queue the page for transmission
2005 * A request from postcopy destination for example.
2007 * Returns zero on success or negative on error
2009 * @rbname: Name of the RAMBLock of the request. NULL means the
2010 * same that last one.
2011 * @start: starting address from the start of the RAMBlock
2012 * @len: length (in bytes) to send
2014 int ram_save_queue_pages(const char *rbname
, ram_addr_t start
, ram_addr_t len
)
2017 RAMState
*rs
= ram_state
;
2019 ram_counters
.postcopy_requests
++;
2020 RCU_READ_LOCK_GUARD();
2023 /* Reuse last RAMBlock */
2024 ramblock
= rs
->last_req_rb
;
2028 * Shouldn't happen, we can't reuse the last RAMBlock if
2029 * it's the 1st request.
2031 error_report("ram_save_queue_pages no previous block");
2035 ramblock
= qemu_ram_block_by_name(rbname
);
2038 /* We shouldn't be asked for a non-existent RAMBlock */
2039 error_report("ram_save_queue_pages no block '%s'", rbname
);
2042 rs
->last_req_rb
= ramblock
;
2044 trace_ram_save_queue_pages(ramblock
->idstr
, start
, len
);
2045 if (!offset_in_ramblock(ramblock
, start
+ len
- 1)) {
2046 error_report("%s request overrun start=" RAM_ADDR_FMT
" len="
2047 RAM_ADDR_FMT
" blocklen=" RAM_ADDR_FMT
,
2048 __func__
, start
, len
, ramblock
->used_length
);
2052 struct RAMSrcPageRequest
*new_entry
=
2053 g_malloc0(sizeof(struct RAMSrcPageRequest
));
2054 new_entry
->rb
= ramblock
;
2055 new_entry
->offset
= start
;
2056 new_entry
->len
= len
;
2058 memory_region_ref(ramblock
->mr
);
2059 qemu_mutex_lock(&rs
->src_page_req_mutex
);
2060 QSIMPLEQ_INSERT_TAIL(&rs
->src_page_requests
, new_entry
, next_req
);
2061 migration_make_urgent_request();
2062 qemu_mutex_unlock(&rs
->src_page_req_mutex
);
2067 static bool save_page_use_compression(RAMState
*rs
)
2069 if (!migrate_use_compression()) {
2074 * If xbzrle is enabled (e.g., after first round of migration), stop
2075 * using the data compression. In theory, xbzrle can do better than
2078 if (rs
->xbzrle_enabled
) {
2086 * try to compress the page before posting it out, return true if the page
2087 * has been properly handled by compression, otherwise needs other
2088 * paths to handle it
2090 static bool save_compress_page(RAMState
*rs
, RAMBlock
*block
, ram_addr_t offset
)
2092 if (!save_page_use_compression(rs
)) {
2097 * When starting the process of a new block, the first page of
2098 * the block should be sent out before other pages in the same
2099 * block, and all the pages in last block should have been sent
2100 * out, keeping this order is important, because the 'cont' flag
2101 * is used to avoid resending the block name.
2103 * We post the fist page as normal page as compression will take
2104 * much CPU resource.
2106 if (block
!= rs
->last_sent_block
) {
2107 flush_compressed_data(rs
);
2111 if (compress_page_with_multi_thread(rs
, block
, offset
) > 0) {
2115 compression_counters
.busy
++;
2120 * ram_save_target_page: save one target page
2122 * Returns the number of pages written
2124 * @rs: current RAM state
2125 * @pss: data about the page we want to send
2127 static int ram_save_target_page(RAMState
*rs
, PageSearchStatus
*pss
)
2129 RAMBlock
*block
= pss
->block
;
2130 ram_addr_t offset
= ((ram_addr_t
)pss
->page
) << TARGET_PAGE_BITS
;
2133 if (control_save_page(rs
, block
, offset
, &res
)) {
2137 if (save_compress_page(rs
, block
, offset
)) {
2141 res
= save_zero_page(rs
, block
, offset
);
2143 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2144 * page would be stale
2146 if (!save_page_use_compression(rs
)) {
2147 XBZRLE_cache_lock();
2148 xbzrle_cache_zero_page(rs
, block
->offset
+ offset
);
2149 XBZRLE_cache_unlock();
2155 * Do not use multifd for:
2156 * 1. Compression as the first page in the new block should be posted out
2157 * before sending the compressed page
2158 * 2. In postcopy as one whole host page should be placed
2160 if (!save_page_use_compression(rs
) && migrate_use_multifd()
2161 && !migration_in_postcopy()) {
2162 return ram_save_multifd_page(rs
, block
, offset
);
2165 return ram_save_page(rs
, pss
);
2169 * ram_save_host_page: save a whole host page
2171 * Starting at *offset send pages up to the end of the current host
2172 * page. It's valid for the initial offset to point into the middle of
2173 * a host page in which case the remainder of the hostpage is sent.
2174 * Only dirty target pages are sent. Note that the host page size may
2175 * be a huge page for this block.
2176 * The saving stops at the boundary of the used_length of the block
2177 * if the RAMBlock isn't a multiple of the host page size.
2179 * Returns the number of pages written or negative on error
2181 * @rs: current RAM state
2182 * @pss: data about the page we want to send
2184 static int ram_save_host_page(RAMState
*rs
, PageSearchStatus
*pss
)
2186 int tmppages
, pages
= 0;
2187 size_t pagesize_bits
=
2188 qemu_ram_pagesize(pss
->block
) >> TARGET_PAGE_BITS
;
2189 unsigned long hostpage_boundary
=
2190 QEMU_ALIGN_UP(pss
->page
+ 1, pagesize_bits
);
2191 unsigned long start_page
= pss
->page
;
2194 if (ramblock_is_ignored(pss
->block
)) {
2195 error_report("block %s should not be migrated !", pss
->block
->idstr
);
2200 /* Check the pages is dirty and if it is send it */
2201 if (migration_bitmap_clear_dirty(rs
, pss
->block
, pss
->page
)) {
2202 tmppages
= ram_save_target_page(rs
, pss
);
2209 * Allow rate limiting to happen in the middle of huge pages if
2210 * something is sent in the current iteration.
2212 if (pagesize_bits
> 1 && tmppages
> 0) {
2213 migration_rate_limit();
2216 pss
->page
= migration_bitmap_find_dirty(rs
, pss
->block
, pss
->page
);
2217 } while ((pss
->page
< hostpage_boundary
) &&
2218 offset_in_ramblock(pss
->block
,
2219 ((ram_addr_t
)pss
->page
) << TARGET_PAGE_BITS
));
2220 /* The offset we leave with is the min boundary of host page and block */
2221 pss
->page
= MIN(pss
->page
, hostpage_boundary
) - 1;
2223 res
= ram_save_release_protection(rs
, pss
, start_page
);
2224 return (res
< 0 ? res
: pages
);
2228 * ram_find_and_save_block: finds a dirty page and sends it to f
2230 * Called within an RCU critical section.
2232 * Returns the number of pages written where zero means no dirty pages,
2233 * or negative on error
2235 * @rs: current RAM state
2237 * On systems where host-page-size > target-page-size it will send all the
2238 * pages in a host page that are dirty.
2240 static int ram_find_and_save_block(RAMState
*rs
)
2242 PageSearchStatus pss
;
2246 /* No dirty page as there is zero RAM */
2247 if (!ram_bytes_total()) {
2251 pss
.block
= rs
->last_seen_block
;
2252 pss
.page
= rs
->last_page
;
2253 pss
.complete_round
= false;
2256 pss
.block
= QLIST_FIRST_RCU(&ram_list
.blocks
);
2261 found
= get_queued_page(rs
, &pss
);
2264 /* priority queue empty, so just search for something dirty */
2265 found
= find_dirty_block(rs
, &pss
, &again
);
2269 pages
= ram_save_host_page(rs
, &pss
);
2271 } while (!pages
&& again
);
2273 rs
->last_seen_block
= pss
.block
;
2274 rs
->last_page
= pss
.page
;
2279 void acct_update_position(QEMUFile
*f
, size_t size
, bool zero
)
2281 uint64_t pages
= size
/ TARGET_PAGE_SIZE
;
2284 ram_counters
.duplicate
+= pages
;
2286 ram_counters
.normal
+= pages
;
2287 ram_counters
.transferred
+= size
;
2288 qemu_update_position(f
, size
);
2292 static uint64_t ram_bytes_total_common(bool count_ignored
)
2297 RCU_READ_LOCK_GUARD();
2299 if (count_ignored
) {
2300 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
2301 total
+= block
->used_length
;
2304 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
2305 total
+= block
->used_length
;
2311 uint64_t ram_bytes_total(void)
2313 return ram_bytes_total_common(false);
2316 static void xbzrle_load_setup(void)
2318 XBZRLE
.decoded_buf
= g_malloc(TARGET_PAGE_SIZE
);
2321 static void xbzrle_load_cleanup(void)
2323 g_free(XBZRLE
.decoded_buf
);
2324 XBZRLE
.decoded_buf
= NULL
;
2327 static void ram_state_cleanup(RAMState
**rsp
)
2330 migration_page_queue_free(*rsp
);
2331 qemu_mutex_destroy(&(*rsp
)->bitmap_mutex
);
2332 qemu_mutex_destroy(&(*rsp
)->src_page_req_mutex
);
2338 static void xbzrle_cleanup(void)
2340 XBZRLE_cache_lock();
2342 cache_fini(XBZRLE
.cache
);
2343 g_free(XBZRLE
.encoded_buf
);
2344 g_free(XBZRLE
.current_buf
);
2345 g_free(XBZRLE
.zero_target_page
);
2346 XBZRLE
.cache
= NULL
;
2347 XBZRLE
.encoded_buf
= NULL
;
2348 XBZRLE
.current_buf
= NULL
;
2349 XBZRLE
.zero_target_page
= NULL
;
2351 XBZRLE_cache_unlock();
2354 static void ram_save_cleanup(void *opaque
)
2356 RAMState
**rsp
= opaque
;
2359 /* We don't use dirty log with background snapshots */
2360 if (!migrate_background_snapshot()) {
2361 /* caller have hold iothread lock or is in a bh, so there is
2362 * no writing race against the migration bitmap
2364 if (global_dirty_tracking
& GLOBAL_DIRTY_MIGRATION
) {
2366 * do not stop dirty log without starting it, since
2367 * memory_global_dirty_log_stop will assert that
2368 * memory_global_dirty_log_start/stop used in pairs
2370 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION
);
2374 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
2375 g_free(block
->clear_bmap
);
2376 block
->clear_bmap
= NULL
;
2377 g_free(block
->bmap
);
2382 compress_threads_save_cleanup();
2383 ram_state_cleanup(rsp
);
2386 static void ram_state_reset(RAMState
*rs
)
2388 rs
->last_seen_block
= NULL
;
2389 rs
->last_sent_block
= NULL
;
2391 rs
->last_version
= ram_list
.version
;
2392 rs
->xbzrle_enabled
= false;
2395 #define MAX_WAIT 50 /* ms, half buffered_file limit */
2397 /* **** functions for postcopy ***** */
2399 void ram_postcopy_migrated_memory_release(MigrationState
*ms
)
2401 struct RAMBlock
*block
;
2403 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
2404 unsigned long *bitmap
= block
->bmap
;
2405 unsigned long range
= block
->used_length
>> TARGET_PAGE_BITS
;
2406 unsigned long run_start
= find_next_zero_bit(bitmap
, range
, 0);
2408 while (run_start
< range
) {
2409 unsigned long run_end
= find_next_bit(bitmap
, range
, run_start
+ 1);
2410 ram_discard_range(block
->idstr
,
2411 ((ram_addr_t
)run_start
) << TARGET_PAGE_BITS
,
2412 ((ram_addr_t
)(run_end
- run_start
))
2413 << TARGET_PAGE_BITS
);
2414 run_start
= find_next_zero_bit(bitmap
, range
, run_end
+ 1);
2420 * postcopy_send_discard_bm_ram: discard a RAMBlock
2422 * Returns zero on success
2424 * Callback from postcopy_each_ram_send_discard for each RAMBlock
2426 * @ms: current migration state
2427 * @block: RAMBlock to discard
2429 static int postcopy_send_discard_bm_ram(MigrationState
*ms
, RAMBlock
*block
)
2431 unsigned long end
= block
->used_length
>> TARGET_PAGE_BITS
;
2432 unsigned long current
;
2433 unsigned long *bitmap
= block
->bmap
;
2435 for (current
= 0; current
< end
; ) {
2436 unsigned long one
= find_next_bit(bitmap
, end
, current
);
2437 unsigned long zero
, discard_length
;
2443 zero
= find_next_zero_bit(bitmap
, end
, one
+ 1);
2446 discard_length
= end
- one
;
2448 discard_length
= zero
- one
;
2450 postcopy_discard_send_range(ms
, one
, discard_length
);
2451 current
= one
+ discard_length
;
2457 static void postcopy_chunk_hostpages_pass(MigrationState
*ms
, RAMBlock
*block
);
2460 * postcopy_each_ram_send_discard: discard all RAMBlocks
2462 * Returns 0 for success or negative for error
2464 * Utility for the outgoing postcopy code.
2465 * Calls postcopy_send_discard_bm_ram for each RAMBlock
2466 * passing it bitmap indexes and name.
2467 * (qemu_ram_foreach_block ends up passing unscaled lengths
2468 * which would mean postcopy code would have to deal with target page)
2470 * @ms: current migration state
2472 static int postcopy_each_ram_send_discard(MigrationState
*ms
)
2474 struct RAMBlock
*block
;
2477 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
2478 postcopy_discard_send_init(ms
, block
->idstr
);
2481 * Deal with TPS != HPS and huge pages. It discard any partially sent
2482 * host-page size chunks, mark any partially dirty host-page size
2483 * chunks as all dirty. In this case the host-page is the host-page
2484 * for the particular RAMBlock, i.e. it might be a huge page.
2486 postcopy_chunk_hostpages_pass(ms
, block
);
2489 * Postcopy sends chunks of bitmap over the wire, but it
2490 * just needs indexes at this point, avoids it having
2491 * target page specific code.
2493 ret
= postcopy_send_discard_bm_ram(ms
, block
);
2494 postcopy_discard_send_finish(ms
);
2504 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
2506 * Helper for postcopy_chunk_hostpages; it's called twice to
2507 * canonicalize the two bitmaps, that are similar, but one is
2510 * Postcopy requires that all target pages in a hostpage are dirty or
2511 * clean, not a mix. This function canonicalizes the bitmaps.
2513 * @ms: current migration state
2514 * @block: block that contains the page we want to canonicalize
2516 static void postcopy_chunk_hostpages_pass(MigrationState
*ms
, RAMBlock
*block
)
2518 RAMState
*rs
= ram_state
;
2519 unsigned long *bitmap
= block
->bmap
;
2520 unsigned int host_ratio
= block
->page_size
/ TARGET_PAGE_SIZE
;
2521 unsigned long pages
= block
->used_length
>> TARGET_PAGE_BITS
;
2522 unsigned long run_start
;
2524 if (block
->page_size
== TARGET_PAGE_SIZE
) {
2525 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2529 /* Find a dirty page */
2530 run_start
= find_next_bit(bitmap
, pages
, 0);
2532 while (run_start
< pages
) {
2535 * If the start of this run of pages is in the middle of a host
2536 * page, then we need to fixup this host page.
2538 if (QEMU_IS_ALIGNED(run_start
, host_ratio
)) {
2539 /* Find the end of this run */
2540 run_start
= find_next_zero_bit(bitmap
, pages
, run_start
+ 1);
2542 * If the end isn't at the start of a host page, then the
2543 * run doesn't finish at the end of a host page
2544 * and we need to discard.
2548 if (!QEMU_IS_ALIGNED(run_start
, host_ratio
)) {
2550 unsigned long fixup_start_addr
= QEMU_ALIGN_DOWN(run_start
,
2552 run_start
= QEMU_ALIGN_UP(run_start
, host_ratio
);
2554 /* Clean up the bitmap */
2555 for (page
= fixup_start_addr
;
2556 page
< fixup_start_addr
+ host_ratio
; page
++) {
2558 * Remark them as dirty, updating the count for any pages
2559 * that weren't previously dirty.
2561 rs
->migration_dirty_pages
+= !test_and_set_bit(page
, bitmap
);
2565 /* Find the next dirty page for the next iteration */
2566 run_start
= find_next_bit(bitmap
, pages
, run_start
);
2571 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2573 * Returns zero on success
2575 * Transmit the set of pages to be discarded after precopy to the target
2576 * these are pages that:
2577 * a) Have been previously transmitted but are now dirty again
2578 * b) Pages that have never been transmitted, this ensures that
2579 * any pages on the destination that have been mapped by background
2580 * tasks get discarded (transparent huge pages is the specific concern)
2581 * Hopefully this is pretty sparse
2583 * @ms: current migration state
2585 int ram_postcopy_send_discard_bitmap(MigrationState
*ms
)
2587 RAMState
*rs
= ram_state
;
2589 RCU_READ_LOCK_GUARD();
2591 /* This should be our last sync, the src is now paused */
2592 migration_bitmap_sync(rs
);
2594 /* Easiest way to make sure we don't resume in the middle of a host-page */
2595 rs
->last_seen_block
= NULL
;
2596 rs
->last_sent_block
= NULL
;
2599 trace_ram_postcopy_send_discard_bitmap();
2601 return postcopy_each_ram_send_discard(ms
);
2605 * ram_discard_range: discard dirtied pages at the beginning of postcopy
2607 * Returns zero on success
2609 * @rbname: name of the RAMBlock of the request. NULL means the
2610 * same that last one.
2611 * @start: RAMBlock starting page
2612 * @length: RAMBlock size
2614 int ram_discard_range(const char *rbname
, uint64_t start
, size_t length
)
2616 trace_ram_discard_range(rbname
, start
, length
);
2618 RCU_READ_LOCK_GUARD();
2619 RAMBlock
*rb
= qemu_ram_block_by_name(rbname
);
2622 error_report("ram_discard_range: Failed to find block '%s'", rbname
);
2627 * On source VM, we don't need to update the received bitmap since
2628 * we don't even have one.
2630 if (rb
->receivedmap
) {
2631 bitmap_clear(rb
->receivedmap
, start
>> qemu_target_page_bits(),
2632 length
>> qemu_target_page_bits());
2635 return ram_block_discard_range(rb
, start
, length
);
2639 * For every allocation, we will try not to crash the VM if the
2640 * allocation failed.
2642 static int xbzrle_init(void)
2644 Error
*local_err
= NULL
;
2646 if (!migrate_use_xbzrle()) {
2650 XBZRLE_cache_lock();
2652 XBZRLE
.zero_target_page
= g_try_malloc0(TARGET_PAGE_SIZE
);
2653 if (!XBZRLE
.zero_target_page
) {
2654 error_report("%s: Error allocating zero page", __func__
);
2658 XBZRLE
.cache
= cache_init(migrate_xbzrle_cache_size(),
2659 TARGET_PAGE_SIZE
, &local_err
);
2660 if (!XBZRLE
.cache
) {
2661 error_report_err(local_err
);
2662 goto free_zero_page
;
2665 XBZRLE
.encoded_buf
= g_try_malloc0(TARGET_PAGE_SIZE
);
2666 if (!XBZRLE
.encoded_buf
) {
2667 error_report("%s: Error allocating encoded_buf", __func__
);
2671 XBZRLE
.current_buf
= g_try_malloc(TARGET_PAGE_SIZE
);
2672 if (!XBZRLE
.current_buf
) {
2673 error_report("%s: Error allocating current_buf", __func__
);
2674 goto free_encoded_buf
;
2677 /* We are all good */
2678 XBZRLE_cache_unlock();
2682 g_free(XBZRLE
.encoded_buf
);
2683 XBZRLE
.encoded_buf
= NULL
;
2685 cache_fini(XBZRLE
.cache
);
2686 XBZRLE
.cache
= NULL
;
2688 g_free(XBZRLE
.zero_target_page
);
2689 XBZRLE
.zero_target_page
= NULL
;
2691 XBZRLE_cache_unlock();
2695 static int ram_state_init(RAMState
**rsp
)
2697 *rsp
= g_try_new0(RAMState
, 1);
2700 error_report("%s: Init ramstate fail", __func__
);
2704 qemu_mutex_init(&(*rsp
)->bitmap_mutex
);
2705 qemu_mutex_init(&(*rsp
)->src_page_req_mutex
);
2706 QSIMPLEQ_INIT(&(*rsp
)->src_page_requests
);
2709 * Count the total number of pages used by ram blocks not including any
2710 * gaps due to alignment or unplugs.
2711 * This must match with the initial values of dirty bitmap.
2713 (*rsp
)->migration_dirty_pages
= ram_bytes_total() >> TARGET_PAGE_BITS
;
2714 ram_state_reset(*rsp
);
2719 static void ram_list_init_bitmaps(void)
2721 MigrationState
*ms
= migrate_get_current();
2723 unsigned long pages
;
2726 /* Skip setting bitmap if there is no RAM */
2727 if (ram_bytes_total()) {
2728 shift
= ms
->clear_bitmap_shift
;
2729 if (shift
> CLEAR_BITMAP_SHIFT_MAX
) {
2730 error_report("clear_bitmap_shift (%u) too big, using "
2731 "max value (%u)", shift
, CLEAR_BITMAP_SHIFT_MAX
);
2732 shift
= CLEAR_BITMAP_SHIFT_MAX
;
2733 } else if (shift
< CLEAR_BITMAP_SHIFT_MIN
) {
2734 error_report("clear_bitmap_shift (%u) too small, using "
2735 "min value (%u)", shift
, CLEAR_BITMAP_SHIFT_MIN
);
2736 shift
= CLEAR_BITMAP_SHIFT_MIN
;
2739 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
2740 pages
= block
->max_length
>> TARGET_PAGE_BITS
;
2742 * The initial dirty bitmap for migration must be set with all
2743 * ones to make sure we'll migrate every guest RAM page to
2745 * Here we set RAMBlock.bmap all to 1 because when rebegin a
2746 * new migration after a failed migration, ram_list.
2747 * dirty_memory[DIRTY_MEMORY_MIGRATION] don't include the whole
2750 block
->bmap
= bitmap_new(pages
);
2751 bitmap_set(block
->bmap
, 0, pages
);
2752 block
->clear_bmap_shift
= shift
;
2753 block
->clear_bmap
= bitmap_new(clear_bmap_size(pages
, shift
));
2758 static void migration_bitmap_clear_discarded_pages(RAMState
*rs
)
2760 unsigned long pages
;
2763 RCU_READ_LOCK_GUARD();
2765 RAMBLOCK_FOREACH_NOT_IGNORED(rb
) {
2766 pages
= ramblock_dirty_bitmap_clear_discarded_pages(rb
);
2767 rs
->migration_dirty_pages
-= pages
;
2771 static void ram_init_bitmaps(RAMState
*rs
)
2773 /* For memory_global_dirty_log_start below. */
2774 qemu_mutex_lock_iothread();
2775 qemu_mutex_lock_ramlist();
2777 WITH_RCU_READ_LOCK_GUARD() {
2778 ram_list_init_bitmaps();
2779 /* We don't use dirty log with background snapshots */
2780 if (!migrate_background_snapshot()) {
2781 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION
);
2782 migration_bitmap_sync_precopy(rs
);
2785 qemu_mutex_unlock_ramlist();
2786 qemu_mutex_unlock_iothread();
2789 * After an eventual first bitmap sync, fixup the initial bitmap
2790 * containing all 1s to exclude any discarded pages from migration.
2792 migration_bitmap_clear_discarded_pages(rs
);
2795 static int ram_init_all(RAMState
**rsp
)
2797 if (ram_state_init(rsp
)) {
2801 if (xbzrle_init()) {
2802 ram_state_cleanup(rsp
);
2806 ram_init_bitmaps(*rsp
);
2811 static void ram_state_resume_prepare(RAMState
*rs
, QEMUFile
*out
)
2817 * Postcopy is not using xbzrle/compression, so no need for that.
2818 * Also, since source are already halted, we don't need to care
2819 * about dirty page logging as well.
2822 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
2823 pages
+= bitmap_count_one(block
->bmap
,
2824 block
->used_length
>> TARGET_PAGE_BITS
);
2827 /* This may not be aligned with current bitmaps. Recalculate. */
2828 rs
->migration_dirty_pages
= pages
;
2830 ram_state_reset(rs
);
2832 /* Update RAMState cache of output QEMUFile */
2835 trace_ram_state_resume_prepare(pages
);
2839 * This function clears bits of the free pages reported by the caller from the
2840 * migration dirty bitmap. @addr is the host address corresponding to the
2841 * start of the continuous guest free pages, and @len is the total bytes of
2844 void qemu_guest_free_page_hint(void *addr
, size_t len
)
2848 size_t used_len
, start
, npages
;
2849 MigrationState
*s
= migrate_get_current();
2851 /* This function is currently expected to be used during live migration */
2852 if (!migration_is_setup_or_active(s
->state
)) {
2856 for (; len
> 0; len
-= used_len
, addr
+= used_len
) {
2857 block
= qemu_ram_block_from_host(addr
, false, &offset
);
2858 if (unlikely(!block
|| offset
>= block
->used_length
)) {
2860 * The implementation might not support RAMBlock resize during
2861 * live migration, but it could happen in theory with future
2862 * updates. So we add a check here to capture that case.
2864 error_report_once("%s unexpected error", __func__
);
2868 if (len
<= block
->used_length
- offset
) {
2871 used_len
= block
->used_length
- offset
;
2874 start
= offset
>> TARGET_PAGE_BITS
;
2875 npages
= used_len
>> TARGET_PAGE_BITS
;
2877 qemu_mutex_lock(&ram_state
->bitmap_mutex
);
2879 * The skipped free pages are equavalent to be sent from clear_bmap's
2880 * perspective, so clear the bits from the memory region bitmap which
2881 * are initially set. Otherwise those skipped pages will be sent in
2882 * the next round after syncing from the memory region bitmap.
2884 migration_clear_memory_region_dirty_bitmap_range(block
, start
, npages
);
2885 ram_state
->migration_dirty_pages
-=
2886 bitmap_count_one_with_offset(block
->bmap
, start
, npages
);
2887 bitmap_clear(block
->bmap
, start
, npages
);
2888 qemu_mutex_unlock(&ram_state
->bitmap_mutex
);
2893 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
2894 * long-running RCU critical section. When rcu-reclaims in the code
2895 * start to become numerous it will be necessary to reduce the
2896 * granularity of these critical sections.
2900 * ram_save_setup: Setup RAM for migration
2902 * Returns zero to indicate success and negative for error
2904 * @f: QEMUFile where to send the data
2905 * @opaque: RAMState pointer
2907 static int ram_save_setup(QEMUFile
*f
, void *opaque
)
2909 RAMState
**rsp
= opaque
;
2912 if (compress_threads_save_setup()) {
2916 /* migration has already setup the bitmap, reuse it. */
2917 if (!migration_in_colo_state()) {
2918 if (ram_init_all(rsp
) != 0) {
2919 compress_threads_save_cleanup();
2925 WITH_RCU_READ_LOCK_GUARD() {
2926 qemu_put_be64(f
, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE
);
2928 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
2929 qemu_put_byte(f
, strlen(block
->idstr
));
2930 qemu_put_buffer(f
, (uint8_t *)block
->idstr
, strlen(block
->idstr
));
2931 qemu_put_be64(f
, block
->used_length
);
2932 if (migrate_postcopy_ram() && block
->page_size
!=
2933 qemu_host_page_size
) {
2934 qemu_put_be64(f
, block
->page_size
);
2936 if (migrate_ignore_shared()) {
2937 qemu_put_be64(f
, block
->mr
->addr
);
2942 ram_control_before_iterate(f
, RAM_CONTROL_SETUP
);
2943 ram_control_after_iterate(f
, RAM_CONTROL_SETUP
);
2945 multifd_send_sync_main(f
);
2946 qemu_put_be64(f
, RAM_SAVE_FLAG_EOS
);
2953 * ram_save_iterate: iterative stage for migration
2955 * Returns zero to indicate success and negative for error
2957 * @f: QEMUFile where to send the data
2958 * @opaque: RAMState pointer
2960 static int ram_save_iterate(QEMUFile
*f
, void *opaque
)
2962 RAMState
**temp
= opaque
;
2963 RAMState
*rs
= *temp
;
2969 if (blk_mig_bulk_active()) {
2970 /* Avoid transferring ram during bulk phase of block migration as
2971 * the bulk phase will usually take a long time and transferring
2972 * ram updates during that time is pointless. */
2977 * We'll take this lock a little bit long, but it's okay for two reasons.
2978 * Firstly, the only possible other thread to take it is who calls
2979 * qemu_guest_free_page_hint(), which should be rare; secondly, see
2980 * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which
2981 * guarantees that we'll at least released it in a regular basis.
2983 qemu_mutex_lock(&rs
->bitmap_mutex
);
2984 WITH_RCU_READ_LOCK_GUARD() {
2985 if (ram_list
.version
!= rs
->last_version
) {
2986 ram_state_reset(rs
);
2989 /* Read version before ram_list.blocks */
2992 ram_control_before_iterate(f
, RAM_CONTROL_ROUND
);
2994 t0
= qemu_clock_get_ns(QEMU_CLOCK_REALTIME
);
2996 while ((ret
= qemu_file_rate_limit(f
)) == 0 ||
2997 !QSIMPLEQ_EMPTY(&rs
->src_page_requests
)) {
3000 if (qemu_file_get_error(f
)) {
3004 pages
= ram_find_and_save_block(rs
);
3005 /* no more pages to sent */
3012 qemu_file_set_error(f
, pages
);
3016 rs
->target_page_count
+= pages
;
3019 * During postcopy, it is necessary to make sure one whole host
3020 * page is sent in one chunk.
3022 if (migrate_postcopy_ram()) {
3023 flush_compressed_data(rs
);
3027 * we want to check in the 1st loop, just in case it was the 1st
3028 * time and we had to sync the dirty bitmap.
3029 * qemu_clock_get_ns() is a bit expensive, so we only check each
3032 if ((i
& 63) == 0) {
3033 uint64_t t1
= (qemu_clock_get_ns(QEMU_CLOCK_REALTIME
) - t0
) /
3035 if (t1
> MAX_WAIT
) {
3036 trace_ram_save_iterate_big_wait(t1
, i
);
3043 qemu_mutex_unlock(&rs
->bitmap_mutex
);
3046 * Must occur before EOS (or any QEMUFile operation)
3047 * because of RDMA protocol.
3049 ram_control_after_iterate(f
, RAM_CONTROL_ROUND
);
3053 && migration_is_setup_or_active(migrate_get_current()->state
)) {
3054 multifd_send_sync_main(rs
->f
);
3055 qemu_put_be64(f
, RAM_SAVE_FLAG_EOS
);
3057 ram_counters
.transferred
+= 8;
3059 ret
= qemu_file_get_error(f
);
3069 * ram_save_complete: function called to send the remaining amount of ram
3071 * Returns zero to indicate success or negative on error
3073 * Called with iothread lock
3075 * @f: QEMUFile where to send the data
3076 * @opaque: RAMState pointer
3078 static int ram_save_complete(QEMUFile
*f
, void *opaque
)
3080 RAMState
**temp
= opaque
;
3081 RAMState
*rs
= *temp
;
3084 rs
->last_stage
= !migration_in_colo_state();
3086 WITH_RCU_READ_LOCK_GUARD() {
3087 if (!migration_in_postcopy()) {
3088 migration_bitmap_sync_precopy(rs
);
3091 ram_control_before_iterate(f
, RAM_CONTROL_FINISH
);
3093 /* try transferring iterative blocks of memory */
3095 /* flush all remaining blocks regardless of rate limiting */
3099 pages
= ram_find_and_save_block(rs
);
3100 /* no more blocks to sent */
3110 flush_compressed_data(rs
);
3111 ram_control_after_iterate(f
, RAM_CONTROL_FINISH
);
3115 multifd_send_sync_main(rs
->f
);
3116 qemu_put_be64(f
, RAM_SAVE_FLAG_EOS
);
3123 static void ram_save_pending(QEMUFile
*f
, void *opaque
, uint64_t max_size
,
3124 uint64_t *res_precopy_only
,
3125 uint64_t *res_compatible
,
3126 uint64_t *res_postcopy_only
)
3128 RAMState
**temp
= opaque
;
3129 RAMState
*rs
= *temp
;
3130 uint64_t remaining_size
;
3132 remaining_size
= rs
->migration_dirty_pages
* TARGET_PAGE_SIZE
;
3134 if (!migration_in_postcopy() &&
3135 remaining_size
< max_size
) {
3136 qemu_mutex_lock_iothread();
3137 WITH_RCU_READ_LOCK_GUARD() {
3138 migration_bitmap_sync_precopy(rs
);
3140 qemu_mutex_unlock_iothread();
3141 remaining_size
= rs
->migration_dirty_pages
* TARGET_PAGE_SIZE
;
3144 if (migrate_postcopy_ram()) {
3145 /* We can do postcopy, and all the data is postcopiable */
3146 *res_compatible
+= remaining_size
;
3148 *res_precopy_only
+= remaining_size
;
3152 static int load_xbzrle(QEMUFile
*f
, ram_addr_t addr
, void *host
)
3154 unsigned int xh_len
;
3156 uint8_t *loaded_data
;
3158 /* extract RLE header */
3159 xh_flags
= qemu_get_byte(f
);
3160 xh_len
= qemu_get_be16(f
);
3162 if (xh_flags
!= ENCODING_FLAG_XBZRLE
) {
3163 error_report("Failed to load XBZRLE page - wrong compression!");
3167 if (xh_len
> TARGET_PAGE_SIZE
) {
3168 error_report("Failed to load XBZRLE page - len overflow!");
3171 loaded_data
= XBZRLE
.decoded_buf
;
3172 /* load data and decode */
3173 /* it can change loaded_data to point to an internal buffer */
3174 qemu_get_buffer_in_place(f
, &loaded_data
, xh_len
);
3177 if (xbzrle_decode_buffer(loaded_data
, xh_len
, host
,
3178 TARGET_PAGE_SIZE
) == -1) {
3179 error_report("Failed to load XBZRLE page - decode error!");
3187 * ram_block_from_stream: read a RAMBlock id from the migration stream
3189 * Must be called from within a rcu critical section.
3191 * Returns a pointer from within the RCU-protected ram_list.
3193 * @f: QEMUFile where to read the data from
3194 * @flags: Page flags (mostly to see if it's a continuation of previous block)
3196 static inline RAMBlock
*ram_block_from_stream(QEMUFile
*f
, int flags
)
3198 static RAMBlock
*block
;
3202 if (flags
& RAM_SAVE_FLAG_CONTINUE
) {
3204 error_report("Ack, bad migration stream!");
3210 len
= qemu_get_byte(f
);
3211 qemu_get_buffer(f
, (uint8_t *)id
, len
);
3214 block
= qemu_ram_block_by_name(id
);
3216 error_report("Can't find block %s", id
);
3220 if (ramblock_is_ignored(block
)) {
3221 error_report("block %s should not be migrated !", id
);
3228 static inline void *host_from_ram_block_offset(RAMBlock
*block
,
3231 if (!offset_in_ramblock(block
, offset
)) {
3235 return block
->host
+ offset
;
3238 static void *host_page_from_ram_block_offset(RAMBlock
*block
,
3241 /* Note: Explicitly no check against offset_in_ramblock(). */
3242 return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block
->host
+ offset
),
3246 static ram_addr_t
host_page_offset_from_ram_block_offset(RAMBlock
*block
,
3249 return ((uintptr_t)block
->host
+ offset
) & (block
->page_size
- 1);
3252 static inline void *colo_cache_from_block_offset(RAMBlock
*block
,
3253 ram_addr_t offset
, bool record_bitmap
)
3255 if (!offset_in_ramblock(block
, offset
)) {
3258 if (!block
->colo_cache
) {
3259 error_report("%s: colo_cache is NULL in block :%s",
3260 __func__
, block
->idstr
);
3265 * During colo checkpoint, we need bitmap of these migrated pages.
3266 * It help us to decide which pages in ram cache should be flushed
3267 * into VM's RAM later.
3269 if (record_bitmap
&&
3270 !test_and_set_bit(offset
>> TARGET_PAGE_BITS
, block
->bmap
)) {
3271 ram_state
->migration_dirty_pages
++;
3273 return block
->colo_cache
+ offset
;
/**
 * ram_handle_compressed: handle the zero page case
 *
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 *
 * @host: host address for the zero page
 * @ch: what the page is filled from.  We only support zero
 * @size: size of the zero page
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    /* Skip the memset when the page is already all-zero. */
    if (ch != 0 || !buffer_is_zero(host, size)) {
        memset(host, ch, size);
    }
}
3293 /* return the size after decompression, or negative value on error */
3295 qemu_uncompress_data(z_stream
*stream
, uint8_t *dest
, size_t dest_len
,
3296 const uint8_t *source
, size_t source_len
)
3300 err
= inflateReset(stream
);
3305 stream
->avail_in
= source_len
;
3306 stream
->next_in
= (uint8_t *)source
;
3307 stream
->avail_out
= dest_len
;
3308 stream
->next_out
= dest
;
3310 err
= inflate(stream
, Z_NO_FLUSH
);
3311 if (err
!= Z_STREAM_END
) {
3315 return stream
->total_out
;
3318 static void *do_data_decompress(void *opaque
)
3320 DecompressParam
*param
= opaque
;
3321 unsigned long pagesize
;
3325 qemu_mutex_lock(¶m
->mutex
);
3326 while (!param
->quit
) {
3331 qemu_mutex_unlock(¶m
->mutex
);
3333 pagesize
= TARGET_PAGE_SIZE
;
3335 ret
= qemu_uncompress_data(¶m
->stream
, des
, pagesize
,
3336 param
->compbuf
, len
);
3337 if (ret
< 0 && migrate_get_current()->decompress_error_check
) {
3338 error_report("decompress data failed");
3339 qemu_file_set_error(decomp_file
, ret
);
3342 qemu_mutex_lock(&decomp_done_lock
);
3344 qemu_cond_signal(&decomp_done_cond
);
3345 qemu_mutex_unlock(&decomp_done_lock
);
3347 qemu_mutex_lock(¶m
->mutex
);
3349 qemu_cond_wait(¶m
->cond
, ¶m
->mutex
);
3352 qemu_mutex_unlock(¶m
->mutex
);
3357 static int wait_for_decompress_done(void)
3359 int idx
, thread_count
;
3361 if (!migrate_use_compression()) {
3365 thread_count
= migrate_decompress_threads();
3366 qemu_mutex_lock(&decomp_done_lock
);
3367 for (idx
= 0; idx
< thread_count
; idx
++) {
3368 while (!decomp_param
[idx
].done
) {
3369 qemu_cond_wait(&decomp_done_cond
, &decomp_done_lock
);
3372 qemu_mutex_unlock(&decomp_done_lock
);
3373 return qemu_file_get_error(decomp_file
);
3376 static void compress_threads_load_cleanup(void)
3378 int i
, thread_count
;
3380 if (!migrate_use_compression()) {
3383 thread_count
= migrate_decompress_threads();
3384 for (i
= 0; i
< thread_count
; i
++) {
3386 * we use it as a indicator which shows if the thread is
3387 * properly init'd or not
3389 if (!decomp_param
[i
].compbuf
) {
3393 qemu_mutex_lock(&decomp_param
[i
].mutex
);
3394 decomp_param
[i
].quit
= true;
3395 qemu_cond_signal(&decomp_param
[i
].cond
);
3396 qemu_mutex_unlock(&decomp_param
[i
].mutex
);
3398 for (i
= 0; i
< thread_count
; i
++) {
3399 if (!decomp_param
[i
].compbuf
) {
3403 qemu_thread_join(decompress_threads
+ i
);
3404 qemu_mutex_destroy(&decomp_param
[i
].mutex
);
3405 qemu_cond_destroy(&decomp_param
[i
].cond
);
3406 inflateEnd(&decomp_param
[i
].stream
);
3407 g_free(decomp_param
[i
].compbuf
);
3408 decomp_param
[i
].compbuf
= NULL
;
3410 g_free(decompress_threads
);
3411 g_free(decomp_param
);
3412 decompress_threads
= NULL
;
3413 decomp_param
= NULL
;
3417 static int compress_threads_load_setup(QEMUFile
*f
)
3419 int i
, thread_count
;
3421 if (!migrate_use_compression()) {
3425 thread_count
= migrate_decompress_threads();
3426 decompress_threads
= g_new0(QemuThread
, thread_count
);
3427 decomp_param
= g_new0(DecompressParam
, thread_count
);
3428 qemu_mutex_init(&decomp_done_lock
);
3429 qemu_cond_init(&decomp_done_cond
);
3431 for (i
= 0; i
< thread_count
; i
++) {
3432 if (inflateInit(&decomp_param
[i
].stream
) != Z_OK
) {
3436 decomp_param
[i
].compbuf
= g_malloc0(compressBound(TARGET_PAGE_SIZE
));
3437 qemu_mutex_init(&decomp_param
[i
].mutex
);
3438 qemu_cond_init(&decomp_param
[i
].cond
);
3439 decomp_param
[i
].done
= true;
3440 decomp_param
[i
].quit
= false;
3441 qemu_thread_create(decompress_threads
+ i
, "decompress",
3442 do_data_decompress
, decomp_param
+ i
,
3443 QEMU_THREAD_JOINABLE
);
3447 compress_threads_load_cleanup();
3451 static void decompress_data_with_multi_threads(QEMUFile
*f
,
3452 void *host
, int len
)
3454 int idx
, thread_count
;
3456 thread_count
= migrate_decompress_threads();
3457 QEMU_LOCK_GUARD(&decomp_done_lock
);
3459 for (idx
= 0; idx
< thread_count
; idx
++) {
3460 if (decomp_param
[idx
].done
) {
3461 decomp_param
[idx
].done
= false;
3462 qemu_mutex_lock(&decomp_param
[idx
].mutex
);
3463 qemu_get_buffer(f
, decomp_param
[idx
].compbuf
, len
);
3464 decomp_param
[idx
].des
= host
;
3465 decomp_param
[idx
].len
= len
;
3466 qemu_cond_signal(&decomp_param
[idx
].cond
);
3467 qemu_mutex_unlock(&decomp_param
[idx
].mutex
);
3471 if (idx
< thread_count
) {
3474 qemu_cond_wait(&decomp_done_cond
, &decomp_done_lock
);
3479 static void colo_init_ram_state(void)
3481 ram_state_init(&ram_state
);
3485 * colo cache: this is for secondary VM, we cache the whole
3486 * memory of the secondary VM, it is need to hold the global lock
3487 * to call this helper.
3489 int colo_init_ram_cache(void)
3493 WITH_RCU_READ_LOCK_GUARD() {
3494 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3495 block
->colo_cache
= qemu_anon_ram_alloc(block
->used_length
,
3496 NULL
, false, false);
3497 if (!block
->colo_cache
) {
3498 error_report("%s: Can't alloc memory for COLO cache of block %s,"
3499 "size 0x" RAM_ADDR_FMT
, __func__
, block
->idstr
,
3500 block
->used_length
);
3501 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3502 if (block
->colo_cache
) {
3503 qemu_anon_ram_free(block
->colo_cache
, block
->used_length
);
3504 block
->colo_cache
= NULL
;
3509 if (!machine_dump_guest_core(current_machine
)) {
3510 qemu_madvise(block
->colo_cache
, block
->used_length
,
3511 QEMU_MADV_DONTDUMP
);
3517 * Record the dirty pages that sent by PVM, we use this dirty bitmap together
3518 * with to decide which page in cache should be flushed into SVM's RAM. Here
3519 * we use the same name 'ram_bitmap' as for migration.
3521 if (ram_bytes_total()) {
3524 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3525 unsigned long pages
= block
->max_length
>> TARGET_PAGE_BITS
;
3526 block
->bmap
= bitmap_new(pages
);
3530 colo_init_ram_state();
3534 /* TODO: duplicated with ram_init_bitmaps */
3535 void colo_incoming_start_dirty_log(void)
3537 RAMBlock
*block
= NULL
;
3538 /* For memory_global_dirty_log_start below. */
3539 qemu_mutex_lock_iothread();
3540 qemu_mutex_lock_ramlist();
3542 memory_global_dirty_log_sync();
3543 WITH_RCU_READ_LOCK_GUARD() {
3544 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3545 ramblock_sync_dirty_bitmap(ram_state
, block
);
3546 /* Discard this dirty bitmap record */
3547 bitmap_zero(block
->bmap
, block
->max_length
>> TARGET_PAGE_BITS
);
3549 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION
);
3551 ram_state
->migration_dirty_pages
= 0;
3552 qemu_mutex_unlock_ramlist();
3553 qemu_mutex_unlock_iothread();
3556 /* It is need to hold the global lock to call this helper */
3557 void colo_release_ram_cache(void)
3561 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION
);
3562 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3563 g_free(block
->bmap
);
3567 WITH_RCU_READ_LOCK_GUARD() {
3568 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3569 if (block
->colo_cache
) {
3570 qemu_anon_ram_free(block
->colo_cache
, block
->used_length
);
3571 block
->colo_cache
= NULL
;
3575 ram_state_cleanup(&ram_state
);
3579 * ram_load_setup: Setup RAM for migration incoming side
3581 * Returns zero to indicate success and negative for error
3583 * @f: QEMUFile where to receive the data
3584 * @opaque: RAMState pointer
3586 static int ram_load_setup(QEMUFile
*f
, void *opaque
)
3588 if (compress_threads_load_setup(f
)) {
3592 xbzrle_load_setup();
3593 ramblock_recv_map_init();
3598 static int ram_load_cleanup(void *opaque
)
3602 RAMBLOCK_FOREACH_NOT_IGNORED(rb
) {
3603 qemu_ram_block_writeback(rb
);
3606 xbzrle_load_cleanup();
3607 compress_threads_load_cleanup();
3609 RAMBLOCK_FOREACH_NOT_IGNORED(rb
) {
3610 g_free(rb
->receivedmap
);
3611 rb
->receivedmap
= NULL
;
3618 * ram_postcopy_incoming_init: allocate postcopy data structures
3620 * Returns 0 for success and negative if there was one error
3622 * @mis: current migration incoming state
3624 * Allocate data structures etc needed by incoming migration with
3625 * postcopy-ram. postcopy-ram's similarly names
3626 * postcopy_ram_incoming_init does the work.
3628 int ram_postcopy_incoming_init(MigrationIncomingState
*mis
)
3630 return postcopy_ram_incoming_init(mis
);
3634 * ram_load_postcopy: load a page in postcopy case
3636 * Returns 0 for success or -errno in case of error
3638 * Called in postcopy mode by ram_load().
3639 * rcu_read_lock is taken prior to this being called.
3641 * @f: QEMUFile where to send the data
3643 static int ram_load_postcopy(QEMUFile
*f
)
3645 int flags
= 0, ret
= 0;
3646 bool place_needed
= false;
3647 bool matches_target_page_size
= false;
3648 MigrationIncomingState
*mis
= migration_incoming_get_current();
3649 /* Temporary page that is later 'placed' */
3650 void *postcopy_host_page
= mis
->postcopy_tmp_page
;
3651 void *host_page
= NULL
;
3652 bool all_zero
= true;
3653 int target_pages
= 0;
3655 while (!ret
&& !(flags
& RAM_SAVE_FLAG_EOS
)) {
3657 void *page_buffer
= NULL
;
3658 void *place_source
= NULL
;
3659 RAMBlock
*block
= NULL
;
3663 addr
= qemu_get_be64(f
);
3666 * If qemu file error, we should stop here, and then "addr"
3669 ret
= qemu_file_get_error(f
);
3674 flags
= addr
& ~TARGET_PAGE_MASK
;
3675 addr
&= TARGET_PAGE_MASK
;
3677 trace_ram_load_postcopy_loop((uint64_t)addr
, flags
);
3678 if (flags
& (RAM_SAVE_FLAG_ZERO
| RAM_SAVE_FLAG_PAGE
|
3679 RAM_SAVE_FLAG_COMPRESS_PAGE
)) {
3680 block
= ram_block_from_stream(f
, flags
);
3687 * Relying on used_length is racy and can result in false positives.
3688 * We might place pages beyond used_length in case RAM was shrunk
3689 * while in postcopy, which is fine - trying to place via
3690 * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault.
3692 if (!block
->host
|| addr
>= block
->postcopy_length
) {
3693 error_report("Illegal RAM offset " RAM_ADDR_FMT
, addr
);
3698 matches_target_page_size
= block
->page_size
== TARGET_PAGE_SIZE
;
3700 * Postcopy requires that we place whole host pages atomically;
3701 * these may be huge pages for RAMBlocks that are backed by
3703 * To make it atomic, the data is read into a temporary page
3704 * that's moved into place later.
3705 * The migration protocol uses, possibly smaller, target-pages
3706 * however the source ensures it always sends all the components
3707 * of a host page in one chunk.
3709 page_buffer
= postcopy_host_page
+
3710 host_page_offset_from_ram_block_offset(block
, addr
);
3711 /* If all TP are zero then we can optimise the place */
3712 if (target_pages
== 1) {
3713 host_page
= host_page_from_ram_block_offset(block
, addr
);
3714 } else if (host_page
!= host_page_from_ram_block_offset(block
,
3716 /* not the 1st TP within the HP */
3717 error_report("Non-same host page %p/%p", host_page
,
3718 host_page_from_ram_block_offset(block
, addr
));
3724 * If it's the last part of a host page then we place the host
3727 if (target_pages
== (block
->page_size
/ TARGET_PAGE_SIZE
)) {
3728 place_needed
= true;
3730 place_source
= postcopy_host_page
;
3733 switch (flags
& ~RAM_SAVE_FLAG_CONTINUE
) {
3734 case RAM_SAVE_FLAG_ZERO
:
3735 ch
= qemu_get_byte(f
);
3737 * Can skip to set page_buffer when
3738 * this is a zero page and (block->page_size == TARGET_PAGE_SIZE).
3740 if (ch
|| !matches_target_page_size
) {
3741 memset(page_buffer
, ch
, TARGET_PAGE_SIZE
);
3748 case RAM_SAVE_FLAG_PAGE
:
3750 if (!matches_target_page_size
) {
3751 /* For huge pages, we always use temporary buffer */
3752 qemu_get_buffer(f
, page_buffer
, TARGET_PAGE_SIZE
);
3755 * For small pages that matches target page size, we
3756 * avoid the qemu_file copy. Instead we directly use
3757 * the buffer of QEMUFile to place the page. Note: we
3758 * cannot do any QEMUFile operation before using that
3759 * buffer to make sure the buffer is valid when
3762 qemu_get_buffer_in_place(f
, (uint8_t **)&place_source
,
3766 case RAM_SAVE_FLAG_COMPRESS_PAGE
:
3768 len
= qemu_get_be32(f
);
3769 if (len
< 0 || len
> compressBound(TARGET_PAGE_SIZE
)) {
3770 error_report("Invalid compressed data length: %d", len
);
3774 decompress_data_with_multi_threads(f
, page_buffer
, len
);
3777 case RAM_SAVE_FLAG_EOS
:
3779 multifd_recv_sync_main();
3782 error_report("Unknown combination of migration flags: 0x%x"
3783 " (postcopy mode)", flags
);
3788 /* Got the whole host page, wait for decompress before placing. */
3790 ret
|= wait_for_decompress_done();
3793 /* Detect for any possible file errors */
3794 if (!ret
&& qemu_file_get_error(f
)) {
3795 ret
= qemu_file_get_error(f
);
3798 if (!ret
&& place_needed
) {
3800 ret
= postcopy_place_page_zero(mis
, host_page
, block
);
3802 ret
= postcopy_place_page(mis
, host_page
, place_source
,
3805 place_needed
= false;
3807 /* Assume we have a zero page until we detect something different */
3815 static bool postcopy_is_advised(void)
3817 PostcopyState ps
= postcopy_state_get();
3818 return ps
>= POSTCOPY_INCOMING_ADVISE
&& ps
< POSTCOPY_INCOMING_END
;
3821 static bool postcopy_is_running(void)
3823 PostcopyState ps
= postcopy_state_get();
3824 return ps
>= POSTCOPY_INCOMING_LISTENING
&& ps
< POSTCOPY_INCOMING_END
;
3828 * Flush content of RAM cache into SVM's memory.
3829 * Only flush the pages that be dirtied by PVM or SVM or both.
3831 void colo_flush_ram_cache(void)
3833 RAMBlock
*block
= NULL
;
3836 unsigned long offset
= 0;
3838 memory_global_dirty_log_sync();
3839 WITH_RCU_READ_LOCK_GUARD() {
3840 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3841 ramblock_sync_dirty_bitmap(ram_state
, block
);
3845 trace_colo_flush_ram_cache_begin(ram_state
->migration_dirty_pages
);
3846 WITH_RCU_READ_LOCK_GUARD() {
3847 block
= QLIST_FIRST_RCU(&ram_list
.blocks
);
3850 unsigned long num
= 0;
3852 offset
= colo_bitmap_find_dirty(ram_state
, block
, offset
, &num
);
3853 if (!offset_in_ramblock(block
,
3854 ((ram_addr_t
)offset
) << TARGET_PAGE_BITS
)) {
3857 block
= QLIST_NEXT_RCU(block
, next
);
3859 unsigned long i
= 0;
3861 for (i
= 0; i
< num
; i
++) {
3862 migration_bitmap_clear_dirty(ram_state
, block
, offset
+ i
);
3864 dst_host
= block
->host
3865 + (((ram_addr_t
)offset
) << TARGET_PAGE_BITS
);
3866 src_host
= block
->colo_cache
3867 + (((ram_addr_t
)offset
) << TARGET_PAGE_BITS
);
3868 memcpy(dst_host
, src_host
, TARGET_PAGE_SIZE
* num
);
3873 trace_colo_flush_ram_cache_end();
3877 * ram_load_precopy: load pages in precopy case
3879 * Returns 0 for success or -errno in case of error
3881 * Called in precopy mode by ram_load().
3882 * rcu_read_lock is taken prior to this being called.
3884 * @f: QEMUFile where to send the data
3886 static int ram_load_precopy(QEMUFile
*f
)
3888 int flags
= 0, ret
= 0, invalid_flags
= 0, len
= 0, i
= 0;
3889 /* ADVISE is earlier, it shows the source has the postcopy capability on */
3890 bool postcopy_advised
= postcopy_is_advised();
3891 if (!migrate_use_compression()) {
3892 invalid_flags
|= RAM_SAVE_FLAG_COMPRESS_PAGE
;
3895 while (!ret
&& !(flags
& RAM_SAVE_FLAG_EOS
)) {
3896 ram_addr_t addr
, total_ram_bytes
;
3897 void *host
= NULL
, *host_bak
= NULL
;
3901 * Yield periodically to let main loop run, but an iteration of
3902 * the main loop is expensive, so do it each some iterations
3904 if ((i
& 32767) == 0 && qemu_in_coroutine()) {
3905 aio_co_schedule(qemu_get_current_aio_context(),
3906 qemu_coroutine_self());
3907 qemu_coroutine_yield();
3911 addr
= qemu_get_be64(f
);
3912 flags
= addr
& ~TARGET_PAGE_MASK
;
3913 addr
&= TARGET_PAGE_MASK
;
3915 if (flags
& invalid_flags
) {
3916 if (flags
& invalid_flags
& RAM_SAVE_FLAG_COMPRESS_PAGE
) {
3917 error_report("Received an unexpected compressed page");
3924 if (flags
& (RAM_SAVE_FLAG_ZERO
| RAM_SAVE_FLAG_PAGE
|
3925 RAM_SAVE_FLAG_COMPRESS_PAGE
| RAM_SAVE_FLAG_XBZRLE
)) {
3926 RAMBlock
*block
= ram_block_from_stream(f
, flags
);
3928 host
= host_from_ram_block_offset(block
, addr
);
3930 * After going into COLO stage, we should not load the page
3931 * into SVM's memory directly, we put them into colo_cache firstly.
3932 * NOTE: We need to keep a copy of SVM's ram in colo_cache.
3933 * Previously, we copied all these memory in preparing stage of COLO
3934 * while we need to stop VM, which is a time-consuming process.
3935 * Here we optimize it by a trick, back-up every page while in
3936 * migration process while COLO is enabled, though it affects the
3937 * speed of the migration, but it obviously reduce the downtime of
3938 * back-up all SVM'S memory in COLO preparing stage.
3940 if (migration_incoming_colo_enabled()) {
3941 if (migration_incoming_in_colo_state()) {
3942 /* In COLO stage, put all pages into cache temporarily */
3943 host
= colo_cache_from_block_offset(block
, addr
, true);
3946 * In migration stage but before COLO stage,
3947 * Put all pages into both cache and SVM's memory.
3949 host_bak
= colo_cache_from_block_offset(block
, addr
, false);
3953 error_report("Illegal RAM offset " RAM_ADDR_FMT
, addr
);
3957 if (!migration_incoming_in_colo_state()) {
3958 ramblock_recv_bitmap_set(block
, host
);
3961 trace_ram_load_loop(block
->idstr
, (uint64_t)addr
, flags
, host
);
3964 switch (flags
& ~RAM_SAVE_FLAG_CONTINUE
) {
3965 case RAM_SAVE_FLAG_MEM_SIZE
:
3966 /* Synchronize RAM block list */
3967 total_ram_bytes
= addr
;
3968 while (!ret
&& total_ram_bytes
) {
3973 len
= qemu_get_byte(f
);
3974 qemu_get_buffer(f
, (uint8_t *)id
, len
);
3976 length
= qemu_get_be64(f
);
3978 block
= qemu_ram_block_by_name(id
);
3979 if (block
&& !qemu_ram_is_migratable(block
)) {
3980 error_report("block %s should not be migrated !", id
);
3983 if (length
!= block
->used_length
) {
3984 Error
*local_err
= NULL
;
3986 ret
= qemu_ram_resize(block
, length
,
3989 error_report_err(local_err
);
3992 /* For postcopy we need to check hugepage sizes match */
3993 if (postcopy_advised
&& migrate_postcopy_ram() &&
3994 block
->page_size
!= qemu_host_page_size
) {
3995 uint64_t remote_page_size
= qemu_get_be64(f
);
3996 if (remote_page_size
!= block
->page_size
) {
3997 error_report("Mismatched RAM page size %s "
3998 "(local) %zd != %" PRId64
,
3999 id
, block
->page_size
,
4004 if (migrate_ignore_shared()) {
4005 hwaddr addr
= qemu_get_be64(f
);
4006 if (ramblock_is_ignored(block
) &&
4007 block
->mr
->addr
!= addr
) {
4008 error_report("Mismatched GPAs for block %s "
4009 "%" PRId64
"!= %" PRId64
,
4011 (uint64_t)block
->mr
->addr
);
4015 ram_control_load_hook(f
, RAM_CONTROL_BLOCK_REG
,
4018 error_report("Unknown ramblock \"%s\", cannot "
4019 "accept migration", id
);
4023 total_ram_bytes
-= length
;
4027 case RAM_SAVE_FLAG_ZERO
:
4028 ch
= qemu_get_byte(f
);
4029 ram_handle_compressed(host
, ch
, TARGET_PAGE_SIZE
);
4032 case RAM_SAVE_FLAG_PAGE
:
4033 qemu_get_buffer(f
, host
, TARGET_PAGE_SIZE
);
4036 case RAM_SAVE_FLAG_COMPRESS_PAGE
:
4037 len
= qemu_get_be32(f
);
4038 if (len
< 0 || len
> compressBound(TARGET_PAGE_SIZE
)) {
4039 error_report("Invalid compressed data length: %d", len
);
4043 decompress_data_with_multi_threads(f
, host
, len
);
4046 case RAM_SAVE_FLAG_XBZRLE
:
4047 if (load_xbzrle(f
, addr
, host
) < 0) {
4048 error_report("Failed to decompress XBZRLE page at "
4049 RAM_ADDR_FMT
, addr
);
4054 case RAM_SAVE_FLAG_EOS
:
4056 multifd_recv_sync_main();
4059 if (flags
& RAM_SAVE_FLAG_HOOK
) {
4060 ram_control_load_hook(f
, RAM_CONTROL_HOOK
, NULL
);
4062 error_report("Unknown combination of migration flags: 0x%x",
4068 ret
= qemu_file_get_error(f
);
4070 if (!ret
&& host_bak
) {
4071 memcpy(host_bak
, host
, TARGET_PAGE_SIZE
);
4075 ret
|= wait_for_decompress_done();
4079 static int ram_load(QEMUFile
*f
, void *opaque
, int version_id
)
4082 static uint64_t seq_iter
;
4084 * If system is running in postcopy mode, page inserts to host memory must
4087 bool postcopy_running
= postcopy_is_running();
4091 if (version_id
!= 4) {
4096 * This RCU critical section can be very long running.
4097 * When RCU reclaims in the code start to become numerous,
4098 * it will be necessary to reduce the granularity of this
4101 WITH_RCU_READ_LOCK_GUARD() {
4102 if (postcopy_running
) {
4103 ret
= ram_load_postcopy(f
);
4105 ret
= ram_load_precopy(f
);
4108 trace_ram_load_complete(ret
, seq_iter
);
4113 static bool ram_has_postcopy(void *opaque
)
4116 RAMBLOCK_FOREACH_NOT_IGNORED(rb
) {
4117 if (ramblock_is_pmem(rb
)) {
4118 info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
4119 "is not supported now!", rb
->idstr
, rb
->host
);
4124 return migrate_postcopy_ram();
4127 /* Sync all the dirty bitmap with destination VM. */
4128 static int ram_dirty_bitmap_sync_all(MigrationState
*s
, RAMState
*rs
)
4131 QEMUFile
*file
= s
->to_dst_file
;
4132 int ramblock_count
= 0;
4134 trace_ram_dirty_bitmap_sync_start();
4136 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
4137 qemu_savevm_send_recv_bitmap(file
, block
->idstr
);
4138 trace_ram_dirty_bitmap_request(block
->idstr
);
4142 trace_ram_dirty_bitmap_sync_wait();
4144 /* Wait until all the ramblocks' dirty bitmap synced */
4145 while (ramblock_count
--) {
4146 qemu_sem_wait(&s
->rp_state
.rp_sem
);
4149 trace_ram_dirty_bitmap_sync_complete();
4154 static void ram_dirty_bitmap_reload_notify(MigrationState
*s
)
4156 qemu_sem_post(&s
->rp_state
.rp_sem
);
4160 * Read the received bitmap, revert it as the initial dirty bitmap.
4161 * This is only used when the postcopy migration is paused but wants
4162 * to resume from a middle point.
4164 int ram_dirty_bitmap_reload(MigrationState
*s
, RAMBlock
*block
)
4167 /* from_dst_file is always valid because we're within rp_thread */
4168 QEMUFile
*file
= s
->rp_state
.from_dst_file
;
4169 unsigned long *le_bitmap
, nbits
= block
->used_length
>> TARGET_PAGE_BITS
;
4170 uint64_t local_size
= DIV_ROUND_UP(nbits
, 8);
4171 uint64_t size
, end_mark
;
4173 trace_ram_dirty_bitmap_reload_begin(block
->idstr
);
4175 if (s
->state
!= MIGRATION_STATUS_POSTCOPY_RECOVER
) {
4176 error_report("%s: incorrect state %s", __func__
,
4177 MigrationStatus_str(s
->state
));
4182 * Note: see comments in ramblock_recv_bitmap_send() on why we
4183 * need the endianness conversion, and the paddings.
4185 local_size
= ROUND_UP(local_size
, 8);
4188 le_bitmap
= bitmap_new(nbits
+ BITS_PER_LONG
);
4190 size
= qemu_get_be64(file
);
4192 /* The size of the bitmap should match with our ramblock */
4193 if (size
!= local_size
) {
4194 error_report("%s: ramblock '%s' bitmap size mismatch "
4195 "(0x%"PRIx64
" != 0x%"PRIx64
")", __func__
,
4196 block
->idstr
, size
, local_size
);
4201 size
= qemu_get_buffer(file
, (uint8_t *)le_bitmap
, local_size
);
4202 end_mark
= qemu_get_be64(file
);
4204 ret
= qemu_file_get_error(file
);
4205 if (ret
|| size
!= local_size
) {
4206 error_report("%s: read bitmap failed for ramblock '%s': %d"
4207 " (size 0x%"PRIx64
", got: 0x%"PRIx64
")",
4208 __func__
, block
->idstr
, ret
, local_size
, size
);
4213 if (end_mark
!= RAMBLOCK_RECV_BITMAP_ENDING
) {
4214 error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIx64
,
4215 __func__
, block
->idstr
, end_mark
);
4221 * Endianness conversion. We are during postcopy (though paused).
4222 * The dirty bitmap won't change. We can directly modify it.
4224 bitmap_from_le(block
->bmap
, le_bitmap
, nbits
);
4227 * What we received is "received bitmap". Revert it as the initial
4228 * dirty bitmap for this ramblock.
4230 bitmap_complement(block
->bmap
, block
->bmap
, nbits
);
4232 /* Clear dirty bits of discarded ranges that we don't want to migrate. */
4233 ramblock_dirty_bitmap_clear_discarded_pages(block
);
4235 /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
4236 trace_ram_dirty_bitmap_reload_complete(block
->idstr
);
4239 * We succeeded to sync bitmap for current ramblock. If this is
4240 * the last one to sync, we need to notify the main send thread.
4242 ram_dirty_bitmap_reload_notify(s
);
4250 static int ram_resume_prepare(MigrationState
*s
, void *opaque
)
4252 RAMState
*rs
= *(RAMState
**)opaque
;
4255 ret
= ram_dirty_bitmap_sync_all(s
, rs
);
4260 ram_state_resume_prepare(rs
, s
->to_dst_file
);
4265 static SaveVMHandlers savevm_ram_handlers
= {
4266 .save_setup
= ram_save_setup
,
4267 .save_live_iterate
= ram_save_iterate
,
4268 .save_live_complete_postcopy
= ram_save_complete
,
4269 .save_live_complete_precopy
= ram_save_complete
,
4270 .has_postcopy
= ram_has_postcopy
,
4271 .save_live_pending
= ram_save_pending
,
4272 .load_state
= ram_load
,
4273 .save_cleanup
= ram_save_cleanup
,
4274 .load_setup
= ram_load_setup
,
4275 .load_cleanup
= ram_load_cleanup
,
4276 .resume_prepare
= ram_resume_prepare
,
4279 static void ram_mig_ram_block_resized(RAMBlockNotifier
*n
, void *host
,
4280 size_t old_size
, size_t new_size
)
4282 PostcopyState ps
= postcopy_state_get();
4284 RAMBlock
*rb
= qemu_ram_block_from_host(host
, false, &offset
);
4287 if (ramblock_is_ignored(rb
)) {
4291 if (!migration_is_idle()) {
4293 * Precopy code on the source cannot deal with the size of RAM blocks
4294 * changing at random points in time - especially after sending the
4295 * RAM block sizes in the migration stream, they must no longer change.
4296 * Abort and indicate a proper reason.
4298 error_setg(&err
, "RAM block '%s' resized during precopy.", rb
->idstr
);
4299 migration_cancel(err
);
4304 case POSTCOPY_INCOMING_ADVISE
:
4306 * Update what ram_postcopy_incoming_init()->init_range() does at the
4307 * time postcopy was advised. Syncing RAM blocks with the source will
4308 * result in RAM resizes.
4310 if (old_size
< new_size
) {
4311 if (ram_discard_range(rb
->idstr
, old_size
, new_size
- old_size
)) {
4312 error_report("RAM block '%s' discard of resized RAM failed",
4316 rb
->postcopy_length
= new_size
;
4318 case POSTCOPY_INCOMING_NONE
:
4319 case POSTCOPY_INCOMING_RUNNING
:
4320 case POSTCOPY_INCOMING_END
:
4322 * Once our guest is running, postcopy does no longer care about
4323 * resizes. When growing, the new memory was not available on the
4324 * source, no handler needed.
4328 error_report("RAM block '%s' resized during postcopy state: %d",
4334 static RAMBlockNotifier ram_mig_ram_notifier
= {
4335 .ram_block_resized
= ram_mig_ram_block_resized
,
4338 void ram_mig_init(void)
4340 qemu_mutex_init(&XBZRLE
.lock
);
4341 register_savevm_live("ram", 0, 4, &savevm_ram_handlers
, &ram_state
);
4342 ram_block_notifier_add(&ram_mig_ram_notifier
);