/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/madvise.h"
#include "qemu/main-loop.h"
#include "ram-compress.h"
#include "migration.h"
#include "migration-stats.h"
#include "migration/register.h"
#include "migration/misc.h"
#include "qemu-file.h"
#include "postcopy-ram.h"
#include "page_cache.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qmp/qerror.h"
#include "exec/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "sysemu/cpu-throttle.h"
#include "sysemu/runstate.h"
#include "sysemu/dirtylimit.h"
#include "sysemu/kvm.h"
#include "hw/boards.h" /* for machine_dump_guest_core() */

#if defined(__linux__)
#include "qemu/userfaultfd.h"
#endif /* defined(__linux__) */
/***********************************************************/
/* ram save/restore */

/*
 * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS. It
 * worked for pages that were filled with the same char. We switched
 * it to only search for the zero value, and renamed it to avoid
 * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
 *
 * RAM_SAVE_FLAG_FULL was obsoleted in 2009, so it can be reused now.
 */
#define RAM_SAVE_FLAG_FULL     0x01
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in rdma.h for RAM_SAVE_FLAG_HOOK */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
#define RAM_SAVE_FLAG_MULTIFD_FLUSH    0x200
/* We can't use any flag that is bigger than 0x200 */
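
/*
 * Illustrative sketch, not an additional stream definition: the flags
 * above only occupy bits below the target page size, so a page header
 * can carry the page offset and its flags in a single be64 word, e.g.:
 *
 *   qemu_put_be64(f, (offset & TARGET_PAGE_MASK) | RAM_SAVE_FLAG_PAGE);
 */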
/*
 * mapped-ram migration supports O_DIRECT, so we need to make sure the
 * userspace buffer, the IO operation size and the file offset are
 * aligned according to the underlying device's block size. The first
 * two are already aligned to page size, but we need to add padding to
 * the file to align the offset. We cannot read the block size
 * dynamically because the migration file can be moved between
 * different systems, so use 1M to cover most block sizes and to keep
 * the file offset aligned at page size as well.
 */
#define MAPPED_RAM_FILE_OFFSET_ALIGNMENT 0x100000

/*
 * When doing mapped-ram migration, this is the amount we read from
 * the pages region in the migration file at a time.
 */
#define MAPPED_RAM_LOAD_BUF_SIZE 0x100000
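
/*
 * Minimal sketch of how the padding requirement above can be met
 * (the `offset' name is local to this comment): a writer advances the
 * file offset to the next aligned boundary before the pages region:
 *
 *   offset = ROUND_UP(offset, MAPPED_RAM_FILE_OFFSET_ALIGNMENT);
 */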
XBZRLECacheStats xbzrle_counters;

/* used by the search for pages to send */
struct PageSearchStatus {
    /* The migration channel used for a specific host page */
    QEMUFile *pss_channel;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Current block being searched */
    RAMBlock *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool complete_round;
    /* Whether we're sending a host page */
    bool host_page_sending;
    /* The start/end of current host page. Invalid if host_page_sending==false */
    unsigned long host_page_start;
    unsigned long host_page_end;
};
typedef struct PageSearchStatus PageSearchStatus;
/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;
static void XBZRLE_cache_lock(void)
{
    if (migrate_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}
/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from migrate_params_apply in the main
 * thread, possibly while a migration is in progress. A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock().
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(uint64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}
static bool postcopy_preempt_active(void)
{
    return migrate_postcopy_preempt() && migration_in_postcopy();
}

bool migrate_ram_is_ignored(RAMBlock *block)
{
    return !qemu_ram_is_migratable(block) ||
           (migrate_ignore_shared() && qemu_ram_is_shared(block)
            && qemu_ram_is_named_file(block));
}
#undef RAMBLOCK_FOREACH

int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        ret = func(block, opaque);
        if (ret) {
            break;
        }
    }
    return ret;
}
static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}
int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}

bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}

void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}

void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}

void ramblock_recv_bitmap_set_offset(RAMBlock *rb, uint64_t byte_offset)
{
    set_bit_atomic(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}
#define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)

/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns >0 if success with sent bytes, or <0 if error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;
    uint64_t size;

    if (!block) {
        error_report("%s: invalid block name: %s", __func__, block_name);
        return -1;
    }

    nbits = block->postcopy_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment). So extend it a bit before hand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap. This is
     * required when source and destination VMs are not using the
     * same endianness. (Note: big endian won't work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = DIV_ROUND_UP(nbits, 8);

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines. We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit machines.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
    int ret = qemu_fflush(file);

    g_free(le_bitmap);

    if (ret < 0) {
        return ret;
    }

    return size + sizeof(size);
}
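
/*
 * Resulting stream layout, summarizing the code above:
 *
 *   be64: size (bitmap bytes, padded to a multiple of 8)
 *   [n]:  little-endian bitmap, `size' bytes
 *   be64: RAMBLOCK_RECV_BITMAP_ENDING (sanity marker)
 */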
/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};
/* State of RAM for migration */
struct RAMState {
    /*
     * PageSearchStatus structures for the channels when send pages.
     * Protected by the bitmap_mutex.
     */
    PageSearchStatus pss[RAM_CHANNEL_MAX];
    /* UFFD file descriptor, used in 'write-tracking' migration */
    int uffdio_fd;
    /* total ram size in bytes */
    uint64_t ram_bytes_total;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* How many times we have dirty too many pages */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* Amount of xbzrle pages since the beginning of the period */
    uint64_t xbzrle_pages_prev;
    /* Amount of xbzrle encoded bytes since the beginning of the period */
    uint64_t xbzrle_bytes_prev;
    /* Are we really using XBZRLE (e.g., after the first round). */
    bool xbzrle_started;
    /* Are we on the last stage of migration */
    bool last_stage;

    /* total handled target pages at the beginning of period */
    uint64_t target_page_count_prev;
    /* total handled target pages since start */
    uint64_t target_page_count;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /*
     * Protects:
     * - dirty/clear bitmap
     * - migration_dirty_pages
     * - pss structures
     */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;

    /*
     * This is only used when postcopy is in recovery phase, to communicate
     * between the migration thread and the return path thread on dirty
     * bitmap synchronizations. This field is unused in other stages of
     * live migration.
     */
    unsigned int postcopy_bmap_sync_requested;
};
typedef struct RAMState RAMState;

static RAMState *ram_state;

static NotifierWithReturnList precopy_notifier_list;
/* Whether postcopy has queued requests? */
static bool postcopy_has_request(RAMState *rs)
{
    return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests);
}

void precopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&precopy_notifier_list);
}

void precopy_add_notifier(NotifierWithReturn *n)
{
    notifier_with_return_list_add(&precopy_notifier_list, n);
}

void precopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int precopy_notify(PrecopyNotifyReason reason, Error **errp)
{
    PrecopyNotifyData pnd;
    pnd.reason = reason;

    return notifier_with_return_list_notify(&precopy_notifier_list, &pnd, errp);
}
uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}

void ram_transferred_add(uint64_t bytes)
{
    if (runstate_is_running()) {
        stat64_add(&mig_stats.precopy_bytes, bytes);
    } else if (migration_in_postcopy()) {
        stat64_add(&mig_stats.postcopy_bytes, bytes);
    } else {
        stat64_add(&mig_stats.downtime_bytes, bytes);
    }
}
struct MigrationOps {
    int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss);
};
typedef struct MigrationOps MigrationOps;

MigrationOps *migration_ops;
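
/*
 * Sketch of how the hook is expected to be wired during setup (an
 * assumption based on the two implementations further below; the setup
 * code itself is outside this excerpt):
 *
 *   migration_ops->ram_save_target_page = migrate_multifd()
 *       ? ram_save_target_page_multifd
 *       : ram_save_target_page_legacy;
 */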
static int ram_save_host_page_urgent(PageSearchStatus *pss);
/* NOTE: page is the PFN not real ram_addr_t. */
static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page)
{
    pss->block = rb;
    pss->page = page;
    pss->complete_round = false;
}
/*
 * Check whether two PSSs are actively sending the same page. Return true
 * if it is, false otherwise.
 */
static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2)
{
    return pss1->host_page_sending && pss2->host_page_sending &&
           (pss1->host_page_start == pss2->host_page_start);
}
/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @pss: current PSS channel status
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(PageSearchStatus *pss, QEMUFile *f,
                               RAMBlock *block, ram_addr_t offset)
{
    size_t size, len;
    bool same_block = (block == pss->last_sent_block);

    if (same_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!same_block) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        pss->last_sent_block = block;
    }
    return size;
}
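
/*
 * Resulting wire layout, summarizing the code above: the first page of
 * a block carries
 *
 *   be64: offset | flags
 *   u8:   strlen(block->idstr)
 *   [n]:  block->idstr bytes
 *
 * while subsequent pages of the same block carry only the be64 word,
 * with RAM_SAVE_FLAG_CONTINUE set.
 */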
/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes. If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
                                    uint64_t bytes_dirty_threshold)
{
    uint64_t pct_initial = migrate_cpu_throttle_initial();
    uint64_t pct_increment = migrate_cpu_throttle_increment();
    bool pct_tailslow = migrate_cpu_throttle_tailslow();
    int pct_max = migrate_max_cpu_throttle();

    uint64_t throttle_now = cpu_throttle_get_percentage();
    uint64_t cpu_now, cpu_ideal, throttle_inc;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        if (!pct_tailslow) {
            throttle_inc = pct_increment;
        } else {
            /* Compute the ideal CPU percentage used by Guest, which may
             * make the dirty rate match the dirty rate threshold. */
            cpu_now = 100 - throttle_now;
            cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
                                   bytes_dirty_period);
            throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
        }
        cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
    }
}
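
/*
 * Worked example for the tailslow path (numbers picked for illustration):
 * with throttle_now = 40 the guest keeps cpu_now = 60% of the CPU. If
 * bytes_dirty_threshold / bytes_dirty_period = 0.5, then
 * cpu_ideal = 60 * 0.5 = 30 and the throttle grows by
 * MIN(60 - 30, pct_increment) rather than by the full increment.
 */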
void mig_throttle_counter_reset(void)
{
    RAMState *rs = ram_state;

    rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    rs->num_dirty_pages_period = 0;
    rs->bytes_xfer_prev = migration_transferred_bytes();
}
/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
                 stat64_get(&mig_stats.dirty_sync_count));
}
#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @pss: current PSS channel
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
                            uint8_t **current_data, ram_addr_t current_addr,
                            RAMBlock *block, ram_addr_t offset)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;
    QEMUFile *file = pss->pss_channel;
    uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);

    if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) {
        xbzrle_counters.cache_miss++;
        if (!rs->last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             generation) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    /*
     * Reaching here means the page has hit the xbzrle cache, no matter what
     * encoding result it is (normal encoding, overflow or skipping the page),
     * count the page as encoded. This is used to calculate the encoding rate.
     *
     * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
     * 2nd page turns out to be skipped (i.e. no new bytes written to the
     * page), the overall encoding rate will be 8KB / 2KB = 4, which has the
     * skipped page included. In this way, the encoding rate can tell if the
     * guest page is good for xbzrle encoding.
     */
    xbzrle_counters.pages++;
    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);

    /*
     * Update the cache contents, so that it corresponds to the data
     * sent, in all cases except where we skip the page.
     */
    if (!rs->last_stage && encoded_len != 0) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
        /*
         * In the case where we couldn't compress, ensure that the caller
         * sends the data from the cache, since the guest might have
         * changed the RAM since we copied it.
         */
        *current_data = prev_cached_page;
    }

    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        xbzrle_counters.overflow++;
        xbzrle_counters.bytes += TARGET_PAGE_SIZE;
        return -1;
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(pss, pss->pss_channel, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(file, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(file, encoded_len);
    qemu_put_buffer(file, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    /*
     * Like compressed_size (please see update_compress_thread_counts),
     * the xbzrle encoded bytes don't count the 8 byte header with
     * RAM_SAVE_FLAG_CONTINUE.
     */
    xbzrle_counters.bytes += bytes_xbzrle - 8;
    ram_transferred_add(bytes_xbzrle);

    return 1;
}
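
/*
 * Resulting wire layout for an XBZRLE page, summarizing the code above:
 *
 *   be64: offset | RAM_SAVE_FLAG_XBZRLE (page header)
 *   u8:   ENCODING_FLAG_XBZRLE
 *   be16: encoded_len
 *   [n]:  encoded_buf, `encoded_len' bytes
 */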
/**
 * pss_find_next_dirty: find the next dirty page of current ramblock
 *
 * This function updates pss->page to point to the next dirty page index
 * within the ramblock to migrate, or the end of ramblock when nothing
 * found. Note that when pss->host_page_sending==true it means we're in
 * the middle of sending a host page, so we won't look for a dirty page
 * outside the host page boundary.
 *
 * @pss: the current page search status
 */
static void pss_find_next_dirty(PageSearchStatus *pss)
{
    RAMBlock *rb = pss->block;
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;

    if (migrate_ram_is_ignored(rb)) {
        /* Points directly to the end, so we know no dirty page */
        pss->page = size;
        return;
    }

    /*
     * If during sending a host page, only look for dirty pages within the
     * current host page being sent.
     */
    if (pss->host_page_sending) {
        assert(pss->host_page_end);
        size = MIN(size, pss->host_page_end);
    }

    pss->page = find_next_bit(bitmap, size, pss->page);
}
static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb,
                                                       unsigned long page)
{
    uint8_t shift;
    hwaddr size, start;

    if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) {
        return;
    }

    shift = rb->clear_bmap_shift;
    /*
     * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
     * can make things easier sometimes since then start address
     * of the small chunk will always be 64 pages aligned so the
     * bitmap will always be aligned to unsigned long. We should
     * even be able to remove this restriction but I'm simply
     * keeping it.
     */
    assert(shift >= 6);

    size = 1ULL << (TARGET_PAGE_BITS + shift);
    start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size);
    trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
    memory_region_clear_dirty_bitmap(rb->mr, start, size);
}
static void
migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb,
                                                 unsigned long start,
                                                 unsigned long npages)
{
    unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift;
    unsigned long chunk_start = QEMU_ALIGN_DOWN(start, chunk_pages);
    unsigned long chunk_end = QEMU_ALIGN_UP(start + npages, chunk_pages);

    /*
     * Clear pages from start to start + npages - 1, so the end boundary is
     * exclusive.
     */
    for (i = chunk_start; i < chunk_end; i += chunk_pages) {
        migration_clear_memory_region_dirty_bitmap(rb, i);
    }
}
/*
 * colo_bitmap_find_dirty: find contiguous dirty pages from start
 *
 * Returns the page offset within memory region of the start of the contiguous
 * dirty pages
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 * @num: the number of contiguous dirty pages
 */
static inline
unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                     unsigned long start, unsigned long *num)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long first, next;

    *num = 0;

    if (migrate_ram_is_ignored(rb)) {
        return size;
    }

    first = find_next_bit(bitmap, size, start);
    if (first >= size) {
        return first;
    }
    next = find_next_zero_bit(bitmap, size, first + 1);
    assert(next >= first);
    *num = next - first;
    return first;
}
static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                RAMBlock *rb,
                                                unsigned long page)
{
    bool ret;

    /*
     * Clear dirty bitmap if needed. This _must_ be called before we
     * send any of the page in the chunk because we need to make sure
     * we can capture further page content changes when we sync dirty
     * log the next time. So as long as we are going to send any of
     * the page in the chunk we clear the remote dirty bitmap for all.
     * Clearing it earlier won't be a problem, but too late will.
     */
    migration_clear_memory_region_dirty_bitmap(rb, page);

    ret = test_and_clear_bit(page, rb->bmap);
    if (ret) {
        rs->migration_dirty_pages--;
    }

    return ret;
}
static void dirty_bitmap_clear_section(MemoryRegionSection *section,
                                       void *opaque)
{
    const hwaddr offset = section->offset_within_region;
    const hwaddr size = int128_get64(section->size);
    const unsigned long start = offset >> TARGET_PAGE_BITS;
    const unsigned long npages = size >> TARGET_PAGE_BITS;
    RAMBlock *rb = section->mr->ram_block;
    uint64_t *cleared_bits = opaque;

    /*
     * We don't grab ram_state->bitmap_mutex because we expect to run
     * only when starting migration or during postcopy recovery where
     * we don't have concurrent access.
     */
    if (!migration_in_postcopy() && !migrate_background_snapshot()) {
        migration_clear_memory_region_dirty_bitmap_range(rb, start, npages);
    }
    *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages);
    bitmap_clear(rb->bmap, start, npages);
}
/*
 * Exclude all dirty pages from migration that fall into a discarded range as
 * managed by a RamDiscardManager responsible for the mapped memory region of
 * the RAMBlock. Clear the corresponding bits in the dirty bitmaps.
 *
 * Discarded pages ("logically unplugged") have undefined content and must
 * not get migrated, because even reading these pages for migration might
 * result in undesired behavior.
 *
 * Returns the number of cleared bits in the RAMBlock dirty bitmap.
 *
 * Note: The result is only stable while migrating (precopy/postcopy).
 */
static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb)
{
    uint64_t cleared_bits = 0;

    if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = int128_make64(qemu_ram_get_used_length(rb)),
        };

        ram_discard_manager_replay_discarded(rdm, &section,
                                             dirty_bitmap_clear_section,
                                             &cleared_bits);
    }

    return cleared_bits;
}
/*
 * Check if a host-page aligned page falls into a discarded range as managed by
 * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
 *
 * Note: The result is only stable while migrating (precopy/postcopy).
 */
bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
{
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = start,
            .size = int128_make64(qemu_ram_pagesize(rb)),
        };

        return !ram_discard_manager_is_populated(rdm, &section);
    }

    return false;
}
/* Called with RCU critical section */
static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
{
    uint64_t new_dirty_pages =
        cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);

    rs->migration_dirty_pages += new_dirty_pages;
    rs->num_dirty_pages_period += new_dirty_pages;
}
/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size. If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    RAMBlock *block;
    uint64_t summary = 0;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        summary |= block->page_size;
    }

    return summary;
}
uint64_t ram_get_total_transferred_pages(void)
{
    return stat64_get(&mig_stats.normal_pages) +
           stat64_get(&mig_stats.zero_pages) +
           compress_ram_pages() + xbzrle_counters.pages;
}
static void migration_update_rates(RAMState *rs, int64_t end_time)
{
    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;

    /* calculate period counters */
    stat64_set(&mig_stats.dirty_pages_rate,
               rs->num_dirty_pages_period * 1000 /
               (end_time - rs->time_last_bitmap_sync));

    if (!page_count) {
        return;
    }

    if (migrate_xbzrle()) {
        double encoded_size, unencoded_size;

        xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
            rs->xbzrle_cache_miss_prev) / page_count;
        rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
        unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) *
                         TARGET_PAGE_SIZE;
        encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev;
        if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) {
            xbzrle_counters.encoding_rate = 0;
        } else {
            xbzrle_counters.encoding_rate = unencoded_size / encoded_size;
        }
        rs->xbzrle_pages_prev = xbzrle_counters.pages;
        rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
    }
    compress_update_rates(page_count);
}
/*
 * Enable dirty-limit to throttle down the guest
 */
static void migration_dirty_limit_guest(void)
{
    /*
     * dirty page rate quota for all vCPUs fetched from
     * migration parameter 'vcpu_dirty_limit'
     */
    static int64_t quota_dirtyrate;
    MigrationState *s = migrate_get_current();

    /*
     * If the dirty limit is already enabled and the migration parameter
     * vcpu-dirty-limit is untouched, there is nothing to do.
     */
    if (dirtylimit_in_service() &&
        quota_dirtyrate == s->parameters.vcpu_dirty_limit) {
        return;
    }

    quota_dirtyrate = s->parameters.vcpu_dirty_limit;

    /*
     * Set a quota dirty rate for all vCPUs; note that the second
     * parameter is ignored when setting all vCPUs of the VM.
     */
    qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL);
    trace_migration_dirty_limit_guest(quota_dirtyrate);
}
static void migration_trigger_throttle(RAMState *rs)
{
    uint64_t threshold = migrate_throttle_trigger_threshold();
    uint64_t bytes_xfer_period =
        migration_transferred_bytes() - rs->bytes_xfer_prev;
    uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;

    /*
     * The following detection logic can be refined later. For now:
     * Check to see if the ratio between dirtied bytes and the approx.
     * amount of bytes that just got transferred since the last time
     * we were in this routine reaches the threshold. If that happens
     * twice, start or increase throttling.
     */
    if ((bytes_dirty_period > bytes_dirty_threshold) &&
        (++rs->dirty_rate_high_cnt >= 2)) {
        rs->dirty_rate_high_cnt = 0;
        if (migrate_auto_converge()) {
            trace_migration_throttle();
            mig_throttle_guest_down(bytes_dirty_period,
                                    bytes_dirty_threshold);
        } else if (migrate_dirty_limit()) {
            migration_dirty_limit_guest();
        }
    }
}
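
/*
 * Numeric illustration (values invented for the example): with a
 * trigger threshold of 50 and 100MB transferred in the last period,
 * throttling starts once the guest dirties more than 50MB within the
 * same period, for two periods in a row.
 */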
static void migration_bitmap_sync(RAMState *rs, bool last_stage)
{
    RAMBlock *block;
    int64_t end_time;

    stat64_add(&mig_stats.dirty_sync_count, 1);

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync(last_stage);

    WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) {
        WITH_RCU_READ_LOCK_GUARD() {
            RAMBLOCK_FOREACH_NOT_IGNORED(block) {
                ramblock_sync_dirty_bitmap(rs, block);
            }
            stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining());
        }
    }

    memory_global_after_dirty_log_sync();
    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        migration_trigger_throttle(rs);

        migration_update_rates(rs, end_time);

        rs->target_page_count_prev = rs->target_page_count;

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
        rs->bytes_xfer_prev = migration_transferred_bytes();
    }
    if (migrate_events()) {
        uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
        qapi_event_send_migration_pass(generation);
    }
}
static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
{
    Error *local_err = NULL;

    /*
     * The current notifier usage is just an optimization to migration, so we
     * don't stop the normal migration process in the error case.
     */
    if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
        error_report_err(local_err);
        local_err = NULL;
    }

    migration_bitmap_sync(rs, last_stage);

    if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
        error_report_err(local_err);
    }
}
void ram_release_page(const char *rbname, uint64_t offset)
{
    if (!migrate_release_ram() || !migration_in_postcopy()) {
        return;
    }

    ram_discard_range(rbname, offset, TARGET_PAGE_SIZE);
}
/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @pss: current PSS channel
 * @offset: offset inside the block for the page
 */
static int save_zero_page(RAMState *rs, PageSearchStatus *pss,
                          ram_addr_t offset)
{
    uint8_t *p = pss->block->host + offset;
    QEMUFile *file = pss->pss_channel;
    int len = 0;

    if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_NONE) {
        return 0;
    }

    if (!buffer_is_zero(p, TARGET_PAGE_SIZE)) {
        return 0;
    }

    stat64_add(&mig_stats.zero_pages, 1);

    if (migrate_mapped_ram()) {
        /* zero pages are not transferred with mapped-ram */
        clear_bit_atomic(offset >> TARGET_PAGE_BITS, pss->block->file_bmap);
        return 1;
    }

    len += save_page_header(pss, file, pss->block, offset | RAM_SAVE_FLAG_ZERO);
    qemu_put_byte(file, 0);
    len += 1;
    ram_release_page(pss->block->idstr, offset);
    ram_transferred_add(len);

    /*
     * Must let xbzrle know, otherwise a previous (now 0'd) cached
     * page would be stale.
     */
    if (rs->xbzrle_started) {
        XBZRLE_cache_lock();
        xbzrle_cache_zero_page(pss->block->offset + offset);
        XBZRLE_cache_unlock();
    }

    return len;
}
/*
 * @pages: the number of pages written by the control path,
 *        < 0 - error
 *        > 0 - number of pages written
 *
 * Return true if the page has been saved, otherwise false is returned.
 */
static bool control_save_page(PageSearchStatus *pss,
                              ram_addr_t offset, int *pages)
{
    int ret;

    ret = rdma_control_save_page(pss->pss_channel, pss->block->offset, offset,
                                 TARGET_PAGE_SIZE);
    if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
        return false;
    }

    if (ret == RAM_SAVE_CONTROL_DELAYED) {
        *pages = 1;
        return true;
    }
    *pages = ret;
    return true;
}
/*
 * directly send the page to the stream
 *
 * Returns the number of pages written.
 *
 * @pss: current PSS channel
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @buf: the page to be sent
 * @async: send the page asynchronously
 */
static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
                            ram_addr_t offset, uint8_t *buf, bool async)
{
    QEMUFile *file = pss->pss_channel;

    if (migrate_mapped_ram()) {
        qemu_put_buffer_at(file, buf, TARGET_PAGE_SIZE,
                           block->pages_offset + offset);
        set_bit(offset >> TARGET_PAGE_BITS, block->file_bmap);
    } else {
        ram_transferred_add(save_page_header(pss, pss->pss_channel, block,
                                             offset | RAM_SAVE_FLAG_PAGE));
        if (async) {
            qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
                                  migrate_release_ram() &&
                                  migration_in_postcopy());
        } else {
            qemu_put_buffer(file, buf, TARGET_PAGE_SIZE);
        }
    }
    ram_transferred_add(TARGET_PAGE_SIZE);
    stat64_add(&mig_stats.normal_pages, 1);
    return 1;
}
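
/*
 * Design note on the mapped-ram branch above: the page is written at a
 * fixed file position (block->pages_offset + offset) instead of being
 * appended to the stream, which is what keeps the resulting file
 * random-access and friendly to O_DIRECT loading.
 */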
/**
 * ram_save_page: send the given page to the stream
 *
 * Returns the number of pages written.
 *          >=0 - Number of pages written - this might legally be 0
 *                if xbzrle noticed the page was the same.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
{
    int pages = -1;
    uint8_t *p;
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
    ram_addr_t current_addr = block->offset + offset;

    p = block->host + offset;
    trace_ram_save_page(block->idstr, (uint64_t)offset, p);

    XBZRLE_cache_lock();
    if (rs->xbzrle_started && !migration_in_postcopy()) {
        pages = save_xbzrle_page(rs, pss, &p, current_addr,
                                 block, offset);
        if (!rs->last_stage) {
            /* Can't send this cached data async, since the cache page
             * might get updated before it gets to the wire
             */
            send_async = false;
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        pages = save_normal_page(pss, block, offset, p, send_async);
    }

    XBZRLE_cache_unlock();

    return pages;
}
static int ram_save_multifd_page(RAMBlock *block, ram_addr_t offset)
{
    if (!multifd_queue_page(block, offset)) {
        return -1;
    }

    return 1;
}
int compress_send_queued_data(CompressParam *param)
{
    PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY];
    MigrationState *ms = migrate_get_current();
    QEMUFile *file = ms->to_dst_file;
    int len = 0;

    RAMBlock *block = param->block;
    ram_addr_t offset = param->offset;

    if (param->result == RES_NONE) {
        return 0;
    }

    assert(block == pss->last_sent_block);

    if (param->result == RES_ZEROPAGE) {
        assert(qemu_file_buffer_empty(param->file));
        len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO);
        qemu_put_byte(file, 0);
        len += 1;
        ram_release_page(block->idstr, offset);
    } else if (param->result == RES_COMPRESS) {
        assert(!qemu_file_buffer_empty(param->file));
        len += save_page_header(pss, file, block,
                                offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
        len += qemu_put_qemu_file(file, param->file);
    } else {
        abort();
    }

    update_compress_thread_counts(param, len);

    return len;
}
#define PAGE_ALL_CLEAN 0
#define PAGE_TRY_AGAIN 1
#define PAGE_DIRTY_FOUND 2
/**
 * find_dirty_block: find the next dirty page and update any state
 * associated with the search process.
 *
 * Returns:
 *         <0: An error happened
 *         PAGE_ALL_CLEAN: no dirty page found, give up
 *         PAGE_TRY_AGAIN: no dirty page found, retry for next block
 *         PAGE_DIRTY_FOUND: dirty page found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 */
static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
{
    /* Update pss->page for the next dirty bit in ramblock */
    pss_find_next_dirty(pss);

    if (pss->complete_round && pss->block == rs->last_seen_block &&
        pss->page >= rs->last_page) {
        /*
         * We've been once around the RAM and haven't found anything.
         * Give up.
         */
        return PAGE_ALL_CLEAN;
    }
    if (!offset_in_ramblock(pss->block,
                            ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) {
        /* Didn't find anything in this RAM Block */
        pss->page = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            if (migrate_multifd() &&
                (!migrate_multifd_flush_after_each_section() ||
                 migrate_mapped_ram())) {
                QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
                int ret = multifd_send_sync_main();
                if (ret < 0) {
                    return ret;
                }

                if (!migrate_mapped_ram()) {
                    qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
                    qemu_fflush(f);
                }
            }
            /*
             * If memory migration starts over, we will meet a dirtied page
             * which may still exist in the compression threads' ring, so we
             * should flush the compressed data to make sure the new page
             * is not overwritten by the old one in the destination.
             *
             * Also, if xbzrle is on, stop using the data compression at this
             * point. In theory, xbzrle can do better than compression.
             */
            compress_flush_data();

            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            /* After the first round, enable XBZRLE. */
            if (migrate_xbzrle()) {
                rs->xbzrle_started = true;
            }
        }
        /* Didn't find anything this time, but try again on the new block */
        return PAGE_TRY_AGAIN;
    } else {
        /* We've found something */
        return PAGE_DIRTY_FOUND;
    }
}
/**
 * unqueue_page: gets a page of the queue
 *
 * Helper for 'get_queued_page' - gets a page off the queue
 *
 * Returns the block of the page (or NULL if none available)
 *
 * @rs: current RAM state
 * @offset: used to return the offset within the RAMBlock
 */
static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
{
    struct RAMSrcPageRequest *entry;
    RAMBlock *block = NULL;

    if (!postcopy_has_request(rs)) {
        return NULL;
    }

    QEMU_LOCK_GUARD(&rs->src_page_req_mutex);

    /*
     * This should _never_ change even after we take the lock, because no one
     * should be taking anything off the request list other than us.
     */
    assert(postcopy_has_request(rs));

    entry = QSIMPLEQ_FIRST(&rs->src_page_requests);
    block = entry->rb;
    *offset = entry->offset;

    if (entry->len > TARGET_PAGE_SIZE) {
        entry->len -= TARGET_PAGE_SIZE;
        entry->offset += TARGET_PAGE_SIZE;
    } else {
        memory_region_unref(block->mr);
        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
        g_free(entry);
        migration_consume_urgent_request();
    }

    return block;
}
#if defined(__linux__)
/**
 * poll_fault_page: try to get next UFFD write fault page and, if pending fault
 *   is found, return RAM block pointer and page offset
 *
 * Returns pointer to the RAMBlock containing faulting page,
 *   NULL if no write faults are pending
 *
 * @rs: current RAM state
 * @offset: page offset from the beginning of the block
 */
static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
{
    struct uffd_msg uffd_msg;
    void *page_address;
    RAMBlock *block;
    int res;

    if (!migrate_background_snapshot()) {
        return NULL;
    }

    res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1);
    if (res <= 0) {
        return NULL;
    }

    page_address = (void *)(uintptr_t) uffd_msg.arg.pagefault.address;
    block = qemu_ram_block_from_host(page_address, false, offset);
    assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0);
    return block;
}
/*
 * ram_save_release_protection: release UFFD write protection after
 *   a range of pages has been saved
 *
 * @rs: current RAM state
 * @pss: page-search-status structure
 * @start_page: index of the first page in the range relative to pss->block
 *
 * Returns 0 on success, negative value in case of an error
 */
static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
                                       unsigned long start_page)
{
    int res = 0;

    /* Check if page is from UFFD-managed region. */
    if (pss->block->flags & RAM_UF_WRITEPROTECT) {
        void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS);
        uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS;

        /* Flush async buffers before un-protect. */
        qemu_fflush(pss->pss_channel);
        /* Un-protect memory range. */
        res = uffd_change_protection(rs->uffdio_fd, page_address, run_length,
                                     false, false);
    }

    return res;
}
/* ram_write_tracking_available: check if kernel supports required UFFD features
 *
 * Returns true if supported, false otherwise
 */
bool ram_write_tracking_available(void)
{
    uint64_t uffd_features;
    int res;

    res = uffd_query_features(&uffd_features);
    return (res == 0 &&
            (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP) != 0);
}
/* ram_write_tracking_compatible: check if guest configuration is
 *   compatible with 'write-tracking'
 *
 * Returns true if compatible, false otherwise
 */
bool ram_write_tracking_compatible(void)
{
    const uint64_t uffd_ioctls_mask = BIT(_UFFDIO_WRITEPROTECT);
    int uffd_fd;
    RAMBlock *block;
    bool ret = false;

    /* Open UFFD file descriptor */
    uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, false);
    if (uffd_fd < 0) {
        return false;
    }

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        uint64_t uffd_ioctls;

        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }

        /* Try to register block memory via UFFD-IO to track writes */
        if (uffd_register_memory(uffd_fd, block->host, block->max_length,
                                 UFFDIO_REGISTER_MODE_WP, &uffd_ioctls)) {
            goto out;
        }
        if ((uffd_ioctls & uffd_ioctls_mask) != uffd_ioctls_mask) {
            goto out;
        }
    }
    ret = true;

out:
    uffd_close_fd(uffd_fd);
    return ret;
}
static inline void populate_read_range(RAMBlock *block, ram_addr_t offset,
                                       ram_addr_t size)
{
    const ram_addr_t end = offset + size;

    /*
     * We read one byte of each page; this will preallocate page tables if
     * required and populate the shared zeropage on MAP_PRIVATE anonymous memory
     * where no page was populated yet. This might require adaptation when
     * supporting other mappings, like shmem.
     */
    for (; offset < end; offset += block->page_size) {
        char tmp = *((char *)block->host + offset);

        /* Don't optimize the read out */
        asm volatile("" : "+r" (tmp));
    }
}
static inline int populate_read_section(MemoryRegionSection *section,
                                        void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    hwaddr offset = section->offset_within_region;
    RAMBlock *block = section->mr->ram_block;

    populate_read_range(block, offset, size);
    return 0;
}
/*
 * ram_block_populate_read: preallocate page tables and populate pages in the
 *   RAM block by reading a byte of each page.
 *
 * Since it's solely used for the userfault_fd WP feature, here we just
 *   hardcode page size to qemu_real_host_page_size.
 *
 * @block: RAM block to populate
 */
static void ram_block_populate_read(RAMBlock *rb)
{
    /*
     * Skip populating all pages that fall into a discarded range as managed by
     * a RamDiscardManager responsible for the mapped memory region of the
     * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock
     * must not get populated automatically. We don't have to track
     * modifications via userfaultfd WP reliably, because these pages will
     * not be part of the migration stream either way -- see
     * ramblock_dirty_bitmap_exclude_discarded_pages().
     *
     * Note: The result is only stable while migrating (precopy/postcopy).
     */
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = rb->mr->size,
        };

        ram_discard_manager_replay_populated(rdm, &section,
                                             populate_read_section, NULL);
    } else {
        populate_read_range(rb, 0, rb->used_length);
    }
}
/*
 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
 */
void ram_write_tracking_prepare(void)
{
    RAMBlock *block;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }

        /*
         * Populate pages of the RAM block before enabling userfault_fd
         * write protection.
         *
         * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with
         * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip
         * pages with pte_none() entries in page table.
         */
        ram_block_populate_read(block);
    }
}
static inline int uffd_protect_section(MemoryRegionSection *section,
                                       void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    const hwaddr offset = section->offset_within_region;
    RAMBlock *rb = section->mr->ram_block;
    int uffd_fd = (uintptr_t)opaque;

    return uffd_change_protection(uffd_fd, rb->host + offset, size, true,
                                  false);
}
static int ram_block_uffd_protect(RAMBlock *rb, int uffd_fd)
{
    assert(rb->flags & RAM_UF_WRITEPROTECT);

    /* See ram_block_populate_read() */
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = rb->mr->size,
        };

        return ram_discard_manager_replay_populated(rdm, &section,
                                                    uffd_protect_section,
                                                    (void *)(uintptr_t)uffd_fd);
    }
    return uffd_change_protection(uffd_fd, rb->host,
                                  rb->used_length, true, false);
}
/*
 * ram_write_tracking_start: start UFFD-WP memory tracking
 *
 * Returns 0 for success or negative value in case of error
 */
int ram_write_tracking_start(void)
{
    int uffd_fd;
    RAMState *rs = ram_state;
    RAMBlock *block;

    /* Open UFFD file descriptor */
    uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, true);
    if (uffd_fd < 0) {
        return uffd_fd;
    }
    rs->uffdio_fd = uffd_fd;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }

        /* Register block memory with UFFD to track writes */
        if (uffd_register_memory(rs->uffdio_fd, block->host,
                                 block->max_length, UFFDIO_REGISTER_MODE_WP,
                                 NULL)) {
            goto fail;
        }
        block->flags |= RAM_UF_WRITEPROTECT;
        memory_region_ref(block->mr);

        /* Apply UFFD write protection to the block memory range */
        if (ram_block_uffd_protect(block, uffd_fd)) {
            goto fail;
        }

        trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size,
                                                block->host, block->max_length);
    }

    return 0;

fail:
    error_report("ram_write_tracking_start() failed: restoring initial memory state");

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
            continue;
        }
        uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
        /* Cleanup flags and remove reference */
        block->flags &= ~RAM_UF_WRITEPROTECT;
        memory_region_unref(block->mr);
    }

    uffd_close_fd(uffd_fd);
    rs->uffdio_fd = -1;
    return -1;
}
/*
 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
 */
void ram_write_tracking_stop(void)
{
    RAMState *rs = ram_state;
    RAMBlock *block;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
            continue;
        }
        uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);

        trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size,
                                               block->host, block->max_length);

        /* Cleanup flags and remove reference */
        block->flags &= ~RAM_UF_WRITEPROTECT;
        memory_region_unref(block->mr);
    }

    /* Finally close UFFD file descriptor */
    uffd_close_fd(rs->uffdio_fd);
    rs->uffdio_fd = -1;
}
#else
/* No target OS support, stubs just fail or ignore */

static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
{
    (void) rs;
    (void) offset;

    return NULL;
}

static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
                                       unsigned long start_page)
{
    (void) rs;
    (void) pss;
    (void) start_page;

    return 0;
}

bool ram_write_tracking_available(void)
{
    return false;
}

bool ram_write_tracking_compatible(void)
{
    assert(0);
    return false;
}

int ram_write_tracking_start(void)
{
    assert(0);
    return -1;
}

void ram_write_tracking_stop(void)
{
    assert(0);
}
#endif /* defined(__linux__) */
/*
 * get_queued_page: unqueue a page from the postcopy requests
 *
 * Skips pages that are already sent (!dirty)
 *
 * Returns true if a queued page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 */
static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock *block;
    ram_addr_t offset;
    bool dirty;

    do {
        block = unqueue_page(rs, &offset);
        /*
         * We're sending this page, and since it's postcopy nothing else
         * will dirty it, and we must make sure it doesn't get sent again
         * even if this queue request was received after the background
         * search already sent it.
         */
        if (block) {
            unsigned long page;

            page = offset >> TARGET_PAGE_BITS;
            dirty = test_bit(page, block->bmap);
            if (!dirty) {
                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
                                                page);
            } else {
                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
            }
        }

    } while (block && !dirty);

    if (!block) {
        /*
         * Poll write faults too if background snapshot is enabled; that's
         * when vCPUs are blocked by the write-protected pages.
         */
        block = poll_fault_page(rs, &offset);
    }

    if (block) {
        /*
         * We want the background search to continue from the queued page
         * since the guest is likely to want other pages near to the page
         * it just requested.
         */
        pss->block = block;
        pss->page = offset >> TARGET_PAGE_BITS;

        /*
         * This unqueued page would break the "one round" check, even if
         * that is really rare.
         */
        pss->complete_round = false;
    }

    return !!block;
}
/**
 * migration_page_queue_free: drop any remaining pages in the ram
 * request queue
 *
 * It should be empty at the end anyway, but in error cases there may
 * be some left. In case there is any page left, we drop it.
 */
static void migration_page_queue_free(RAMState *rs)
{
    struct RAMSrcPageRequest *mspr, *next_mspr;
    /* This queue generally should be empty - but in the case of a failed
     * migration might have some droppings in.
     */
    RCU_READ_LOCK_GUARD();
    QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
        memory_region_unref(mspr->rb->mr);
        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
        g_free(mspr);
    }
}
/**
 * ram_save_queue_pages: queue the page for transmission
 *
 * A request from postcopy destination for example.
 *
 * Returns zero on success or negative on error
 *
 * @rbname: Name of the RAMBlock of the request. NULL means the
 *          same as the last one.
 * @start: starting address from the start of the RAMBlock
 * @len: length (in bytes) to send
 */
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len,
                         Error **errp)
{
    RAMBlock *ramblock;
    RAMState *rs = ram_state;

    stat64_add(&mig_stats.postcopy_requests, 1);
    RCU_READ_LOCK_GUARD();

    if (!rbname) {
        /* Reuse last RAMBlock */
        ramblock = rs->last_req_rb;

        if (!ramblock) {
            /*
             * Shouldn't happen, we can't reuse the last RAMBlock if
             * it's the 1st request.
             */
            error_setg(errp, "MIG_RP_MSG_REQ_PAGES has no previous block");
            return -1;
        }
    } else {
        ramblock = qemu_ram_block_by_name(rbname);

        if (!ramblock) {
            /* We shouldn't be asked for a non-existent RAMBlock */
            error_setg(errp, "MIG_RP_MSG_REQ_PAGES has no block '%s'", rbname);
            return -1;
        }
        rs->last_req_rb = ramblock;
    }
    trace_ram_save_queue_pages(ramblock->idstr, start, len);
    if (!offset_in_ramblock(ramblock, start + len - 1)) {
        error_setg(errp, "MIG_RP_MSG_REQ_PAGES request overrun, "
                   "start=" RAM_ADDR_FMT " len="
                   RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
                   start, len, ramblock->used_length);
        return -1;
    }

    /*
     * When postcopy preempt is enabled, we send back the page directly
     * in the rp-return thread.
     */
    if (postcopy_preempt_active()) {
        ram_addr_t page_start = start >> TARGET_PAGE_BITS;
        size_t page_size = qemu_ram_pagesize(ramblock);
        PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY];
        int ret = 0;

        qemu_mutex_lock(&rs->bitmap_mutex);

        pss_init(pss, ramblock, page_start);
        /*
         * Always use the preempt channel, and make sure it's there. It's
         * safe to access without lock, because when rp-thread is running
         * we should be the only one who operates on the qemufile
         */
        pss->pss_channel = migrate_get_current()->postcopy_qemufile_src;
        assert(pss->pss_channel);

        /*
         * It must be either one or multiple of host page size. Just
         * assert; if something wrong we're mostly split brain anyway.
         */
        assert(len % page_size == 0);
        while (len) {
            if (ram_save_host_page_urgent(pss)) {
                error_setg(errp, "ram_save_host_page_urgent() failed: "
                           "ramblock=%s, start_addr=0x"RAM_ADDR_FMT,
                           ramblock->idstr, start);
                ret = -1;
                break;
            }
            /*
             * NOTE: after ram_save_host_page_urgent() succeeded, pss->page
             * will automatically be moved and point to the next host page
             * we're going to send, so no need to update here.
             *
             * Normally QEMU never sends >1 host page in requests, so
             * logically we don't even need that as the loop should only
             * run once, but just to be consistent.
             */
            start += page_size;
            len -= page_size;
        }
        qemu_mutex_unlock(&rs->bitmap_mutex);

        return ret;
    }

    struct RAMSrcPageRequest *new_entry =
        g_new0(struct RAMSrcPageRequest, 1);
    new_entry->rb = ramblock;
    new_entry->offset = start;
    new_entry->len = len;

    memory_region_ref(ramblock->mr);
    qemu_mutex_lock(&rs->src_page_req_mutex);
    QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
    migration_make_urgent_request();
    qemu_mutex_unlock(&rs->src_page_req_mutex);

    return 0;
}
/*
 * try to compress the page before posting it out, return true if the page
 * has been properly handled by compression, otherwise needs other
 * paths to handle it
 */
static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
                               ram_addr_t offset)
{
    if (!migrate_compress()) {
        return false;
    }

    /*
     * When starting the process of a new block, the first page of
     * the block should be sent out before other pages in the same
     * block, and all the pages in last block should have been sent
     * out, keeping this order is important, because the 'cont' flag
     * is used to avoid resending the block name.
     *
     * We post the first page as a normal page as compression will take
     * much CPU resource.
     */
    if (pss->block != pss->last_sent_block) {
        compress_flush_data();
        return false;
    }

    return compress_page_with_multi_thread(pss->block, offset,
                                           compress_send_queued_data);
}
/**
 * ram_save_target_page_legacy: save one target page
 *
 * Returns the number of pages written
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
{
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
    int res;

    if (control_save_page(pss, offset, &res)) {
        return res;
    }

    if (save_compress_page(rs, pss, offset)) {
        return 1;
    }

    if (save_zero_page(rs, pss, offset)) {
        return 1;
    }

    return ram_save_page(rs, pss);
}
/**
 * ram_save_target_page_multifd: send one target page to multifd workers
 *
 * Returns 1 if the page was queued, -1 otherwise.
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_target_page_multifd(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock *block = pss->block;
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;

    /*
     * While using multifd live migration, we still need to handle zero
     * page checking on the migration main thread.
     */
    if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
        if (save_zero_page(rs, pss, offset)) {
            return 1;
        }
    }

    return ram_save_multifd_page(block, offset);
}
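/*
 * With the non-legacy zero page detection mode, the zero check above is
 * skipped here and is instead expected to happen on the multifd sender
 * threads, off the migration main thread.
 */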
/* Should be called before sending a host page */
static void pss_host_page_prepare(PageSearchStatus *pss)
{
    /* How many guest pages are there in one host page? */
    size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;

    pss->host_page_sending = true;
    if (guest_pfns <= 1) {
        /*
         * This covers both when guest psize == host psize, or when guest
         * has larger psize than the host (guest_pfns==0).
         *
         * For the latter, we always send one whole guest page per
         * iteration of the host page (example: an Alpha VM on x86 host
         * will have guest psize 8K while host psize 4K).
         */
        pss->host_page_start = pss->page;
        pss->host_page_end = pss->page + 1;
    } else {
        /*
         * The host page spans over multiple guest pages, we send them
         * within the same host page iteration.
         */
        pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns);
        pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns);
    }
}
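/*
 * Worked example for pss_host_page_prepare(): with 4K target pages and a
 * 2M hugetlbfs-backed RAMBlock, guest_pfns == 512. If pss->page == 1000,
 * host_page_start == ROUND_DOWN(1000, 512) == 512 and host_page_end ==
 * ROUND_UP(1001, 512) == 1024, i.e. the whole 2M host page containing
 * target page 1000.
 */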
/*
 * Whether the page pointed by PSS is within the host page being sent.
 * Must be called after a previous pss_host_page_prepare().
 */
static bool pss_within_range(PageSearchStatus *pss)
{
    ram_addr_t ram_addr;

    assert(pss->host_page_sending);

    /* Over host-page boundary? */
    if (pss->page >= pss->host_page_end) {
        return false;
    }

    ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;

    return offset_in_ramblock(pss->block, ram_addr);
}
static void pss_host_page_finish(PageSearchStatus *pss)
{
    pss->host_page_sending = false;
    /* This is not needed, but just to reset it */
    pss->host_page_start = pss->host_page_end = 0;
}
/*
 * Send an urgent host page specified by `pss'. Need to be called with
 * bitmap_mutex held.
 *
 * Returns 0 if saving the host page succeeded, non-zero otherwise.
 */
static int ram_save_host_page_urgent(PageSearchStatus *pss)
{
    bool page_dirty, sent = false;
    RAMState *rs = ram_state;
    int ret = 0;

    trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page);
    pss_host_page_prepare(pss);

    /*
     * If precopy is sending the same page, let it be done in precopy, or
     * we could send the same page in two channels and none of them will
     * receive the whole page.
     */
    if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) {
        trace_postcopy_preempt_hit(pss->block->idstr,
                                   pss->page << TARGET_PAGE_BITS);
        return 0;
    }

    do {
        page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);

        if (page_dirty) {
            /* Be strict to return code; it must be 1, or what else? */
            if (migration_ops->ram_save_target_page(rs, pss) != 1) {
                error_report_once("%s: ram_save_target_page failed", __func__);
                ret = -1;
                goto out;
            }
            sent = true;
        }
        pss_find_next_dirty(pss);
    } while (pss_within_range(pss));
out:
    pss_host_page_finish(pss);
    /* For urgent requests, flush immediately if sent */
    if (sent) {
        qemu_fflush(pss->pss_channel);
    }
    return ret;
}
/**
 * ram_save_host_page: save a whole host page
 *
 * Starting at *offset send pages up to the end of the current host
 * page. It's valid for the initial offset to point into the middle of
 * a host page in which case the remainder of the hostpage is sent.
 * Only dirty target pages are sent. Note that the host page size may
 * be a huge page for this block.
 *
 * The saving stops at the boundary of the used_length of the block
 * if the RAMBlock isn't a multiple of the host page size.
 *
 * The caller must hold ram_state.bitmap_mutex when calling this
 * function. Note that this function can temporarily release the lock, but
 * when the function returns it'll make sure the lock is still held.
 *
 * Returns the number of pages written or negative on error
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
{
    bool page_dirty, preempt_active = postcopy_preempt_active();
    int tmppages, pages = 0;
    size_t pagesize_bits =
        qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
    unsigned long start_page = pss->page;
    int res;

    if (migrate_ram_is_ignored(pss->block)) {
        error_report("block %s should not be migrated !", pss->block->idstr);
        return 0;
    }

    /* Update host page boundary information */
    pss_host_page_prepare(pss);

    do {
        page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);

        /* Check if the page is dirty, and if so send it */
        if (page_dirty) {
            /*
             * Properly yield the lock only in postcopy preempt mode
             * because both migration thread and rp-return thread can
             * operate on the bitmaps.
             */
            if (preempt_active) {
                qemu_mutex_unlock(&rs->bitmap_mutex);
            }
            tmppages = migration_ops->ram_save_target_page(rs, pss);
            if (tmppages >= 0) {
                pages += tmppages;
                /*
                 * Allow rate limiting to happen in the middle of huge pages if
                 * something is sent in the current iteration.
                 */
                if (pagesize_bits > 1 && tmppages > 0) {
                    migration_rate_limit();
                }
            }
            if (preempt_active) {
                qemu_mutex_lock(&rs->bitmap_mutex);
            }
        } else {
            tmppages = 0;
        }

        if (tmppages < 0) {
            pss_host_page_finish(pss);
            return tmppages;
        }

        pss_find_next_dirty(pss);
    } while (pss_within_range(pss));

    pss_host_page_finish(pss);

    res = ram_save_release_protection(rs, pss, start_page);
    return (res < 0 ? res : pages);
}
/**
 * ram_find_and_save_block: finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns the number of pages written where zero means no dirty pages,
 * or negative on error
 *
 * @rs: current RAM state
 *
 * On systems where host-page-size > target-page-size it will send all the
 * pages in a host page that are dirty.
 */
static int ram_find_and_save_block(RAMState *rs)
{
    PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
    int pages = 0;

    /* No dirty page as there is zero RAM */
    if (!rs->ram_bytes_total) {
        return pages;
    }

    /*
     * Always keep last_seen_block/last_page valid during this procedure,
     * because find_dirty_block() relies on these values (e.g., we compare
     * last_seen_block with pss.block to see whether we searched all the
     * ramblocks) to detect the completion of migration. Having NULL value
     * of last_seen_block can conditionally cause below loop to run forever.
     */
    if (!rs->last_seen_block) {
        rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks);
        rs->last_page = 0;
    }

    pss_init(pss, rs->last_seen_block, rs->last_page);

    while (true) {
        if (!get_queued_page(rs, pss)) {
            /* priority queue empty, so just search for something dirty */
            int res = find_dirty_block(rs, pss);
            if (res != PAGE_DIRTY_FOUND) {
                if (res == PAGE_ALL_CLEAN) {
                    break;
                } else if (res == PAGE_TRY_AGAIN) {
                    continue;
                } else if (res < 0) {
                    pages = res;
                    break;
                }
            }
        }
        pages = ram_save_host_page(rs, pss);
        if (pages) {
            break;
        }
    }

    rs->last_seen_block = pss->block;
    rs->last_page = pss->page;

    return pages;
}
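/*
 * Return value summary for ram_find_and_save_block(): 0 means no dirty
 * pages were found (PAGE_ALL_CLEAN or zero RAM), a positive value is the
 * number of target pages just sent for one host page, and a negative
 * value is an error propagated from the send path.
 */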
static uint64_t ram_bytes_total_with_ignored(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        total += block->used_length;
    }
    return total;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        total += block->used_length;
    }
    return total;
}
static void xbzrle_load_setup(void)
{
    XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
}

static void xbzrle_load_cleanup(void)
{
    g_free(XBZRLE.decoded_buf);
    XBZRLE.decoded_buf = NULL;
}
static void ram_state_cleanup(RAMState **rsp)
{
    if (*rsp) {
        migration_page_queue_free(*rsp);
        qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
        qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
        g_free(*rsp);
        *rsp = NULL;
    }
}
static void xbzrle_cleanup(void)
{
    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.zero_target_page);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
        XBZRLE.zero_target_page = NULL;
    }
    XBZRLE_cache_unlock();
}
static void ram_bitmaps_destroy(void)
{
    RAMBlock *block;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        g_free(block->clear_bmap);
        block->clear_bmap = NULL;
        g_free(block->bmap);
        block->bmap = NULL;
        g_free(block->file_bmap);
        block->file_bmap = NULL;
    }
}
static void ram_save_cleanup(void *opaque)
{
    RAMState **rsp = opaque;

    /* We don't use dirty log with background snapshots */
    if (!migrate_background_snapshot()) {
        /*
         * The caller holds the BQL or is in a bh, so there is
         * no writing race against the migration bitmap
         */
        if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
            /*
             * do not stop dirty log without starting it, since
             * memory_global_dirty_log_stop will assert that
             * memory_global_dirty_log_start/stop used in pairs
             */
            memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
        }
    }

    ram_bitmaps_destroy();

    xbzrle_cleanup();
    compress_threads_save_cleanup();
    ram_state_cleanup(rsp);
    g_free(migration_ops);
    migration_ops = NULL;
}
static void ram_state_reset(RAMState *rs)
{
    int i;

    for (i = 0; i < RAM_CHANNEL_MAX; i++) {
        rs->pss[i].last_sent_block = NULL;
    }

    rs->last_seen_block = NULL;
    rs->last_page = 0;
    rs->last_version = ram_list.version;
    rs->xbzrle_started = false;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */
/* **** functions for postcopy ***** */

void ram_postcopy_migrated_memory_release(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        unsigned long *bitmap = block->bmap;
        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);

        while (run_start < range) {
            unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
            ram_discard_range(block->idstr,
                              ((ram_addr_t)run_start) << TARGET_PAGE_BITS,
                              ((ram_addr_t)(run_end - run_start))
                                << TARGET_PAGE_BITS);
            run_start = find_next_zero_bit(bitmap, range, run_end + 1);
        }
    }
}
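/*
 * In the loop above a clear bit means the page was already sent during
 * precopy, so runs of zero bits are exactly the ranges that can be
 * released on the source once postcopy has taken over.
 */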
/**
 * postcopy_send_discard_bm_ram: discard a RAMBlock
 *
 * Callback from postcopy_each_ram_send_discard for each RAMBlock
 *
 * @ms: current migration state
 * @block: RAMBlock to discard
 */
static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
{
    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
    unsigned long current;
    unsigned long *bitmap = block->bmap;

    for (current = 0; current < end; ) {
        unsigned long one = find_next_bit(bitmap, end, current);
        unsigned long zero, discard_length;

        if (one >= end) {
            break;
        }

        zero = find_next_zero_bit(bitmap, end, one + 1);

        if (zero >= end) {
            discard_length = end - one;
        } else {
            discard_length = zero - one;
        }
        postcopy_discard_send_range(ms, one, discard_length);
        current = one + discard_length;
    }
}
static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block);

/**
 * postcopy_each_ram_send_discard: discard all RAMBlocks
 *
 * Utility for the outgoing postcopy code.
 *   Calls postcopy_send_discard_bm_ram for each RAMBlock
 *   passing it bitmap indexes and name.
 * (qemu_ram_foreach_block ends up passing unscaled lengths
 *  which would mean postcopy code would have to deal with target page)
 *
 * @ms: current migration state
 */
static void postcopy_each_ram_send_discard(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        postcopy_discard_send_init(ms, block->idstr);

        /*
         * Deal with TPS != HPS and huge pages. It discards any partially
         * sent host-page size chunks and marks any partially dirty
         * host-page size chunks as all dirty. In this case the host-page
         * is the host-page for the particular RAMBlock, i.e. it might be a
         * huge page.
         */
        postcopy_chunk_hostpages_pass(ms, block);

        /*
         * Postcopy sends chunks of bitmap over the wire, but it
         * just needs indexes at this point, avoids it having
         * target page specific code.
         */
        postcopy_send_discard_bm_ram(ms, block);
        postcopy_discard_send_finish(ms);
    }
}
/**
 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
 *
 * Helper for postcopy_chunk_hostpages; it's called twice to
 * canonicalize the two bitmaps, that are similar, but one is
 * inverted.
 *
 * Postcopy requires that all target pages in a hostpage are dirty or
 * clean, not a mix. This function canonicalizes the bitmaps.
 *
 * @ms: current migration state
 * @block: block that contains the page we want to canonicalize
 */
static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
{
    RAMState *rs = ram_state;
    unsigned long *bitmap = block->bmap;
    unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
    unsigned long run_start;

    if (block->page_size == TARGET_PAGE_SIZE) {
        /* Easy case - TPS==HPS for a non-huge page RAMBlock */
        return;
    }

    /* Find a dirty page */
    run_start = find_next_bit(bitmap, pages, 0);

    while (run_start < pages) {

        /*
         * If the start of this run of pages is in the middle of a host
         * page, then we need to fixup this host page.
         */
        if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
            /* Find the end of this run */
            run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
            /*
             * If the end isn't at the start of a host page, then the
             * run doesn't finish at the end of a host page
             * and we need to discard.
             */
        }

        if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
            unsigned long page;
            unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
                                                             host_ratio);
            run_start = QEMU_ALIGN_UP(run_start, host_ratio);

            /* Clean up the bitmap */
            for (page = fixup_start_addr;
                 page < fixup_start_addr + host_ratio; page++) {
                /*
                 * Remark them as dirty, updating the count for any pages
                 * that weren't previously dirty.
                 */
                rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
            }
        }

        /* Find the next dirty page for the next iteration */
        run_start = find_next_bit(bitmap, pages, run_start);
    }
}
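/*
 * Worked example for the fixup above: with a 2M huge page RAMBlock and 4K
 * target pages (host_ratio == 512), a dirty run covering target pages
 * 100..599 is host-page aligned on neither end, so pages 0..511 and then
 * 512..1023 are remarked fully dirty; every huge page thus ends up either
 * entirely dirty or entirely clean.
 */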
/**
 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
 *
 * Transmit the set of pages to be discarded after precopy to the target
 * these are pages that:
 *     a) Have been previously transmitted but are now dirty again
 *     b) Pages that have never been transmitted, this ensures that
 *        any pages on the destination that have been mapped by background
 *        tasks get discarded (transparent huge pages is the specific concern)
 * Hopefully this is pretty sparse
 *
 * @ms: current migration state
 */
void ram_postcopy_send_discard_bitmap(MigrationState *ms)
{
    RAMState *rs = ram_state;

    RCU_READ_LOCK_GUARD();

    /* This should be our last sync, the src is now paused */
    migration_bitmap_sync(rs, false);

    /* Easiest way to make sure we don't resume in the middle of a host-page */
    rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
    rs->last_seen_block = NULL;
    rs->last_page = 0;

    postcopy_each_ram_send_discard(ms);

    trace_ram_postcopy_send_discard_bitmap();
}
/**
 * ram_discard_range: discard dirtied pages at the beginning of postcopy
 *
 * Returns zero on success
 *
 * @rbname: name of the RAMBlock of the request. NULL means the
 *          same that last one.
 * @start: RAMBlock starting page
 * @length: RAMBlock size
 */
int ram_discard_range(const char *rbname, uint64_t start, size_t length)
{
    trace_ram_discard_range(rbname, start, length);

    RCU_READ_LOCK_GUARD();
    RAMBlock *rb = qemu_ram_block_by_name(rbname);

    if (!rb) {
        error_report("ram_discard_range: Failed to find block '%s'", rbname);
        return -1;
    }

    /*
     * On source VM, we don't need to update the received bitmap since
     * we don't even have one.
     */
    if (rb->receivedmap) {
        bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
                     length >> qemu_target_page_bits());
    }

    return ram_block_discard_range(rb, start, length);
}
/*
 * For every allocation, we will try not to crash the VM if the
 * allocation failed.
 */
static bool xbzrle_init(Error **errp)
{
    if (!migrate_xbzrle()) {
        return true;
    }

    XBZRLE_cache_lock();

    XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
    if (!XBZRLE.zero_target_page) {
        error_setg(errp, "%s: Error allocating zero page", __func__);
        goto err_out;
    }

    XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
                              TARGET_PAGE_SIZE, errp);
    if (!XBZRLE.cache) {
        goto free_zero_page;
    }

    XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
    if (!XBZRLE.encoded_buf) {
        error_setg(errp, "%s: Error allocating encoded_buf", __func__);
        goto free_cache;
    }

    XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
    if (!XBZRLE.current_buf) {
        error_setg(errp, "%s: Error allocating current_buf", __func__);
        goto free_encoded_buf;
    }

    /* We are all good */
    XBZRLE_cache_unlock();
    return true;

free_encoded_buf:
    g_free(XBZRLE.encoded_buf);
    XBZRLE.encoded_buf = NULL;
free_cache:
    cache_fini(XBZRLE.cache);
    XBZRLE.cache = NULL;
free_zero_page:
    g_free(XBZRLE.zero_target_page);
    XBZRLE.zero_target_page = NULL;
err_out:
    XBZRLE_cache_unlock();
    return false;
}
static bool ram_state_init(RAMState **rsp, Error **errp)
{
    *rsp = g_try_new0(RAMState, 1);

    if (!*rsp) {
        error_setg(errp, "%s: Init ramstate fail", __func__);
        return false;
    }

    qemu_mutex_init(&(*rsp)->bitmap_mutex);
    qemu_mutex_init(&(*rsp)->src_page_req_mutex);
    QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
    (*rsp)->ram_bytes_total = ram_bytes_total();

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     * This must match with the initial values of dirty bitmap.
     */
    (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS;
    ram_state_reset(*rsp);

    return true;
}
static void ram_list_init_bitmaps(void)
{
    MigrationState *ms = migrate_get_current();
    RAMBlock *block;
    unsigned long pages;
    uint8_t shift;

    /* Skip setting bitmap if there is no RAM */
    if (ram_bytes_total()) {
        shift = ms->clear_bitmap_shift;
        if (shift > CLEAR_BITMAP_SHIFT_MAX) {
            error_report("clear_bitmap_shift (%u) too big, using "
                         "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
            shift = CLEAR_BITMAP_SHIFT_MAX;
        } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
            error_report("clear_bitmap_shift (%u) too small, using "
                         "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
            shift = CLEAR_BITMAP_SHIFT_MIN;
        }

        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            pages = block->max_length >> TARGET_PAGE_BITS;
            /*
             * The initial dirty bitmap for migration must be set with all
             * ones to make sure we'll migrate every guest RAM page to the
             * destination.
             * Here we set RAMBlock.bmap all to 1 because when restarting a
             * migration after a failed one, ram_list.
             * dirty_memory[DIRTY_MEMORY_MIGRATION] may not cover the whole
             * guest memory.
             */
            block->bmap = bitmap_new(pages);
            bitmap_set(block->bmap, 0, pages);
            if (migrate_mapped_ram()) {
                block->file_bmap = bitmap_new(pages);
            }
            block->clear_bmap_shift = shift;
            block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
        }
    }
}
static void migration_bitmap_clear_discarded_pages(RAMState *rs)
{
    unsigned long pages;
    RAMBlock *rb;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        pages = ramblock_dirty_bitmap_clear_discarded_pages(rb);
        rs->migration_dirty_pages -= pages;
    }
}
static bool ram_init_bitmaps(RAMState *rs, Error **errp)
{
    bool ret = true;

    qemu_mutex_lock_ramlist();

    WITH_RCU_READ_LOCK_GUARD() {
        ram_list_init_bitmaps();
        /* We don't use dirty log with background snapshots */
        if (!migrate_background_snapshot()) {
            ret = memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION, errp);
            if (!ret) {
                goto out_unlock;
            }
            migration_bitmap_sync_precopy(rs, false);
        }
    }
out_unlock:
    qemu_mutex_unlock_ramlist();

    if (!ret) {
        ram_bitmaps_destroy();
        return false;
    }

    /*
     * After an eventual first bitmap sync, fixup the initial bitmap
     * containing all 1s to exclude any discarded pages from migration.
     */
    migration_bitmap_clear_discarded_pages(rs);

    return true;
}
static int ram_init_all(RAMState **rsp, Error **errp)
{
    if (!ram_state_init(rsp, errp)) {
        return -1;
    }

    if (!xbzrle_init(errp)) {
        ram_state_cleanup(rsp);
        return -1;
    }

    if (!ram_init_bitmaps(*rsp, errp)) {
        return -1;
    }

    return 0;
}
static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
{
    RAMBlock *block;
    uint64_t pages = 0;

    /*
     * Postcopy is not using xbzrle/compression, so no need for that.
     * Also, since the source is already halted, we don't need to care
     * about dirty page logging as well.
     */
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        pages += bitmap_count_one(block->bmap,
                                  block->used_length >> TARGET_PAGE_BITS);
    }

    /* This may not be aligned with current bitmaps. Recalculate. */
    rs->migration_dirty_pages = pages;

    ram_state_reset(rs);

    /* Update RAMState cache of output QEMUFile */
    rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out;

    trace_ram_state_resume_prepare(pages);
}
/*
 * This function clears bits of the free pages reported by the caller from the
 * migration dirty bitmap. @addr is the host address corresponding to the
 * start of the continuous guest free pages, and @len is the total bytes of
 * those pages.
 */
void qemu_guest_free_page_hint(void *addr, size_t len)
{
    RAMBlock *block;
    ram_addr_t offset;
    size_t used_len, start, npages;

    /* This function is currently expected to be used during live migration */
    if (!migration_is_setup_or_active()) {
        return;
    }

    for (; len > 0; len -= used_len, addr += used_len) {
        block = qemu_ram_block_from_host(addr, false, &offset);
        if (unlikely(!block || offset >= block->used_length)) {
            /*
             * The implementation might not support RAMBlock resize during
             * live migration, but it could happen in theory with future
             * updates. So we add a check here to capture that case.
             */
            error_report_once("%s unexpected error", __func__);
            return;
        }

        if (len <= block->used_length - offset) {
            used_len = len;
        } else {
            used_len = block->used_length - offset;
        }

        start = offset >> TARGET_PAGE_BITS;
        npages = used_len >> TARGET_PAGE_BITS;

        qemu_mutex_lock(&ram_state->bitmap_mutex);
        /*
         * The skipped free pages are equivalent to be sent from clear_bmap's
         * perspective, so clear the bits from the memory region bitmap which
         * are initially set. Otherwise those skipped pages will be sent in
         * the next round after syncing from the memory region bitmap.
         */
        migration_clear_memory_region_dirty_bitmap_range(block, start, npages);
        ram_state->migration_dirty_pages -=
                      bitmap_count_one_with_offset(block->bmap, start, npages);
        bitmap_clear(block->bmap, start, npages);
        qemu_mutex_unlock(&ram_state->bitmap_mutex);
    }
}
#define MAPPED_RAM_HDR_VERSION 1
struct MappedRamHeader {
    uint32_t version;
    /*
     * The target's page size, so we know how many pages are in the
     * bitmap.
     */
    uint64_t page_size;
    /*
     * The offset in the migration file where the pages bitmap is
     * stored.
     */
    uint64_t bitmap_offset;
    /*
     * The offset in the migration file where the actual pages (data)
     * are stored.
     */
    uint64_t pages_offset;
} QEMU_PACKED;
typedef struct MappedRamHeader MappedRamHeader;
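/*
 * Resulting mapped-ram file layout for each RAMBlock, as produced by
 * mapped_ram_setup_ramblock() below:
 *
 *   header (MappedRamHeader, big-endian fields)
 *   dirty bitmap                 <- bitmap_offset
 *   padding up to MAPPED_RAM_FILE_OFFSET_ALIGNMENT
 *   page data                    <- pages_offset
 *
 * so pages can be written and read back at fixed, aligned offsets
 * (O_DIRECT friendly).
 */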
static void mapped_ram_setup_ramblock(QEMUFile *file, RAMBlock *block)
{
    g_autofree MappedRamHeader *header = NULL;
    size_t header_size, bitmap_size;
    long num_pages;

    header = g_new0(MappedRamHeader, 1);
    header_size = sizeof(MappedRamHeader);

    num_pages = block->used_length >> TARGET_PAGE_BITS;
    bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);

    /*
     * Save the file offsets of where the bitmap and the pages should
     * go as they are written at the end of migration and during the
     * iterative phase, respectively.
     */
    block->bitmap_offset = qemu_get_offset(file) + header_size;
    block->pages_offset = ROUND_UP(block->bitmap_offset +
                                   bitmap_size,
                                   MAPPED_RAM_FILE_OFFSET_ALIGNMENT);

    header->version = cpu_to_be32(MAPPED_RAM_HDR_VERSION);
    header->page_size = cpu_to_be64(TARGET_PAGE_SIZE);
    header->bitmap_offset = cpu_to_be64(block->bitmap_offset);
    header->pages_offset = cpu_to_be64(block->pages_offset);

    qemu_put_buffer(file, (uint8_t *) header, header_size);

    /* prepare offset for next ramblock */
    qemu_set_offset(file, block->pages_offset + block->used_length, SEEK_SET);
}
static bool mapped_ram_read_header(QEMUFile *file, MappedRamHeader *header,
                                   Error **errp)
{
    size_t ret, header_size = sizeof(MappedRamHeader);

    ret = qemu_get_buffer(file, (uint8_t *)header, header_size);
    if (ret != header_size) {
        error_setg(errp, "Could not read whole mapped-ram migration header "
                   "(expected %zd, got %zd bytes)", header_size, ret);
        return false;
    }

    /* migration stream is big-endian */
    header->version = be32_to_cpu(header->version);

    if (header->version > MAPPED_RAM_HDR_VERSION) {
        error_setg(errp, "Migration mapped-ram capability version not "
                   "supported (expected <= %d, got %d)", MAPPED_RAM_HDR_VERSION,
                   header->version);
        return false;
    }

    header->page_size = be64_to_cpu(header->page_size);
    header->bitmap_offset = be64_to_cpu(header->bitmap_offset);
    header->pages_offset = be64_to_cpu(header->pages_offset);

    return true;
}
/*
 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * long-running RCU critical section.  When rcu-reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */

/**
 * ram_save_setup: Setup RAM for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 * @errp: pointer to Error*, to store an error if it happens.
 */
static int ram_save_setup(QEMUFile *f, void *opaque, Error **errp)
{
    RAMState **rsp = opaque;
    RAMBlock *block;
    int ret, max_hg_page_size;

    if (compress_threads_save_setup()) {
        error_setg(errp, "%s: failed to start compress threads", __func__);
        return -1;
    }

    /* migration has already setup the bitmap, reuse it. */
    if (!migration_in_colo_state()) {
        if (ram_init_all(rsp, errp) != 0) {
            compress_threads_save_cleanup();
            return -1;
        }
    }
    (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f;

    /*
     * ??? Mirrors the previous value of qemu_host_page_size,
     * but is this really what was intended for the migration?
     */
    max_hg_page_size = MAX(qemu_real_host_page_size(), TARGET_PAGE_SIZE);

    WITH_RCU_READ_LOCK_GUARD() {
        qemu_put_be64(f, ram_bytes_total_with_ignored()
                         | RAM_SAVE_FLAG_MEM_SIZE);

        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            qemu_put_byte(f, strlen(block->idstr));
            qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
            qemu_put_be64(f, block->used_length);
            if (migrate_postcopy_ram() &&
                block->page_size != max_hg_page_size) {
                qemu_put_be64(f, block->page_size);
            }
            if (migrate_ignore_shared()) {
                qemu_put_be64(f, block->mr->addr);
            }

            if (migrate_mapped_ram()) {
                mapped_ram_setup_ramblock(f, block);
            }
        }
    }

    ret = rdma_registration_start(f, RAM_CONTROL_SETUP);
    if (ret < 0) {
        error_setg(errp, "%s: failed to start RDMA registration", __func__);
        qemu_file_set_error(f, ret);
        return ret;
    }

    ret = rdma_registration_stop(f, RAM_CONTROL_SETUP);
    if (ret < 0) {
        error_setg(errp, "%s: failed to stop RDMA registration", __func__);
        qemu_file_set_error(f, ret);
        return ret;
    }

    migration_ops = g_malloc0(sizeof(MigrationOps));

    if (migrate_multifd()) {
        migration_ops->ram_save_target_page = ram_save_target_page_multifd;
    } else {
        migration_ops->ram_save_target_page = ram_save_target_page_legacy;
    }

    ret = multifd_send_sync_main();
    if (ret < 0) {
        error_setg(errp, "%s: multifd synchronization failed", __func__);
        return ret;
    }

    if (migrate_multifd() && !migrate_multifd_flush_after_each_section()
        && !migrate_mapped_ram()) {
        qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    ret = qemu_fflush(f);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "%s failed", __func__);
    }
    return ret;
}
);
3177 static void ram_save_file_bmap(QEMUFile
*f
)
3181 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
3182 long num_pages
= block
->used_length
>> TARGET_PAGE_BITS
;
3183 long bitmap_size
= BITS_TO_LONGS(num_pages
) * sizeof(unsigned long);
3185 qemu_put_buffer_at(f
, (uint8_t *)block
->file_bmap
, bitmap_size
,
3186 block
->bitmap_offset
);
3187 ram_transferred_add(bitmap_size
);
3190 * Free the bitmap here to catch any synchronization issues
3191 * with multifd channels. No channels should be sending pages
3192 * after we've written the bitmap to file.
3194 g_free(block
->file_bmap
);
3195 block
->file_bmap
= NULL
;
void ramblock_set_file_bmap_atomic(RAMBlock *block, ram_addr_t offset, bool set)
{
    if (set) {
        set_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap);
    } else {
        clear_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap);
    }
}
/**
 * ram_save_iterate: iterative stage for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    int ret = 0;
    int i;
    int64_t t0;
    int done = 0;

    /*
     * We'll take this lock a little bit long, but it's okay for two reasons.
     * Firstly, the only possible other thread to take it is who calls
     * qemu_guest_free_page_hint(), which should be rare; secondly, see
     * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which
     * guarantees that we'll release it at least on a regular basis.
     */
    WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) {
        WITH_RCU_READ_LOCK_GUARD() {
            if (ram_list.version != rs->last_version) {
                ram_state_reset(rs);
            }

            /* Read version before ram_list.blocks */
            smp_rmb();

            ret = rdma_registration_start(f, RAM_CONTROL_ROUND);
            if (ret < 0) {
                qemu_file_set_error(f, ret);
                goto out;
            }

            t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
            i = 0;
            while ((ret = migration_rate_exceeded(f)) == 0 ||
                   postcopy_has_request(rs)) {
                int pages;

                if (qemu_file_get_error(f)) {
                    break;
                }

                pages = ram_find_and_save_block(rs);
                /* no more pages to send */
                if (pages == 0) {
                    done = 1;
                    break;
                }

                if (pages < 0) {
                    qemu_file_set_error(f, pages);
                    break;
                }

                rs->target_page_count += pages;

                /*
                 * During postcopy, it is necessary to make sure one whole host
                 * page is sent in one chunk.
                 */
                if (migrate_postcopy_ram()) {
                    compress_flush_data();
                }

                /*
                 * we want to check in the 1st loop, just in case it was the 1st
                 * time and we had to sync the dirty bitmap.
                 * qemu_clock_get_ns() is a bit expensive, so we only check each
                 * some iterations
                 */
                if ((i & 63) == 0) {
                    uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
                        1000000;
                    if (t1 > MAX_WAIT) {
                        trace_ram_save_iterate_big_wait(t1, i);
                        break;
                    }
                }
                i++;
            }
        }
    }

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ret = rdma_registration_stop(f, RAM_CONTROL_ROUND);
    if (ret < 0) {
        qemu_file_set_error(f, ret);
    }

out:
    if (ret >= 0
        && migration_is_setup_or_active()) {
        if (migrate_multifd() && migrate_multifd_flush_after_each_section() &&
            !migrate_mapped_ram()) {
            ret = multifd_send_sync_main();
            if (ret < 0) {
                return ret;
            }
        }

        qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
        ram_transferred_add(8);
        ret = qemu_fflush(f);
    }
    if (ret < 0) {
        return ret;
    }

    return done;
}
/**
 * ram_save_complete: function called to send the remaining amount of ram
 *
 * Returns zero to indicate success or negative on error
 *
 * Called with the BQL
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    int ret = 0;

    rs->last_stage = !migration_in_colo_state();

    WITH_RCU_READ_LOCK_GUARD() {
        if (!migration_in_postcopy()) {
            migration_bitmap_sync_precopy(rs, true);
        }

        ret = rdma_registration_start(f, RAM_CONTROL_FINISH);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            return ret;
        }

        /* try transferring iterative blocks of memory */

        /* flush all remaining blocks regardless of rate limiting */
        qemu_mutex_lock(&rs->bitmap_mutex);
        while (true) {
            int pages;

            pages = ram_find_and_save_block(rs);
            /* no more blocks to send */
            if (pages == 0) {
                break;
            }
            if (pages < 0) {
                qemu_mutex_unlock(&rs->bitmap_mutex);
                return pages;
            }
        }
        qemu_mutex_unlock(&rs->bitmap_mutex);

        compress_flush_data();

        ret = rdma_registration_stop(f, RAM_CONTROL_FINISH);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            return ret;
        }
    }

    ret = multifd_send_sync_main();
    if (ret < 0) {
        return ret;
    }

    if (migrate_mapped_ram()) {
        ram_save_file_bmap(f);

        if (qemu_file_get_error(f)) {
            Error *local_err = NULL;
            int err = qemu_file_get_error_obj(f, &local_err);

            error_reportf_err(local_err, "Failed to write bitmap to file: ");
            return -err;
        }
    }

    if (migrate_multifd() && !migrate_multifd_flush_after_each_section() &&
        !migrate_mapped_ram()) {
        qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
    }
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    return qemu_fflush(f);
}
static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy,
                                       uint64_t *can_postcopy)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;

    uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

    if (migrate_postcopy_ram()) {
        /* We can do postcopy, and all the data is postcopiable */
        *can_postcopy += remaining_size;
    } else {
        *must_precopy += remaining_size;
    }
}

static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
                                    uint64_t *can_postcopy)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    uint64_t remaining_size;

    if (!migration_in_postcopy()) {
        bql_lock();
        WITH_RCU_READ_LOCK_GUARD() {
            migration_bitmap_sync_precopy(rs, false);
        }
        bql_unlock();
    }

    remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

    if (migrate_postcopy_ram()) {
        /* We can do postcopy, and all the data is postcopiable */
        *can_postcopy += remaining_size;
    } else {
        *must_precopy += remaining_size;
    }
}
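/*
 * The difference between the two callbacks above: _estimate() reuses the
 * cached migration_dirty_pages counter and is cheap, while _exact()
 * re-syncs the dirty bitmap before computing the same number, trading
 * cost for accuracy right before deciding whether to converge.
 */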
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;
    uint8_t *loaded_data;

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    loaded_data = XBZRLE.decoded_buf;
    /* load data and decode */
    /* it can change loaded_data to point to an internal buffer */
    qemu_get_buffer_in_place(f, &loaded_data, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(loaded_data, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}
/**
 * ram_block_from_stream: read a RAMBlock id from the migration stream
 *
 * Must be called from within a rcu critical section.
 *
 * Returns a pointer from within the RCU-protected ram_list.
 *
 * @mis: the migration incoming state pointer
 * @f: QEMUFile where to read the data from
 * @flags: Page flags (mostly to see if it's a continuation of previous block)
 * @channel: the channel we're using
 */
static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis,
                                              QEMUFile *f, int flags,
                                              int channel)
{
    RAMBlock *block = mis->last_recv_block[channel];
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }
        return block;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    block = qemu_ram_block_by_name(id);
    if (!block) {
        error_report("Can't find block %s", id);
        return NULL;
    }

    if (migrate_ram_is_ignored(block)) {
        error_report("block %s should not be migrated !", id);
        return NULL;
    }

    mis->last_recv_block[channel] = block;

    return block;
}
static inline void *host_from_ram_block_offset(RAMBlock *block,
                                               ram_addr_t offset)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }

    return block->host + offset;
}

static void *host_page_from_ram_block_offset(RAMBlock *block,
                                             ram_addr_t offset)
{
    /* Note: Explicitly no check against offset_in_ramblock(). */
    return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset),
                                   block->page_size);
}

static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block,
                                                         ram_addr_t offset)
{
    return ((uintptr_t)block->host + offset) & (block->page_size - 1);
}
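/*
 * Example for the helpers above, assuming a hugetlbfs block whose host
 * mapping is aligned to its 2M page_size: for offset 0x201000,
 * host_page_from_ram_block_offset() yields block->host + 0x200000 and
 * host_page_offset_from_ram_block_offset() yields 0x1000.
 */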
void colo_record_bitmap(RAMBlock *block, ram_addr_t *normal, uint32_t pages)
{
    qemu_mutex_lock(&ram_state->bitmap_mutex);
    for (int i = 0; i < pages; i++) {
        ram_addr_t offset = normal[i];
        ram_state->migration_dirty_pages += !test_and_set_bit(
                                                offset >> TARGET_PAGE_BITS,
                                                block->bmap);
    }
    qemu_mutex_unlock(&ram_state->bitmap_mutex);
}
static inline void *colo_cache_from_block_offset(RAMBlock *block,
                             ram_addr_t offset, bool record_bitmap)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }
    if (!block->colo_cache) {
        error_report("%s: colo_cache is NULL in block :%s",
                     __func__, block->idstr);
        return NULL;
    }

    /*
     * During colo checkpoint, we need bitmap of these migrated pages.
     * It helps us to decide which pages in ram cache should be flushed
     * into VM's RAM later.
     */
    if (record_bitmap) {
        colo_record_bitmap(block, &offset, 1);
    }
    return block->colo_cache + offset;
}
/**
 * ram_handle_zero: handle the zero page case
 *
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 *
 * @host: host address for the zero page
 * @size: size of the zero page
 */
void ram_handle_zero(void *host, uint64_t size)
{
    if (!buffer_is_zero(host, size)) {
        memset(host, 0, size);
    }
}
static void colo_init_ram_state(void)
{
    Error *local_err = NULL;

    if (!ram_state_init(&ram_state, &local_err)) {
        error_report_err(local_err);
    }
}
/*
 * colo cache: this is for secondary VM, we cache the whole
 * memory of the secondary VM. The global lock needs to be held
 * to call this helper.
 */
int colo_init_ram_cache(void)
{
    RAMBlock *block;

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            block->colo_cache = qemu_anon_ram_alloc(block->used_length,
                                                    NULL, false, false);
            if (!block->colo_cache) {
                error_report("%s: Can't alloc memory for COLO cache of block %s,"
                             "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
                             block->used_length);
                RAMBLOCK_FOREACH_NOT_IGNORED(block) {
                    if (block->colo_cache) {
                        qemu_anon_ram_free(block->colo_cache, block->used_length);
                        block->colo_cache = NULL;
                    }
                }
                return -errno;
            }
            if (!machine_dump_guest_core(current_machine)) {
                qemu_madvise(block->colo_cache, block->used_length,
                             QEMU_MADV_DONTDUMP);
            }
        }
    }

    /*
     * Record the dirty pages that were sent by the PVM; we use this dirty
     * bitmap to decide which pages in the cache should be flushed into the
     * SVM's RAM later. Here we use the same name 'ram_bitmap' as for
     * migration.
     */
    if (ram_bytes_total()) {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
            block->bmap = bitmap_new(pages);
        }
    }

    colo_init_ram_state();
    return 0;
}
/* TODO: duplicated with ram_init_bitmaps */
void colo_incoming_start_dirty_log(void)
{
    RAMBlock *block = NULL;
    Error *local_err = NULL;

    /* For memory_global_dirty_log_start below. */
    bql_lock();
    qemu_mutex_lock_ramlist();

    memory_global_dirty_log_sync(false);
    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            ramblock_sync_dirty_bitmap(ram_state, block);
            /* Discard this dirty bitmap record */
            bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS);
        }
        if (!memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION,
                                           &local_err)) {
            error_report_err(local_err);
        }
    }
    ram_state->migration_dirty_pages = 0;
    qemu_mutex_unlock_ramlist();
    bql_unlock();
}
/* The global lock needs to be held to call this helper */
void colo_release_ram_cache(void)
{
    RAMBlock *block;

    memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        g_free(block->bmap);
        block->bmap = NULL;
    }

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            if (block->colo_cache) {
                qemu_anon_ram_free(block->colo_cache, block->used_length);
                block->colo_cache = NULL;
            }
        }
    }
    ram_state_cleanup(&ram_state);
}
/**
 * ram_load_setup: Setup RAM for migration incoming side
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to receive the data
 * @opaque: RAMState pointer
 * @errp: pointer to Error*, to store an error if it happens.
 */
static int ram_load_setup(QEMUFile *f, void *opaque, Error **errp)
{
    xbzrle_load_setup();
    ramblock_recv_map_init();

    return 0;
}

static int ram_load_cleanup(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        qemu_ram_block_writeback(rb);
    }

    xbzrle_load_cleanup();

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        g_free(rb->receivedmap);
        rb->receivedmap = NULL;
    }

    return 0;
}
/**
 * ram_postcopy_incoming_init: allocate postcopy data structures
 *
 * Returns 0 for success and negative if there was one error
 *
 * @mis: current migration incoming state
 *
 * Allocate data structures etc needed by incoming migration with
 * postcopy-ram. postcopy-ram's similarly named
 * postcopy_ram_incoming_init does the work.
 */
int ram_postcopy_incoming_init(MigrationIncomingState *mis)
{
    return postcopy_ram_incoming_init(mis);
}
/**
 * ram_load_postcopy: load a page in postcopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in postcopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile where to send the data
 * @channel: the channel to use for loading
 */
int ram_load_postcopy(QEMUFile *f, int channel)
{
    int flags = 0, ret = 0;
    bool place_needed = false;
    bool matches_target_page_size = false;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel];

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *page_buffer = NULL;
        void *place_source = NULL;
        RAMBlock *block = NULL;
        uint8_t ch;
        int len;

        addr = qemu_get_be64(f);

        /*
         * If qemu file error, we should stop here, and then "addr"
         * may be invalid.
         */
        ret = qemu_file_get_error(f);
        if (ret) {
            break;
        }

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        trace_ram_load_postcopy_loop(channel, (uint64_t)addr, flags);
        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE)) {
            block = ram_block_from_stream(mis, f, flags, channel);
            if (!block) {
                ret = -EINVAL;
                break;
            }

            /*
             * Relying on used_length is racy and can result in false positives.
             * We might place pages beyond used_length in case RAM was shrunk
             * while in postcopy, which is fine - trying to place via
             * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault.
             */
            if (!block->host || addr >= block->postcopy_length) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            tmp_page->target_pages++;
            matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
            /*
             * Postcopy requires that we place whole host pages atomically;
             * these may be huge pages for RAMBlocks that are backed by
             * hugetlbfs.
             * To make it atomic, the data is read into a temporary page
             * that's moved into place later.
             * The migration protocol uses, possibly smaller, target-pages
             * however the source ensures it always sends all the components
             * of a host page in one chunk.
             */
            page_buffer = tmp_page->tmp_huge_page +
                          host_page_offset_from_ram_block_offset(block, addr);
            /* If all TP are zero then we can optimise the place */
            if (tmp_page->target_pages == 1) {
                tmp_page->host_addr =
                    host_page_from_ram_block_offset(block, addr);
            } else if (tmp_page->host_addr !=
                       host_page_from_ram_block_offset(block, addr)) {
                /* not the 1st TP within the HP */
                error_report("Non-same host page detected on channel %d: "
                             "Target host page %p, received host page %p "
                             "(rb %s offset 0x"RAM_ADDR_FMT" target_pages %d)",
                             channel, tmp_page->host_addr,
                             host_page_from_ram_block_offset(block, addr),
                             block->idstr, addr, tmp_page->target_pages);
                ret = -EINVAL;
                break;
            }

            /*
             * If it's the last part of a host page then we place the host
             * page
             */
            if (tmp_page->target_pages ==
                (block->page_size / TARGET_PAGE_SIZE)) {
                place_needed = true;
            }
            place_source = tmp_page->tmp_huge_page;
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            if (ch != 0) {
                error_report("Found a zero page with value %d", ch);
                ret = -EINVAL;
                break;
            }
            /*
             * Can skip to set page_buffer when
             * this is a zero page and (block->page_size == TARGET_PAGE_SIZE).
             */
            if (!matches_target_page_size) {
                memset(page_buffer, ch, TARGET_PAGE_SIZE);
            }
            break;

        case RAM_SAVE_FLAG_PAGE:
            tmp_page->all_zero = false;
            if (!matches_target_page_size) {
                /* For huge pages, we always use temporary buffer */
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
            } else {
                /*
                 * For small pages that matches target page size, we
                 * avoid the qemu_file copy. Instead we directly use
                 * the buffer of QEMUFile to place the page. Note: we
                 * cannot do any QEMUFile operation before using that
                 * buffer to make sure the buffer is valid when
                 * placing the page.
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
                                         TARGET_PAGE_SIZE);
            }
            break;
        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            tmp_page->all_zero = false;
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, page_buffer, len);
            break;
        case RAM_SAVE_FLAG_MULTIFD_FLUSH:
            multifd_recv_sync_main();
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            if (migrate_multifd() &&
                migrate_multifd_flush_after_each_section()) {
                multifd_recv_sync_main();
            }
            break;
        default:
            error_report("Unknown combination of migration flags: 0x%x"
                         " (postcopy mode)", flags);
            ret = -EINVAL;
            break;
        }

        /* Got the whole host page, wait for decompress before placing. */
        if (place_needed) {
            ret |= wait_for_decompress_done();
        }

        /* Detect for any possible file errors */
        if (!ret && qemu_file_get_error(f)) {
            ret = qemu_file_get_error(f);
        }

        if (!ret && place_needed) {
            if (tmp_page->all_zero) {
                ret = postcopy_place_page_zero(mis, tmp_page->host_addr, block);
            } else {
                ret = postcopy_place_page(mis, tmp_page->host_addr,
                                          place_source, block);
            }
            place_needed = false;
            postcopy_temp_page_reset(tmp_page);
        }
    }

    return ret;
}
static bool postcopy_is_running(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
}
/*
 * Flush content of RAM cache into SVM's memory.
 * Only flush the pages that were dirtied by PVM or SVM or both.
 */
void colo_flush_ram_cache(void)
{
    RAMBlock *block = NULL;
    void *dst_host;
    void *src_host;
    unsigned long offset = 0;

    memory_global_dirty_log_sync(false);
    qemu_mutex_lock(&ram_state->bitmap_mutex);
    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            ramblock_sync_dirty_bitmap(ram_state, block);
        }
    }

    trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
    WITH_RCU_READ_LOCK_GUARD() {
        block = QLIST_FIRST_RCU(&ram_list.blocks);

        while (block) {
            unsigned long num = 0;

            offset = colo_bitmap_find_dirty(ram_state, block, offset, &num);
            if (!offset_in_ramblock(block,
                                    ((ram_addr_t)offset) << TARGET_PAGE_BITS)) {
                offset = 0;
                num = 0;
                block = QLIST_NEXT_RCU(block, next);
            } else {
                unsigned long i = 0;

                for (i = 0; i < num; i++) {
                    migration_bitmap_clear_dirty(ram_state, block, offset + i);
                }
                dst_host = block->host
                         + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
                src_host = block->colo_cache
                         + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
                memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num);
                offset += num;
            }
        }
    }
    qemu_mutex_unlock(&ram_state->bitmap_mutex);
    trace_colo_flush_ram_cache_end();
}
4014 static size_t ram_load_multifd_pages(void *host_addr
, size_t size
,
4017 MultiFDRecvData
*data
= multifd_get_recv_data();
4019 data
->opaque
= host_addr
;
4020 data
->file_offset
= offset
;
4023 if (!multifd_recv()) {
static bool read_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
                                     long num_pages, unsigned long *bitmap,
                                     Error **errp)
{
    unsigned long set_bit_idx, clear_bit_idx;
    ram_addr_t offset;
    void *host;
    size_t read, unread, size;

    for (set_bit_idx = find_first_bit(bitmap, num_pages);
         set_bit_idx < num_pages;
         set_bit_idx = find_next_bit(bitmap, num_pages, clear_bit_idx + 1)) {

        clear_bit_idx = find_next_zero_bit(bitmap, num_pages, set_bit_idx + 1);

        unread = TARGET_PAGE_SIZE * (clear_bit_idx - set_bit_idx);
        offset = set_bit_idx << TARGET_PAGE_BITS;

        while (unread > 0) {
            host = host_from_ram_block_offset(block, offset);
            if (!host) {
                error_setg(errp, "page outside of ramblock %s range",
                           block->idstr);
                return false;
            }

            size = MIN(unread, MAPPED_RAM_LOAD_BUF_SIZE);

            if (migrate_multifd()) {
                read = ram_load_multifd_pages(host, size,
                                              block->pages_offset + offset);
            } else {
                read = qemu_get_buffer_at(f, host, size,
                                          block->pages_offset + offset);
            }

            if (!read) {
                goto err;
            }
            offset += read;
            unread -= read;
        }
    }

    return true;

err:
    qemu_file_get_error_obj(f, errp);
    error_prepend(errp, "(%s) failed to read page " RAM_ADDR_FMT
                  "from file offset %" PRIx64 ": ", block->idstr, offset,
                  block->pages_offset + offset);
    return false;
}
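/*
 * The iteration above walks the dirty bitmap run by run: each run of set
 * bits is a contiguous extent of pages present in the file, which is then
 * read in chunks of at most MAPPED_RAM_LOAD_BUF_SIZE, either directly or
 * through the multifd recv threads.
 */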
static void parse_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
                                      ram_addr_t length, Error **errp)
{
    g_autofree unsigned long *bitmap = NULL;
    MappedRamHeader header;
    size_t bitmap_size;
    long num_pages;

    if (!mapped_ram_read_header(f, &header, errp)) {
        return;
    }

    block->pages_offset = header.pages_offset;

    /*
     * Check the alignment of the file region that contains pages. We
     * don't enforce MAPPED_RAM_FILE_OFFSET_ALIGNMENT to allow that
     * value to change in the future. Do only a sanity check with page
     * alignment.
     */
    if (!QEMU_IS_ALIGNED(block->pages_offset, TARGET_PAGE_SIZE)) {
        error_setg(errp,
                   "Error reading ramblock %s pages, region has bad alignment",
                   block->idstr);
        return;
    }

    num_pages = length / header.page_size;
    bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);

    bitmap = g_malloc0(bitmap_size);
    if (qemu_get_buffer_at(f, (uint8_t *)bitmap, bitmap_size,
                           header.bitmap_offset) != bitmap_size) {
        error_setg(errp, "Error reading dirty bitmap");
        return;
    }

    if (!read_ramblock_mapped_ram(f, block, num_pages, bitmap, errp)) {
        return;
    }

    /* Skip pages array */
    qemu_set_offset(f, block->pages_offset + length, SEEK_SET);
}
*f
, RAMBlock
*block
, ram_addr_t length
)
4135 /* ADVISE is earlier, it shows the source has the postcopy capability on */
4136 bool postcopy_advised
= migration_incoming_postcopy_advised();
4137 int max_hg_page_size
;
4138 Error
*local_err
= NULL
;
4142 if (migrate_mapped_ram()) {
4143 parse_ramblock_mapped_ram(f
, block
, length
, &local_err
);
4145 error_report_err(local_err
);
4151 if (!qemu_ram_is_migratable(block
)) {
4152 error_report("block %s should not be migrated !", block
->idstr
);
4156 if (length
!= block
->used_length
) {
4157 ret
= qemu_ram_resize(block
, length
, &local_err
);
4159 error_report_err(local_err
);
4165 * ??? Mirrors the previous value of qemu_host_page_size,
4166 * but is this really what was intended for the migration?
4168 max_hg_page_size
= MAX(qemu_real_host_page_size(), TARGET_PAGE_SIZE
);
4170 /* For postcopy we need to check hugepage sizes match */
4171 if (postcopy_advised
&& migrate_postcopy_ram() &&
4172 block
->page_size
!= max_hg_page_size
) {
4173 uint64_t remote_page_size
= qemu_get_be64(f
);
4174 if (remote_page_size
!= block
->page_size
) {
4175 error_report("Mismatched RAM page size %s "
4176 "(local) %zd != %" PRId64
, block
->idstr
,
4177 block
->page_size
, remote_page_size
);
4181 if (migrate_ignore_shared()) {
4182 hwaddr addr
= qemu_get_be64(f
);
4183 if (migrate_ram_is_ignored(block
) &&
4184 block
->mr
->addr
!= addr
) {
4185 error_report("Mismatched GPAs for block %s "
4186 "%" PRId64
"!= %" PRId64
, block
->idstr
,
4187 (uint64_t)addr
, (uint64_t)block
->mr
->addr
);
4191 ret
= rdma_block_notification_handle(f
, block
->idstr
);
4193 qemu_file_set_error(f
, ret
);
static int parse_ramblocks(QEMUFile *f, ram_addr_t total_ram_bytes)
{
    int ret = 0;

    /* Synchronize RAM block list */
    while (!ret && total_ram_bytes) {
        RAMBlock *block;
        char id[256];
        ram_addr_t length;
        int len = qemu_get_byte(f);

        qemu_get_buffer(f, (uint8_t *)id, len);
        id[len] = 0;
        length = qemu_get_be64(f);

        block = qemu_ram_block_by_name(id);
        if (block) {
            ret = parse_ramblock(f, block, length);
        } else {
            error_report("Unknown ramblock \"%s\", cannot accept "
                         "migration", id);
            ret = -EINVAL;
        }
        total_ram_bytes -= length;
    }

    return ret;
}
4229 * ram_load_precopy: load pages in precopy case
4231 * Returns 0 for success or -errno in case of error
4233 * Called in precopy mode by ram_load().
4234 * rcu_read_lock is taken prior to this being called.
4236 * @f: QEMUFile where to send the data
4238 static int ram_load_precopy(QEMUFile
*f
)
4240 MigrationIncomingState
*mis
= migration_incoming_get_current();
4241 int flags
= 0, ret
= 0, invalid_flags
= 0, len
= 0, i
= 0;
4243 if (!migrate_compress()) {
4244 invalid_flags
|= RAM_SAVE_FLAG_COMPRESS_PAGE
;
4247 if (migrate_mapped_ram()) {
4248 invalid_flags
|= (RAM_SAVE_FLAG_HOOK
| RAM_SAVE_FLAG_MULTIFD_FLUSH
|
4249 RAM_SAVE_FLAG_PAGE
| RAM_SAVE_FLAG_XBZRLE
|
4250 RAM_SAVE_FLAG_ZERO
);
4253 while (!ret
&& !(flags
& RAM_SAVE_FLAG_EOS
)) {
4255 void *host
= NULL
, *host_bak
= NULL
;
4259 * Yield periodically to let main loop run, but an iteration of
4260 * the main loop is expensive, so do it each some iterations
4262 if ((i
& 32767) == 0 && qemu_in_coroutine()) {
4263 aio_co_schedule(qemu_get_current_aio_context(),
4264 qemu_coroutine_self());
4265 qemu_coroutine_yield();
4269 addr
= qemu_get_be64(f
);
4270 ret
= qemu_file_get_error(f
);
4272 error_report("Getting RAM address failed");
4276 flags
= addr
& ~TARGET_PAGE_MASK
;
4277 addr
&= TARGET_PAGE_MASK
;
4279 if (flags
& invalid_flags
) {
4280 error_report("Unexpected RAM flags: %d", flags
& invalid_flags
);
4282 if (flags
& invalid_flags
& RAM_SAVE_FLAG_COMPRESS_PAGE
) {
4283 error_report("Received an unexpected compressed page");
4290 if (flags
& (RAM_SAVE_FLAG_ZERO
| RAM_SAVE_FLAG_PAGE
|
4291 RAM_SAVE_FLAG_COMPRESS_PAGE
| RAM_SAVE_FLAG_XBZRLE
)) {
4292 RAMBlock
*block
= ram_block_from_stream(mis
, f
, flags
,
4293 RAM_CHANNEL_PRECOPY
);
4295 host
= host_from_ram_block_offset(block
, addr
);
4297 * After going into COLO stage, we should not load the page
4298 * into SVM's memory directly, we put them into colo_cache firstly.
4299 * NOTE: We need to keep a copy of SVM's ram in colo_cache.
4300 * Previously, we copied all these memory in preparing stage of COLO
4301 * while we need to stop VM, which is a time-consuming process.
4302 * Here we optimize it by a trick, back-up every page while in
4303 * migration process while COLO is enabled, though it affects the
4304 * speed of the migration, but it obviously reduce the downtime of
4305 * back-up all SVM'S memory in COLO preparing stage.
4307 if (migration_incoming_colo_enabled()) {
4308 if (migration_incoming_in_colo_state()) {
4309 /* In COLO stage, put all pages into cache temporarily */
4310 host
= colo_cache_from_block_offset(block
, addr
, true);
4313 * In migration stage but before COLO stage,
4314 * Put all pages into both cache and SVM's memory.
4316 host_bak
= colo_cache_from_block_offset(block
, addr
, false);
4320 error_report("Illegal RAM offset " RAM_ADDR_FMT
, addr
);
4324 if (!migration_incoming_in_colo_state()) {
4325 ramblock_recv_bitmap_set(block
, host
);
4328 trace_ram_load_loop(block
->idstr
, (uint64_t)addr
, flags
, host
);
4331 switch (flags
& ~RAM_SAVE_FLAG_CONTINUE
) {
4332 case RAM_SAVE_FLAG_MEM_SIZE
:
4333 ret
= parse_ramblocks(f
, addr
);
4335 * For mapped-ram migration (to a file) using multifd, we sync
4336 * once and for all here to make sure all tasks we queued to
4337 * multifd threads are completed, so that all the ramblocks
4338 * (including all the guest memory pages within) are fully
4339 * loaded after this sync returns.
4341 if (migrate_mapped_ram()) {
4342 multifd_recv_sync_main();
4346 case RAM_SAVE_FLAG_ZERO
:
4347 ch
= qemu_get_byte(f
);
4349 error_report("Found a zero page with value %d", ch
);
4353 ram_handle_zero(host
, TARGET_PAGE_SIZE
);
4356 case RAM_SAVE_FLAG_PAGE
:
4357 qemu_get_buffer(f
, host
, TARGET_PAGE_SIZE
);
4360 case RAM_SAVE_FLAG_COMPRESS_PAGE
:
4361 len
= qemu_get_be32(f
);
4362 if (len
< 0 || len
> compressBound(TARGET_PAGE_SIZE
)) {
4363 error_report("Invalid compressed data length: %d", len
);
4367 decompress_data_with_multi_threads(f
, host
, len
);
4370 case RAM_SAVE_FLAG_XBZRLE
:
4371 if (load_xbzrle(f
, addr
, host
) < 0) {
4372 error_report("Failed to decompress XBZRLE page at "
4373 RAM_ADDR_FMT
, addr
);

        case RAM_SAVE_FLAG_MULTIFD_FLUSH:
            multifd_recv_sync_main();
            break;

        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            if (migrate_multifd() &&
                migrate_multifd_flush_after_each_section() &&
                /*
                 * Mapped-ram migration flushes once and for all after
                 * parsing ramblocks. Always ignore EOS for it.
                 */
                !migrate_mapped_ram()) {
                multifd_recv_sync_main();
            }
            break;

        case RAM_SAVE_FLAG_HOOK:
            ret = rdma_registration_handle(f);
            if (ret < 0) {
                qemu_file_set_error(f, ret);
            }
            break;

        default:
            error_report("Unknown combination of migration flags: 0x%x", flags);
            ret = -EINVAL;
        }

        if (!ret) {
            ret = qemu_file_get_error(f);
        }
        if (!ret && host_bak) {
            memcpy(host_bak, host, TARGET_PAGE_SIZE);
        }
    }

    ret |= wait_for_decompress_done();
    return ret;
}
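
/*
 * ram_load: entry point for loading a "ram" section from the migration
 * stream (the SaveVMHandlers.load_state callback). While postcopy is
 * running, incoming pages must be placed atomically into the guest
 * address space, so loading is delegated to ram_load_postcopy().
 */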
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int ret = 0;
    static uint64_t seq_iter;
    /*
     * If system is running in postcopy mode, page inserts to host memory
     * must be atomic.
     */
    bool postcopy_running = postcopy_is_running();

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    /*
     * This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    WITH_RCU_READ_LOCK_GUARD() {
        if (postcopy_running) {
            /*
             * Note! Here RAM_CHANNEL_PRECOPY is the precopy channel of
             * postcopy migration, we have another RAM_CHANNEL_POSTCOPY to
             * service fast page faults.
             */
            ret = ram_load_postcopy(f, RAM_CHANNEL_PRECOPY);
        } else {
            ret = ram_load_precopy(f);
        }
    }
    trace_ram_load_complete(ret, seq_iter);

    return ret;
}
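
/*
 * SaveVMHandlers.has_postcopy callback: RAM can only be postcopied when
 * none of the non-ignored RAMBlocks is backed by pmem (nvdimm), which
 * postcopy does not support yet.
 */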
static bool ram_has_postcopy(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        if (ramblock_is_pmem(rb)) {
            info_report("Block: %s, host: %p is a nvdimm memory, postcopy "
                        "is not supported now!", rb->idstr, rb->host);
            return false;
        }
    }

    return migrate_postcopy_ram();
}
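
/*
 * Postcopy recovery: after a paused postcopy resumes, the source asks the
 * destination for the "received bitmap" of every RAMBlock so that it can
 * rebuild its dirty bitmaps. Requests are counted in
 * postcopy_bmap_sync_requested and consumed, one per block, by
 * ram_dirty_bitmap_reload() on the return path.
 */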
/* Sync all the dirty bitmaps with the destination VM. */
static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
{
    RAMBlock *block;
    QEMUFile *file = s->to_dst_file;

    trace_ram_dirty_bitmap_sync_start();

    qatomic_set(&rs->postcopy_bmap_sync_requested, 0);
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        qemu_savevm_send_recv_bitmap(file, block->idstr);
        trace_ram_dirty_bitmap_request(block->idstr);
        qatomic_inc(&rs->postcopy_bmap_sync_requested);
    }

    trace_ram_dirty_bitmap_sync_wait();

    /* Wait until all the ramblocks' dirty bitmaps are synced */
    while (qatomic_read(&rs->postcopy_bmap_sync_requested)) {
        if (migration_rp_wait(s)) {
            return -1;
        }
    }

    trace_ram_dirty_bitmap_sync_complete();

    return 0;
}
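
/*
 * Wire format of one recv-bitmap reply, written by the destination in
 * ramblock_recv_bitmap_send() and consumed below:
 *
 *   be64  size of the bitmap in bytes (rounded up to a multiple of 8)
 *   data  "size" bytes of little-endian bitmap
 *   be64  end mark, which must equal RAMBLOCK_RECV_BITMAP_ENDING
 */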
/*
 * Read the received bitmap, revert it as the initial dirty bitmap.
 * This is only used when the postcopy migration is paused but wants
 * to resume from a middle point.
 *
 * Returns true if succeeded, false for errors.
 */
bool ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block, Error **errp)
{
    /* from_dst_file is always valid because we're within rp_thread */
    QEMUFile *file = s->rp_state.from_dst_file;
    g_autofree unsigned long *le_bitmap = NULL;
    unsigned long nbits = block->used_length >> TARGET_PAGE_BITS;
    uint64_t local_size = DIV_ROUND_UP(nbits, 8);
    uint64_t size, end_mark;
    RAMState *rs = ram_state;

    trace_ram_dirty_bitmap_reload_begin(block->idstr);

    if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_setg(errp, "Reload bitmap in incorrect state %s",
                   MigrationStatus_str(s->state));
        return false;
    }

    /*
     * Note: see comments in ramblock_recv_bitmap_send() on why we
     * need the endianness conversion, and the paddings.
     */
    local_size = ROUND_UP(local_size, 8);

    /* Add paddings */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    size = qemu_get_be64(file);

    /* The size of the bitmap should match with our ramblock */
    if (size != local_size) {
        error_setg(errp, "ramblock '%s' bitmap size mismatch (0x%"PRIx64
                   " != 0x%"PRIx64")", block->idstr, size, local_size);
        return false;
    }

    size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
    end_mark = qemu_get_be64(file);

    if (qemu_file_get_error(file) || size != local_size) {
        error_setg(errp, "read bitmap failed for ramblock '%s': "
                   "(size 0x%"PRIx64", got: 0x%"PRIx64")",
                   block->idstr, local_size, size);
        return false;
    }

    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
        error_setg(errp, "ramblock '%s' end mark incorrect: 0x%"PRIx64,
                   block->idstr, end_mark);
        return false;
    }

    /*
     * Endianness conversion. We are during postcopy (though paused).
     * The dirty bitmap won't change. We can directly modify it.
     */
    bitmap_from_le(block->bmap, le_bitmap, nbits);

    /*
     * What we received is "received bitmap". Revert it as the initial
     * dirty bitmap for this ramblock.
     */
    bitmap_complement(block->bmap, block->bmap, nbits);

    /* Clear dirty bits of discarded ranges that we don't want to migrate. */
    ramblock_dirty_bitmap_clear_discarded_pages(block);

    /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
    trace_ram_dirty_bitmap_reload_complete(block->idstr);

    qatomic_dec(&rs->postcopy_bmap_sync_requested);

    /*
     * We have successfully synced the bitmap for the current ramblock.
     * Always kick the migration thread to check whether all requested
     * bitmaps are reloaded. NOTE: it's racy to only kick when
     * requested==0, because we don't know whether the migration thread
     * may still be increasing it.
     */
    migration_rp_kick(s);

    return true;
}
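
/*
 * SaveVMHandlers.resume_prepare callback: before resuming a paused
 * postcopy, re-sync the dirty bitmaps with the destination, then let
 * ram_state_resume_prepare() refresh the RAMState (dirty page accounting
 * and the output file) so that sending restarts from the right pages.
 */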
static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;
    int ret;

    ret = ram_dirty_bitmap_sync_all(s, rs);
    if (ret) {
        return ret;
    }

    ram_state_resume_prepare(rs, s->to_dst_file);

    return 0;
}

void postcopy_preempt_shutdown_file(MigrationState *s)
{
    qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
    qemu_fflush(s->postcopy_qemufile_src);
}
static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .state_pending_exact = ram_state_pending_exact,
    .state_pending_estimate = ram_state_pending_estimate,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
};
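
/*
 * RAMBlockNotifier callback, invoked whenever a resizable RAMBlock
 * changes its usable size. Resizes are fatal for an ongoing precopy, and
 * are only tolerated on the incoming side while postcopy is still in the
 * ADVISE stage.
 */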
static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
                                      size_t old_size, size_t new_size)
{
    PostcopyState ps = postcopy_state_get();
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
    Error *err = NULL;

    if (!rb) {
        error_report("RAM block not found");
        return;
    }

    if (migrate_ram_is_ignored(rb)) {
        return;
    }

    if (!migration_is_idle()) {
        /*
         * Precopy code on the source cannot deal with the size of RAM blocks
         * changing at random points in time - especially after sending the
         * RAM block sizes in the migration stream, they must no longer change.
         * Abort and indicate a proper reason.
         */
        error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
        migration_cancel(err);
        error_free(err);
    }

    switch (ps) {
    case POSTCOPY_INCOMING_ADVISE:
        /*
         * Update what ram_postcopy_incoming_init()->init_range() does at the
         * time postcopy was advised. Syncing RAM blocks with the source will
         * result in RAM resizes.
         */
        if (old_size < new_size) {
            if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
                error_report("RAM block '%s' discard of resized RAM failed",
                             rb->idstr);
            }
        }
        rb->postcopy_length = new_size;
        break;
    case POSTCOPY_INCOMING_NONE:
    case POSTCOPY_INCOMING_RUNNING:
    case POSTCOPY_INCOMING_END:
        /*
         * Once our guest is running, postcopy no longer cares about
         * resizes. When growing, the new memory was not available on the
         * source, so no handler is needed.
         */
        break;
    default:
        error_report("RAM block '%s' resized during postcopy state: %d",
                     rb->idstr, ps);
        exit(-1);
    }
}

static RAMBlockNotifier ram_mig_ram_notifier = {
    .ram_block_resized = ram_mig_ram_block_resized,
};
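
/*
 * One-time registration of RAM migration at startup: the XBZRLE cache
 * lock, the "ram" live section (section version 4, matching the check in
 * ram_load()), and the block-resize notifier above.
 */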
void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
    ram_block_notifier_add(&ram_mig_ram_notifier);
}