/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
29 #include "qemu/osdep.h"
32 #include "qemu/cutils.h"
33 #include "qemu/bitops.h"
34 #include "qemu/bitmap.h"
35 #include "qemu/main-loop.h"
36 #include "qemu/pmem.h"
39 #include "migration.h"
41 #include "migration/register.h"
42 #include "migration/misc.h"
43 #include "qemu-file.h"
44 #include "postcopy-ram.h"
45 #include "page_cache.h"
46 #include "qemu/error-report.h"
47 #include "qapi/error.h"
48 #include "qapi/qapi-events-migration.h"
49 #include "qapi/qmp/qerror.h"
51 #include "exec/ram_addr.h"
52 #include "exec/target_page.h"
53 #include "qemu/rcu_queue.h"
54 #include "migration/colo.h"
56 #include "sysemu/sysemu.h"
57 #include "qemu/uuid.h"
/***********************************************************/
/* ram save/restore */

/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
 * worked for pages that were filled with the same char.  We switched
 * it to only search for the zero value.  And to avoid confusion with
 * RAM_SAVE_FLAG_COMPRESS_PAGE we just renamed it.
 */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
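/*
 * On the wire each page record starts with a be64 that packs one of the
 * flags above into the low bits of the page offset; save_page_header()
 * below does the packing, and the load side is expected to mask the
 * flags back out before using the address.
 */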
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}

XBZRLECacheStats xbzrle_counters;
/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;
static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_lock(&XBZRLE.lock);
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_unlock(&XBZRLE.lock);
}
/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from qmp_migrate_set_cache_size in main
 * thread, possibly while a migration is in progress. A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock().
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(int64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}
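/*
 * Note: at the time of writing, cache_init() (page_cache.c) also rejects
 * sizes that are not a power of two or are smaller than one target page,
 * so callers of xbzrle_cache_resize() get those checks via @errp as well.
 */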
static bool ramblock_is_ignored(RAMBlock *block)
{
    return !qemu_ram_is_migratable(block) ||
           (migrate_ignore_shared() && qemu_ram_is_shared(block));
}

/* Should be holding either ram_list.mutex, or the RCU lock. */
#define RAMBLOCK_FOREACH_NOT_IGNORED(block)            \
    INTERNAL_RAMBLOCK_FOREACH(block)                   \
        if (ramblock_is_ignored(block)) {} else

#define RAMBLOCK_FOREACH_MIGRATABLE(block)             \
    INTERNAL_RAMBLOCK_FOREACH(block)                   \
        if (!qemu_ram_is_migratable(block)) {} else

#undef RAMBLOCK_FOREACH
int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        ret = func(block, opaque);
        if (ret) {
            break;
        }
    }
    return ret;
}
static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}
int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}

bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}

void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}

void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}

#define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)
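/*
 * Wire layout produced by ramblock_recv_bitmap_send() below:
 *   be64 size | little-endian bitmap (size bytes, 8-byte aligned) |
 *   be64 RAMBLOCK_RECV_BITMAP_ENDING
 * The trailing constant lets the receiver detect a corrupted stream.
 */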
/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns >0 if success with sent bytes, or <0 if error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;
    uint64_t size;

    if (!block) {
        error_report("%s: invalid block name: %s", __func__, block_name);
        return -1;
    }

    nbits = block->used_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment). So extend it a bit before hand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap. This is
     * required when source and destination VMs are not using the
     * same endianness. (Note: big endian won't work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = DIV_ROUND_UP(nbits, 8);

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines. We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit hosts.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
    qemu_fflush(file);

    g_free(le_bitmap);

    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    return size + sizeof(size);
}
/* An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};
/* State of RAM for migration */
struct RAMState {
    /* QEMUFile used for this migration */
    QEMUFile *f;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* We are in the first round */
    bool ram_bulk_stage;
    /* The free page optimization is enabled */
    bool fpo_enabled;
    /* How many times we have dirty too many pages */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;

    /* compression statistics since the beginning of the period */
    /* number of times no free thread was available to compress data */
    uint64_t compress_thread_busy_prev;
    /* amount of bytes after compression */
    uint64_t compressed_size_prev;
    /* number of compressed pages */
    uint64_t compress_pages_prev;

    /* total handled target pages at the beginning of period */
    uint64_t target_page_count_prev;
    /* total handled target pages since start */
    uint64_t target_page_count;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /* Protects modification of the bitmap and migration dirty pages */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
};
typedef struct RAMState RAMState;

static RAMState *ram_state;

static NotifierWithReturnList precopy_notifier_list;
void precopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&precopy_notifier_list);
}

void precopy_add_notifier(NotifierWithReturn *n)
{
    notifier_with_return_list_add(&precopy_notifier_list, n);
}

void precopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int precopy_notify(PrecopyNotifyReason reason, Error **errp)
{
    PrecopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
}
void precopy_enable_free_page_optimization(void)
{
    if (!ram_state) {
        return;
    }

    ram_state->fpo_enabled = true;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}

MigrationStats ram_counters;
/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock    *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool         complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;

CompressionStats compression_counters;

struct CompressParam {
    bool done;
    bool quit;
    bool zero_page;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;

    /* internally used fields */
    z_stream stream;
    uint8_t *originbuf;
};
typedef struct CompressParam CompressParam;

struct DecompressParam {
    bool done;
    bool quit;
    QemuMutex mutex;
    QemuCond cond;
    void *des;
    uint8_t *compbuf;
    int len;
    z_stream stream;
};
typedef struct DecompressParam DecompressParam;
static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static QEMUFile *decomp_file;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;

static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
                                 ram_addr_t offset, uint8_t *source_buf);
static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;
    RAMBlock *block;
    ram_addr_t offset;
    bool zero_page;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->block) {
            block = param->block;
            offset = param->offset;
            param->block = NULL;
            qemu_mutex_unlock(&param->mutex);

            zero_page = do_compress_ram_page(param->file, &param->stream,
                                             block, offset, param->originbuf);

            qemu_mutex_lock(&comp_done_lock);
            param->done = true;
            param->zero_page = zero_page;
            qemu_cond_signal(&comp_done_cond);
            qemu_mutex_unlock(&comp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}
static void compress_threads_save_cleanup(void)
{
    int i, thread_count;

    if (!migrate_use_compression() || !comp_param) {
        return;
    }

    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        /*
         * we use it as an indicator which shows if the thread is
         * properly init'd or not
         */
        if (!comp_param[i].file) {
            break;
        }

        qemu_mutex_lock(&comp_param[i].mutex);
        comp_param[i].quit = true;
        qemu_cond_signal(&comp_param[i].cond);
        qemu_mutex_unlock(&comp_param[i].mutex);

        qemu_thread_join(compress_threads + i);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
        deflateEnd(&comp_param[i].stream);
        g_free(comp_param[i].originbuf);
        qemu_fclose(comp_param[i].file);
        comp_param[i].file = NULL;
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}
static int compress_threads_save_setup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!comp_param[i].originbuf) {
            goto exit;
        }

        if (deflateInit(&comp_param[i].stream,
                        migrate_compress_level()) != Z_OK) {
            g_free(comp_param[i].originbuf);
            goto exit;
        }

        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
    return 0;

exit:
    compress_threads_save_cleanup();
    return -1;
}
/* Multiple fd's */

#define MULTIFD_MAGIC 0x11223344U
#define MULTIFD_VERSION 1

#define MULTIFD_FLAG_SYNC (1 << 0)

/* This value needs to be a multiple of qemu_target_page_size() */
#define MULTIFD_PACKET_SIZE (512 * 1024)
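/*
 * With the common 4 KiB target page size this packet size works out to
 * 512 * 1024 / 4096 = 128 pages per multifd packet; architectures with
 * larger target pages simply carry fewer pages per packet.
 */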
typedef struct {
    uint32_t magic;
    uint32_t version;
    unsigned char uuid[16]; /* QemuUUID */
    uint8_t id;
    uint8_t unused1[7];     /* Reserved for future use */
    uint64_t unused2[4];    /* Reserved for future use */
} __attribute__((packed)) MultiFDInit_t;
typedef struct {
    uint32_t magic;
    uint32_t version;
    uint32_t flags;
    /* maximum number of allocated pages */
    uint32_t pages_alloc;
    uint32_t pages_used;
    /* size of the next packet that contains pages */
    uint32_t next_packet_size;
    uint64_t packet_num;
    uint64_t unused[4];    /* Reserved for future use */
    char ramblock[256];
    uint64_t offset[];
} __attribute__((packed)) MultiFDPacket_t;
typedef struct {
    /* number of used pages */
    uint32_t used;
    /* number of allocated pages */
    uint32_t allocated;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* offset of each page */
    ram_addr_t *offset;
    /* pointer to each page */
    struct iovec *iov;
    RAMBlock *block;
} MultiFDPages_t;
typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* sem where to wait for more work */
    QemuSemaphore sem;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* should this thread finish */
    bool quit;
    /* thread has work to do */
    int pending_job;
    /* array of pages to send */
    MultiFDPages_t *pages;
    /* packet allocated len */
    uint32_t packet_len;
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    uint32_t flags;
    /* size of the next packet that contains pages */
    uint32_t next_packet_size;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* thread local variables */
    /* packets sent through this channel */
    uint64_t num_packets;
    /* pages sent through this channel */
    uint64_t num_pages;
} MultiFDSendParams;
typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* array of pages to receive */
    MultiFDPages_t *pages;
    /* packet allocated len */
    uint32_t packet_len;
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    uint32_t flags;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* thread local variables */
    /* size of the next packet that contains pages */
    uint32_t next_packet_size;
    /* packets received through this channel */
    uint64_t num_packets;
    /* pages received through this channel */
    uint64_t num_pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
} MultiFDRecvParams;
static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    msg.magic = cpu_to_be32(MULTIFD_MAGIC);
    msg.version = cpu_to_be32(MULTIFD_VERSION);
    msg.id = p->id;
    memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));

    ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }
    return 0;
}
static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }

    msg.magic = be32_to_cpu(msg.magic);
    msg.version = be32_to_cpu(msg.version);

    if (msg.magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet magic %x "
                   "expected %x", msg.magic, MULTIFD_MAGIC);
        return -1;
    }

    if (msg.version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet version %d "
                   "expected %d", msg.version, MULTIFD_VERSION);
        return -1;
    }

    if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
        char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
        char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);

        error_setg(errp, "multifd: received uuid '%s' and expected "
                   "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
        g_free(uuid);
        g_free(msg_uuid);
        return -1;
    }

    if (msg.id > migrate_multifd_channels()) {
        error_setg(errp, "multifd: received channel id %d "
                   "expected a value below %d", msg.id,
                   migrate_multifd_channels());
        return -1;
    }

    return msg.id;
}
static MultiFDPages_t *multifd_pages_init(size_t size)
{
    MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);

    pages->allocated = size;
    pages->iov = g_new0(struct iovec, size);
    pages->offset = g_new0(ram_addr_t, size);

    return pages;
}

static void multifd_pages_clear(MultiFDPages_t *pages)
{
    pages->used = 0;
    pages->allocated = 0;
    pages->packet_num = 0;
    pages->block = NULL;
    g_free(pages->iov);
    pages->iov = NULL;
    g_free(pages->offset);
    pages->offset = NULL;
    g_free(pages);
}
static void multifd_send_fill_packet(MultiFDSendParams *p)
{
    MultiFDPacket_t *packet = p->packet;
    uint32_t page_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
    int i;

    packet->magic = cpu_to_be32(MULTIFD_MAGIC);
    packet->version = cpu_to_be32(MULTIFD_VERSION);
    packet->flags = cpu_to_be32(p->flags);
    packet->pages_alloc = cpu_to_be32(page_max);
    packet->pages_used = cpu_to_be32(p->pages->used);
    packet->next_packet_size = cpu_to_be32(p->next_packet_size);
    packet->packet_num = cpu_to_be64(p->packet_num);

    if (p->pages->block) {
        strncpy(packet->ramblock, p->pages->block->idstr, 256);
    }

    for (i = 0; i < p->pages->used; i++) {
        packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
    }
}
static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
{
    MultiFDPacket_t *packet = p->packet;
    uint32_t pages_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
    RAMBlock *block;
    int i;

    packet->magic = be32_to_cpu(packet->magic);
    if (packet->magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet "
                   "magic %x and expected magic %x",
                   packet->magic, MULTIFD_MAGIC);
        return -1;
    }

    packet->version = be32_to_cpu(packet->version);
    if (packet->version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet "
                   "version %d and expected version %d",
                   packet->version, MULTIFD_VERSION);
        return -1;
    }

    p->flags = be32_to_cpu(packet->flags);

    packet->pages_alloc = be32_to_cpu(packet->pages_alloc);
    /*
     * If we received a packet that is 100 times bigger than expected
     * just stop migration.  It is a magic number.
     */
    if (packet->pages_alloc > pages_max * 100) {
        error_setg(errp, "multifd: received packet "
                   "with size %d and expected a maximum size of %d",
                   packet->pages_alloc, pages_max * 100);
        return -1;
    }
    /*
     * We received a packet that is bigger than expected but inside
     * reasonable limits (see previous comment).  Just reallocate.
     */
    if (packet->pages_alloc > p->pages->allocated) {
        multifd_pages_clear(p->pages);
        p->pages = multifd_pages_init(packet->pages_alloc);
    }

    p->pages->used = be32_to_cpu(packet->pages_used);
    if (p->pages->used > packet->pages_alloc) {
        error_setg(errp, "multifd: received packet "
                   "with %d pages and expected a maximum of %d pages",
                   p->pages->used, packet->pages_alloc);
        return -1;
    }

    p->next_packet_size = be32_to_cpu(packet->next_packet_size);
    p->packet_num = be64_to_cpu(packet->packet_num);

    if (p->pages->used) {
        /* make sure that ramblock is 0 terminated */
        packet->ramblock[255] = 0;
        block = qemu_ram_block_by_name(packet->ramblock);
        if (!block) {
            error_setg(errp, "multifd: unknown ram block %s",
                       packet->ramblock);
            return -1;
        }
    }

    for (i = 0; i < p->pages->used; i++) {
        ram_addr_t offset = be64_to_cpu(packet->offset[i]);

        if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
            error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
                       " (max " RAM_ADDR_FMT ")",
                       offset, block->max_length);
            return -1;
        }
        p->pages->iov[i].iov_base = block->host + offset;
        p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
    }

    return 0;
}
struct {
    MultiFDSendParams *params;
    /* number of created threads */
    int count;
    /* array of pages to send */
    MultiFDPages_t *pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* send channels ready */
    QemuSemaphore channels_ready;
} *multifd_send_state;
/*
 * How we use multifd_send_state->pages and channel->pages:
 *
 * We create a pages array for each channel, and a main one.  Each time
 * that we need to send a batch of pages we interchange the ones between
 * multifd_send_state and the channel that is sending it.  There are
 * two reasons for that:
 *    - to not have to do so many mallocs during migration
 *    - to make it easier to know what to free at the end of migration
 *
 * This way we always know who is the owner of each "pages" struct,
 * and we don't need any locking.  It belongs to the migration thread
 * or to the channel thread.  Switching is safe because the migration
 * thread is using the channel mutex when changing it, and the channel
 * thread has to have finished with its own, otherwise pending_job can't
 * be false.
 */
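/*
 * Sketch of the swap performed in multifd_send_pages() below: the
 * migration thread hands its filled "pages" array to an idle channel and
 * takes that channel's drained array back, so exactly one array is in
 * flight per channel at any time.
 */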
static void multifd_send_pages(void)
{
    int i;
    static int next_channel;
    MultiFDSendParams *p = NULL; /* make gcc happy */
    MultiFDPages_t *pages = multifd_send_state->pages;
    uint64_t transferred;

    qemu_sem_wait(&multifd_send_state->channels_ready);
    for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
        p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        if (!p->pending_job) {
            p->pending_job++;
            next_channel = (i + 1) % migrate_multifd_channels();
            break;
        }
        qemu_mutex_unlock(&p->mutex);
    }
    p->pages->used = 0;
    p->packet_num = multifd_send_state->packet_num++;
    p->pages->block = NULL;
    multifd_send_state->pages = p->pages;
    p->pages = pages;
    transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
    ram_counters.multifd_bytes += transferred;
    ram_counters.transferred += transferred;
    qemu_mutex_unlock(&p->mutex);
    qemu_sem_post(&p->sem);
}
static void multifd_queue_page(RAMBlock *block, ram_addr_t offset)
{
    MultiFDPages_t *pages = multifd_send_state->pages;

    if (!pages->block) {
        pages->block = block;
    }

    if (pages->block == block) {
        pages->offset[pages->used] = offset;
        pages->iov[pages->used].iov_base = block->host + offset;
        pages->iov[pages->used].iov_len = TARGET_PAGE_SIZE;
        pages->used++;

        if (pages->used < pages->allocated) {
            return;
        }
    }

    multifd_send_pages();

    if (pages->block != block) {
        multifd_queue_page(block, offset);
    }
}
static void multifd_send_terminate_threads(Error *err)
{
    int i;

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
            s->state == MIGRATION_STATUS_DEVICE ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        p->quit = true;
        qemu_sem_post(&p->sem);
        qemu_mutex_unlock(&p->mutex);
    }
}
void multifd_save_cleanup(void)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    multifd_send_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        if (p->running) {
            qemu_thread_join(&p->thread);
        }
        socket_send_channel_destroy(p->c);
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem);
        g_free(p->name);
        p->name = NULL;
        multifd_pages_clear(p->pages);
        p->pages = NULL;
        p->packet_len = 0;
        g_free(p->packet);
        p->packet = NULL;
    }
    qemu_sem_destroy(&multifd_send_state->channels_ready);
    qemu_sem_destroy(&multifd_send_state->sem_sync);
    g_free(multifd_send_state->params);
    multifd_send_state->params = NULL;
    multifd_pages_clear(multifd_send_state->pages);
    multifd_send_state->pages = NULL;
    g_free(multifd_send_state);
    multifd_send_state = NULL;
}
static void multifd_send_sync_main(void)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    if (multifd_send_state->pages->used) {
        multifd_send_pages();
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_signal(p->id);

        qemu_mutex_lock(&p->mutex);

        p->packet_num = multifd_send_state->packet_num++;
        p->flags |= MULTIFD_FLAG_SYNC;
        p->pending_job++;
        qemu_mutex_unlock(&p->mutex);
        qemu_sem_post(&p->sem);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_wait(p->id);
        qemu_sem_wait(&multifd_send_state->sem_sync);
    }
    trace_multifd_send_sync_main(multifd_send_state->packet_num);
}
static void *multifd_send_thread(void *opaque)
{
    MultiFDSendParams *p = opaque;
    Error *local_err = NULL;
    int ret;

    trace_multifd_send_thread_start(p->id);
    rcu_register_thread();

    if (multifd_send_initial_packet(p, &local_err) < 0) {
        goto out;
    }
    /* initial packet */
    p->num_packets = 1;

    while (true) {
        qemu_sem_wait(&p->sem);
        qemu_mutex_lock(&p->mutex);

        if (p->pending_job) {
            uint32_t used = p->pages->used;
            uint64_t packet_num = p->packet_num;
            uint32_t flags = p->flags;

            p->next_packet_size = used * qemu_target_page_size();
            multifd_send_fill_packet(p);
            p->flags = 0;
            p->num_packets++;
            p->num_pages += used;
            p->pages->used = 0;
            qemu_mutex_unlock(&p->mutex);

            trace_multifd_send(p->id, packet_num, used, flags,
                               p->next_packet_size);

            ret = qio_channel_write_all(p->c, (void *)p->packet,
                                        p->packet_len, &local_err);
            if (ret != 0) {
                break;
            }

            if (used) {
                ret = qio_channel_writev_all(p->c, p->pages->iov,
                                             used, &local_err);
                if (ret != 0) {
                    break;
                }
            }

            qemu_mutex_lock(&p->mutex);
            p->pending_job--;
            qemu_mutex_unlock(&p->mutex);

            if (flags & MULTIFD_FLAG_SYNC) {
                qemu_sem_post(&multifd_send_state->sem_sync);
            }
            qemu_sem_post(&multifd_send_state->channels_ready);
        } else if (p->quit) {
            qemu_mutex_unlock(&p->mutex);
            break;
        } else {
            qemu_mutex_unlock(&p->mutex);
            /* sometimes there are spurious wakeups */
        }
    }

out:
    if (local_err) {
        multifd_send_terminate_threads(local_err);
    }

    qemu_mutex_lock(&p->mutex);
    p->running = false;
    qemu_mutex_unlock(&p->mutex);

    rcu_unregister_thread();
    trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);

    return NULL;
}
static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
{
    MultiFDSendParams *p = opaque;
    QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
    Error *local_err = NULL;

    if (qio_task_propagate_error(task, &local_err)) {
        migrate_set_error(migrate_get_current(), local_err);
        multifd_save_cleanup();
    } else {
        p->c = QIO_CHANNEL(sioc);
        qio_channel_set_delay(p->c, false);
        p->running = true;
        qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
                           QEMU_THREAD_JOINABLE);

        atomic_inc(&multifd_send_state->count);
    }
}
int multifd_save_setup(void)
{
    int thread_count;
    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }
    thread_count = migrate_multifd_channels();
    multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
    multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
    atomic_set(&multifd_send_state->count, 0);
    multifd_send_state->pages = multifd_pages_init(page_count);
    qemu_sem_init(&multifd_send_state->sem_sync, 0);
    qemu_sem_init(&multifd_send_state->channels_ready, 0);

    for (i = 0; i < thread_count; i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem, 0);
        p->quit = false;
        p->pending_job = 0;
        p->id = i;
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(ram_addr_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->name = g_strdup_printf("multifdsend_%d", i);
        socket_send_channel_create(multifd_new_send_channel_async, p);
    }
    return 0;
}
struct {
    MultiFDRecvParams *params;
    /* number of created threads */
    int count;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
    /* global number of generated multifd packets */
    uint64_t packet_num;
} *multifd_recv_state;
static void multifd_recv_terminate_threads(Error *err)
{
    int i;

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_lock(&p->mutex);
        /* We could arrive here for two reasons:
         *  - normal quit, i.e. everything went fine, just finished
         *  - error quit: We close the channels so the channel threads
         *    finish the qio_channel_read_all_eof()
         */
        qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        qemu_mutex_unlock(&p->mutex);
    }
}
int multifd_load_cleanup(Error **errp)
{
    int i;
    int ret = 0;

    if (!migrate_use_multifd()) {
        return 0;
    }
    multifd_recv_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        if (p->running) {
            qemu_thread_join(&p->thread);
        }
        object_unref(OBJECT(p->c));
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem_sync);
        g_free(p->name);
        p->name = NULL;
        multifd_pages_clear(p->pages);
        p->pages = NULL;
        p->packet_len = 0;
        g_free(p->packet);
        p->packet = NULL;
    }
    qemu_sem_destroy(&multifd_recv_state->sem_sync);
    g_free(multifd_recv_state->params);
    multifd_recv_state->params = NULL;
    g_free(multifd_recv_state);
    multifd_recv_state = NULL;

    return ret;
}
static void multifd_recv_sync_main(void)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        trace_multifd_recv_sync_main_wait(p->id);
        qemu_sem_wait(&multifd_recv_state->sem_sync);
        qemu_mutex_lock(&p->mutex);
        if (multifd_recv_state->packet_num < p->packet_num) {
            multifd_recv_state->packet_num = p->packet_num;
        }
        qemu_mutex_unlock(&p->mutex);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        trace_multifd_recv_sync_main_signal(p->id);
        qemu_sem_post(&p->sem_sync);
    }
    trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
}
static void *multifd_recv_thread(void *opaque)
{
    MultiFDRecvParams *p = opaque;
    Error *local_err = NULL;
    int ret;

    trace_multifd_recv_thread_start(p->id);
    rcu_register_thread();

    while (true) {
        uint32_t used;
        uint32_t flags;

        ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
                                       p->packet_len, &local_err);
        if (ret == 0) {   /* EOF */
            break;
        }
        if (ret == -1) {   /* Error */
            break;
        }

        qemu_mutex_lock(&p->mutex);
        ret = multifd_recv_unfill_packet(p, &local_err);
        if (ret) {
            qemu_mutex_unlock(&p->mutex);
            break;
        }

        used = p->pages->used;
        flags = p->flags;
        trace_multifd_recv(p->id, p->packet_num, used, flags,
                           p->next_packet_size);
        p->num_packets++;
        p->num_pages += used;
        qemu_mutex_unlock(&p->mutex);

        if (used) {
            ret = qio_channel_readv_all(p->c, p->pages->iov,
                                        used, &local_err);
            if (ret != 0) {
                break;
            }
        }

        if (flags & MULTIFD_FLAG_SYNC) {
            qemu_sem_post(&multifd_recv_state->sem_sync);
            qemu_sem_wait(&p->sem_sync);
        }
    }

    if (local_err) {
        multifd_recv_terminate_threads(local_err);
    }
    qemu_mutex_lock(&p->mutex);
    p->running = false;
    qemu_mutex_unlock(&p->mutex);

    rcu_unregister_thread();
    trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);

    return NULL;
}
int multifd_load_setup(void)
{
    int thread_count;
    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }
    thread_count = migrate_multifd_channels();
    multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
    multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
    atomic_set(&multifd_recv_state->count, 0);
    qemu_sem_init(&multifd_recv_state->sem_sync, 0);

    for (i = 0; i < thread_count; i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem_sync, 0);
        p->id = i;
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(ram_addr_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->name = g_strdup_printf("multifdrecv_%d", i);
    }
    return 0;
}
bool multifd_recv_all_channels_created(void)
{
    int thread_count = migrate_multifd_channels();

    if (!migrate_use_multifd()) {
        return true;
    }

    return thread_count == atomic_read(&multifd_recv_state->count);
}
/*
 * Try to receive all multifd channels to get ready for the migration.
 * - Return true and do not set @errp when correctly receiving all channels;
 * - Return false and do not set @errp when correctly receiving the current one;
 * - Return false and set @errp when failing to receive the current channel.
 */
bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
{
    MultiFDRecvParams *p;
    Error *local_err = NULL;
    int id;

    id = multifd_recv_initial_packet(ioc, &local_err);
    if (id < 0) {
        multifd_recv_terminate_threads(local_err);
        error_propagate_prepend(errp, local_err,
                                "failed to receive packet"
                                " via multifd channel %d: ",
                                atomic_read(&multifd_recv_state->count));
        return false;
    }

    p = &multifd_recv_state->params[id];
    if (p->c != NULL) {
        error_setg(&local_err, "multifd: received id '%d' already setup'",
                   id);
        multifd_recv_terminate_threads(local_err);
        error_propagate(errp, local_err);
        return false;
    }
    p->c = ioc;
    object_ref(OBJECT(ioc));
    /* initial packet */
    p->num_packets = 1;

    p->running = true;
    qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
                       QEMU_THREAD_JOINABLE);
    atomic_inc(&multifd_recv_state->count);
    return atomic_read(&multifd_recv_state->count) ==
           migrate_multifd_channels();
}
/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
                               ram_addr_t offset)
{
    size_t size, len;

    if (block == rs->last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        rs->last_sent_block = block;
    }
    return size;
}
/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes. If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(void)
{
    MigrationState *s = migrate_get_current();
    uint64_t pct_initial = s->parameters.cpu_throttle_initial;
    uint64_t pct_increment = s->parameters.cpu_throttle_increment;
    int pct_max = s->parameters.max_cpu_throttle;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_increment,
                             pct_max));
    }
}
/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @rs: current RAM state
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
{
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
                 ram_counters.dirty_sync_count);
}
#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 */
static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr,
                         ram_counters.dirty_sync_count)) {
        xbzrle_counters.cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             ram_counters.dirty_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        xbzrle_counters.overflow++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(rs, rs->f, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(rs->f, encoded_len);
    qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    xbzrle_counters.pages++;
    xbzrle_counters.bytes += bytes_xbzrle;
    ram_counters.transferred += bytes_xbzrle;

    return 1;
}
/**
 * migration_bitmap_find_dirty: find the next dirty page from start
 *
 * Returns the page offset within memory region of the start of a dirty page
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 */
static inline
unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                          unsigned long start)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long next;

    if (ramblock_is_ignored(rb)) {
        return size;
    }

    /*
     * When the free page optimization is enabled, we need to check the bitmap
     * to send the non-free pages rather than all the pages in the bulk stage.
     */
    if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
        next = start + 1;
    } else {
        next = find_next_bit(bitmap, size, start);
    }

    return next;
}
static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                RAMBlock *rb,
                                                unsigned long page)
{
    bool ret;

    qemu_mutex_lock(&rs->bitmap_mutex);
    ret = test_and_clear_bit(page, rb->bmap);

    if (ret) {
        rs->migration_dirty_pages--;
    }
    qemu_mutex_unlock(&rs->bitmap_mutex);

    return ret;
}
static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
                                        ram_addr_t length)
{
    rs->migration_dirty_pages +=
        cpu_physical_memory_sync_dirty_bitmap(rb, 0, length,
                                              &rs->num_dirty_pages_period);
}
/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size. If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    RAMBlock *block;
    uint64_t summary = 0;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        summary |= block->page_size;
    }

    return summary;
}

uint64_t ram_get_total_transferred_pages(void)
{
    return  ram_counters.normal + ram_counters.duplicate +
                compression_counters.pages + xbzrle_counters.pages;
}
static void migration_update_rates(RAMState *rs, int64_t end_time)
{
    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
    double compressed_size;

    /* calculate period counters */
    ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
                / (end_time - rs->time_last_bitmap_sync);

    if (!page_count) {
        return;
    }

    if (migrate_use_xbzrle()) {
        xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
            rs->xbzrle_cache_miss_prev) / page_count;
        rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
    }

    if (migrate_use_compression()) {
        compression_counters.busy_rate = (double)(compression_counters.busy -
            rs->compress_thread_busy_prev) / page_count;
        rs->compress_thread_busy_prev = compression_counters.busy;

        compressed_size = compression_counters.compressed_size -
                          rs->compressed_size_prev;
        if (compressed_size) {
            double uncompressed_size = (compression_counters.pages -
                                    rs->compress_pages_prev) * TARGET_PAGE_SIZE;

            /* Compression-Ratio = Uncompressed-size / Compressed-size */
            compression_counters.compression_rate =
                                        uncompressed_size / compressed_size;

            rs->compress_pages_prev = compression_counters.pages;
            rs->compressed_size_prev = compression_counters.compressed_size;
        }
    }
}
static void migration_bitmap_sync(RAMState *rs)
{
    RAMBlock *block;
    int64_t end_time;
    uint64_t bytes_xfer_now;

    ram_counters.dirty_sync_count++;

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync();

    qemu_mutex_lock(&rs->bitmap_mutex);
    rcu_read_lock();
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        migration_bitmap_sync_range(rs, block, block->used_length);
    }
    ram_counters.remaining = ram_bytes_remaining();
    rcu_read_unlock();
    qemu_mutex_unlock(&rs->bitmap_mutex);

    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        bytes_xfer_now = ram_counters.transferred;

        /* During block migration the auto-converge logic incorrectly detects
         * that ram migration makes no progress. Avoid this by disabling the
         * throttling logic during the bulk phase of block migration. */
        if (migrate_auto_converge() && !blk_mig_bulk_active()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes is 50% more than the approx.
               amount of bytes that just got transferred since the last time we
               were in this routine. If that happens twice, start or increase
               throttling */

            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
                   (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
                (++rs->dirty_rate_high_cnt >= 2)) {
                    trace_migration_throttle();
                    rs->dirty_rate_high_cnt = 0;
                    mig_throttle_guest_down();
            }
        }

        migration_update_rates(rs, end_time);

        rs->target_page_count_prev = rs->target_page_count;

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
        rs->bytes_xfer_prev = bytes_xfer_now;
    }
    if (migrate_use_events()) {
        qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
    }
}
static void migration_bitmap_sync_precopy(RAMState *rs)
{
    Error *local_err = NULL;

    /*
     * The current notifier usage is just an optimization to migration, so we
     * don't stop the normal migration process in the error case.
     */
    if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
        error_report_err(local_err);
        local_err = NULL;
    }

    migration_bitmap_sync(rs);

    if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
        error_report_err(local_err);
    }
}
/**
 * save_zero_page_to_file: send the zero page to the file
 *
 * Returns the size of data written to the file, 0 means the page is not
 * a zero page
 *
 * @rs: current RAM state
 * @file: the file where the data is saved
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
                                  RAMBlock *block, ram_addr_t offset)
{
    uint8_t *p = block->host + offset;
    int len = 0;

    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
        qemu_put_byte(file, 0);
        len += 1;
    }
    return len;
}
/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
{
    int len = save_zero_page_to_file(rs, rs->f, block, offset);

    if (len) {
        ram_counters.duplicate++;
        ram_counters.transferred += len;
        return 1;
    }
    return -1;
}
static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
{
    if (!migrate_release_ram() || !migration_in_postcopy()) {
        return;
    }

    ram_discard_range(rbname, offset, ((ram_addr_t)pages) << TARGET_PAGE_BITS);
}
/*
 * @pages: the number of pages written by the control path,
 *        < 0 - error
 *        > 0 - number of pages written
 *
 * Return true if the page has been saved, otherwise false is returned.
 */
static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
                              int *pages)
{
    uint64_t bytes_xmit = 0;
    int ret;

    *pages = -1;
    ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
                                &bytes_xmit);
    if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
        return false;
    }

    if (bytes_xmit) {
        ram_counters.transferred += bytes_xmit;
        *pages = 1;
    }

    if (ret == RAM_SAVE_CONTROL_DELAYED) {
        return true;
    }

    if (bytes_xmit > 0) {
        ram_counters.normal++;
    } else if (bytes_xmit == 0) {
        ram_counters.duplicate++;
    }

    return true;
}
/*
 * directly send the page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @buf: the page to be sent
 * @async: send the page asynchronously
 */
static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
                            uint8_t *buf, bool async)
{
    ram_counters.transferred += save_page_header(rs, rs->f, block,
                                                 offset | RAM_SAVE_FLAG_PAGE);
    if (async) {
        qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
                              migrate_release_ram() &&
                              migration_in_postcopy());
    } else {
        qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
    }
    ram_counters.transferred += TARGET_PAGE_SIZE;
    ram_counters.normal++;
    return 1;
}
1960 * Returns the number of pages written.
1962 * >=0 - Number of pages written - this might legally be 0
1963 * if xbzrle noticed the page was the same.
1965 * @rs: current RAM state
1966 * @block: block that contains the page we want to send
1967 * @offset: offset inside the block for the page
1968 * @last_stage: if we are at the completion stage
1970 static int ram_save_page(RAMState
*rs
, PageSearchStatus
*pss
, bool last_stage
)
1974 bool send_async
= true;
1975 RAMBlock
*block
= pss
->block
;
1976 ram_addr_t offset
= pss
->page
<< TARGET_PAGE_BITS
;
1977 ram_addr_t current_addr
= block
->offset
+ offset
;
1979 p
= block
->host
+ offset
;
1980 trace_ram_save_page(block
->idstr
, (uint64_t)offset
, p
);
1982 XBZRLE_cache_lock();
1983 if (!rs
->ram_bulk_stage
&& !migration_in_postcopy() &&
1984 migrate_use_xbzrle()) {
1985 pages
= save_xbzrle_page(rs
, &p
, current_addr
, block
,
1986 offset
, last_stage
);
1988 /* Can't send this cached data async, since the cache page
1989 * might get updated before it gets to the wire
1995 /* XBZRLE overflow or normal page */
1997 pages
= save_normal_page(rs
, block
, offset
, p
, send_async
);
2000 XBZRLE_cache_unlock();
static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
                                 ram_addr_t offset)
{
    multifd_queue_page(block, offset);
    ram_counters.normal++;

    return 1;
}
static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
                                 ram_addr_t offset, uint8_t *source_buf)
{
    RAMState *rs = ram_state;
    uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
    bool zero_page = false;
    int ret;

    if (save_zero_page_to_file(rs, f, block, offset)) {
        zero_page = true;
        goto exit;
    }

    save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);

    /*
     * copy it to an internal buffer to avoid it being modified by the VM
     * so that we can catch the error during compression and
     * decompression
     */
    memcpy(source_buf, p, TARGET_PAGE_SIZE);
    ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
    if (ret < 0) {
        qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
        error_report("compressed data failed!");
        return false;
    }

exit:
    ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
    return zero_page;
}
static void
update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
{
    ram_counters.transferred += bytes_xmit;

    if (param->zero_page) {
        ram_counters.duplicate++;
        return;
    }

    /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
    compression_counters.compressed_size += bytes_xmit - 8;
    compression_counters.pages++;
}
static bool save_page_use_compression(RAMState *rs);

static void flush_compressed_data(RAMState *rs)
{
    int idx, len, thread_count;

    if (!save_page_use_compression(rs)) {
        return;
    }
    thread_count = migrate_compress_threads();

    qemu_mutex_lock(&comp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!comp_param[idx].done) {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        if (!comp_param[idx].quit) {
            len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
            /*
             * it's safe to fetch zero_page without holding comp_done_lock
             * as there is no further request submitted to the thread,
             * i.e, the thread should be waiting for a request at this point.
             */
            update_compress_thread_counts(&comp_param[idx], len);
        }
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}
static inline void set_compress_params(CompressParam *param, RAMBlock *block,
                                       ram_addr_t offset)
{
    param->block = block;
    param->offset = offset;
}

static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
                                           ram_addr_t offset)
{
    int idx, thread_count, bytes_xmit = -1, pages = -1;
    bool wait = migrate_compress_wait_thread();

    thread_count = migrate_compress_threads();
    qemu_mutex_lock(&comp_done_lock);
retry:
    for (idx = 0; idx < thread_count; idx++) {
        if (comp_param[idx].done) {
            comp_param[idx].done = false;
            bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
            qemu_mutex_lock(&comp_param[idx].mutex);
            set_compress_params(&comp_param[idx], block, offset);
            qemu_cond_signal(&comp_param[idx].cond);
            qemu_mutex_unlock(&comp_param[idx].mutex);
            pages = 1;
            update_compress_thread_counts(&comp_param[idx], bytes_xmit);
            break;
        }
    }

    /*
     * wait for a free thread if the user specifies 'compress-wait-thread',
     * otherwise we will post the page out in the main thread as normal page.
     */
    if (pages < 0 && wait) {
        qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        goto retry;
    }
    qemu_mutex_unlock(&comp_done_lock);

    return pages;
}
/**
 * find_dirty_block: find the next dirty page and update any state
 * associated with the search process.
 *
 * Returns true if a page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 * @again: set to false if the search has scanned the whole of RAM
 */
static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
{
    pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
    if (pss->complete_round && pss->block == rs->last_seen_block &&
        pss->page >= rs->last_page) {
        /*
         * We've been once around the RAM and haven't found anything.
         * Give up.
         */
        *again = false;
        return false;
    }
    if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
        /* Didn't find anything in this RAM Block */
        pss->page = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            /*
             * If memory migration starts over, we will meet a dirtied page
             * which may still exist in the compression threads' ring, so we
             * should flush the compressed data to make sure the new page
             * is not overwritten by the old one in the destination.
             *
             * Also if xbzrle is on, stop using the data compression at this
             * point. In theory, xbzrle can do better than compression.
             */
            flush_compressed_data(rs);

            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            rs->ram_bulk_stage = false;
        }
        /* Didn't find anything this time, but try again on the new block */
        *again = true;
        return false;
    } else {
        /* Can go around again, but... */
        *again = true;
        /* We've found something so probably don't need to */
        return true;
    }
}
/**
 * unqueue_page: gets a page of the queue
 *
 * Helper for 'get_queued_page' - gets a page off the queue
 *
 * Returns the block of the page (or NULL if none available)
 *
 * @rs: current RAM state
 * @offset: used to return the offset within the RAMBlock
 */
static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
{
    RAMBlock *block = NULL;

    if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
        return NULL;
    }

    qemu_mutex_lock(&rs->src_page_req_mutex);
    if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
        struct RAMSrcPageRequest *entry =
                                QSIMPLEQ_FIRST(&rs->src_page_requests);
        block = entry->rb;
        *offset = entry->offset;

        if (entry->len > TARGET_PAGE_SIZE) {
            entry->len -= TARGET_PAGE_SIZE;
            entry->offset += TARGET_PAGE_SIZE;
        } else {
            memory_region_unref(block->mr);
            QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
            g_free(entry);
            migration_consume_urgent_request();
        }
    }
    qemu_mutex_unlock(&rs->src_page_req_mutex);

    return block;
}
/**
 * get_queued_page: unqueue a page from the postcopy requests
 *
 * Skips pages that are already sent (!dirty)
 *
 * Returns true if a queued page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 */
static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock  *block;
    ram_addr_t offset;
    bool dirty;

    do {
        block = unqueue_page(rs, &offset);
        /*
         * We're sending this page, and since it's postcopy nothing else
         * will dirty it, and we must make sure it doesn't get sent again
         * even if this queue request was received after the background
         * search already sent it.
         */
        if (block) {
            unsigned long page;

            page = offset >> TARGET_PAGE_BITS;
            dirty = test_bit(page, block->bmap);
            if (!dirty) {
                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
                       page, test_bit(page, block->unsentmap));
            } else {
                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
            }
        }

    } while (block && !dirty);

    if (block) {
        /*
         * As soon as we start servicing pages out of order, then we have
         * to kill the bulk stage, since the bulk stage assumes
         * in (migration_bitmap_find_and_reset_dirty) that every page is
         * dirty, that's no longer true.
         */
        rs->ram_bulk_stage = false;

        /*
         * We want the background search to continue from the queued page
         * since the guest is likely to want other pages near to the page
         * it just requested.
         */
        pss->block = block;
        pss->page = offset >> TARGET_PAGE_BITS;
    }

    return !!block;
}

/**
 * migration_page_queue_free: drop any remaining pages in the ram
 * request queue
 *
 * It should be empty at the end anyway, but in error cases there may
 * be some left. In case any page is left, we drop it.
 */
static void migration_page_queue_free(RAMState *rs)
{
    struct RAMSrcPageRequest *mspr, *next_mspr;
    /* This queue generally should be empty - but in the case of a failed
     * migration might have some droppings in.
     */
    rcu_read_lock();
    QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
        memory_region_unref(mspr->rb->mr);
        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
        g_free(mspr);
    }
    rcu_read_unlock();
}

/**
 * ram_save_queue_pages: queue the page for transmission
 *
 * A request from postcopy destination for example.
 *
 * Returns zero on success or negative on error
 *
 * @rbname: Name of the RAMBlock of the request. NULL means the
 *          same as the last one.
 * @start: starting address from the start of the RAMBlock
 * @len: length (in bytes) to send
 */
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
{
    RAMBlock *ramblock;
    RAMState *rs = ram_state;

    ram_counters.postcopy_requests++;
    rcu_read_lock();
    if (!rbname) {
        /* Reuse last RAMBlock */
        ramblock = rs->last_req_rb;

        if (!ramblock) {
            /*
             * Shouldn't happen, we can't reuse the last RAMBlock if
             * it's the 1st request.
             */
            error_report("ram_save_queue_pages no previous block");
            goto err;
        }
    } else {
        ramblock = qemu_ram_block_by_name(rbname);

        if (!ramblock) {
            /* We shouldn't be asked for a non-existent RAMBlock */
            error_report("ram_save_queue_pages no block '%s'", rbname);
            goto err;
        }
        rs->last_req_rb = ramblock;
    }
    trace_ram_save_queue_pages(ramblock->idstr, start, len);
    if (start + len > ramblock->used_length) {
        error_report("%s request overrun start=" RAM_ADDR_FMT " len="
                     RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
                     __func__, start, len, ramblock->used_length);
        goto err;
    }

    struct RAMSrcPageRequest *new_entry =
        g_malloc0(sizeof(struct RAMSrcPageRequest));
    new_entry->rb = ramblock;
    new_entry->offset = start;
    new_entry->len = len;

    memory_region_ref(ramblock->mr);
    qemu_mutex_lock(&rs->src_page_req_mutex);
    QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
    migration_make_urgent_request();
    qemu_mutex_unlock(&rs->src_page_req_mutex);
    rcu_read_unlock();

    return 0;

err:
    rcu_read_unlock();
    return -1;
}
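
/*
 * Illustrative call (values hypothetical): a postcopy destination
 * faulting on offset 0x2000 of "pc.ram" leads the source to run the
 * equivalent of
 *
 *     ram_save_queue_pages("pc.ram", 0x2000, TARGET_PAGE_SIZE);
 *
 * which appends one RAMSrcPageRequest and wakes the migration thread
 * through migration_make_urgent_request().
 */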

static bool save_page_use_compression(RAMState *rs)
{
    if (!migrate_use_compression()) {
        return false;
    }

    /*
     * If xbzrle is on, stop using the data compression after first
     * round of migration even if compression is enabled. In theory,
     * xbzrle can do better than compression.
     */
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
        return true;
    }

    return false;
}

/*
 * try to compress the page before posting it out, return true if the page
 * has been properly handled by compression, otherwise needs other
 * paths to handle it
 */
static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
{
    if (!save_page_use_compression(rs)) {
        return false;
    }

    /*
     * When starting the process of a new block, the first page of
     * the block should be sent out before other pages in the same
     * block, and all the pages in the last block should have been sent
     * out; keeping this order is important, because the 'cont' flag
     * is used to avoid resending the block name.
     *
     * We post the first page as a normal page as compression will take
     * much CPU resource.
     */
    if (block != rs->last_sent_block) {
        flush_compressed_data(rs);
        return false;
    }

    if (compress_page_with_multi_thread(rs, block, offset) > 0) {
        return true;
    }

    compression_counters.busy++;
    return false;
}

/**
 * ram_save_target_page: save one target page
 *
 * Returns the number of pages written
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
                                bool last_stage)
{
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
    int res;

    if (control_save_page(rs, block, offset, &res)) {
        return res;
    }

    if (save_compress_page(rs, block, offset)) {
        return 1;
    }

    res = save_zero_page(rs, block, offset);
    if (res > 0) {
        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
         * page would be stale
         */
        if (!save_page_use_compression(rs)) {
            XBZRLE_cache_lock();
            xbzrle_cache_zero_page(rs, block->offset + offset);
            XBZRLE_cache_unlock();
        }
        ram_release_pages(block->idstr, offset, res);
        return res;
    }

    /*
     * do not use multifd for compression as the first page in the new
     * block should be posted out before sending the compressed page
     */
    if (!save_page_use_compression(rs) && migrate_use_multifd()) {
        return ram_save_multifd_page(rs, block, offset);
    }

    return ram_save_page(rs, pss, last_stage);
}
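
/*
 * The fall-through order above is: RDMA control path first, then the
 * compression threads, then the zero-page test, then multifd, and
 * finally the plain (possibly xbzrle) ram_save_page() path.
 */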

/**
 * ram_save_host_page: save a whole host page
 *
 * Starting at *offset send pages up to the end of the current host
 * page. It's valid for the initial offset to point into the middle of
 * a host page in which case the remainder of the hostpage is sent.
 * Only dirty target pages are sent. Note that the host page size may
 * be a huge page for this block.
 * The saving stops at the boundary of the used_length of the block
 * if the RAMBlock isn't a multiple of the host page size.
 *
 * Returns the number of pages written or negative on error
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
                              bool last_stage)
{
    int tmppages, pages = 0;
    size_t pagesize_bits =
        qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;

    if (ramblock_is_ignored(pss->block)) {
        error_report("block %s should not be migrated !", pss->block->idstr);
        return 0;
    }

    do {
        /* Check if the page is dirty and if it is, send it */
        if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
            pss->page++;
            continue;
        }

        tmppages = ram_save_target_page(rs, pss, last_stage);
        if (tmppages < 0) {
            return tmppages;
        }

        pages += tmppages;
        if (pss->block->unsentmap) {
            clear_bit(pss->page, pss->block->unsentmap);
        }

        pss->page++;
    } while ((pss->page & (pagesize_bits - 1)) &&
             offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));

    /* The offset we leave with is the last one we looked at */
    pss->page--;
    return pages;
}
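
/*
 * Worked example, assuming 4KiB target pages and a 2MiB hugetlbfs
 * backed block (as on x86-64): pagesize_bits is 512, so one call can
 * send up to 512 target pages before the host-page boundary test in
 * the while condition terminates the loop.
 */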

/**
 * ram_find_and_save_block: finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns the number of pages written where zero means no dirty pages,
 * or negative on error
 *
 * @rs: current RAM state
 * @last_stage: if we are at the completion stage
 *
 * On systems where host-page-size > target-page-size it will send all the
 * pages in a host page that are dirty.
 */
static int ram_find_and_save_block(RAMState *rs, bool last_stage)
{
    PageSearchStatus pss;
    int pages = 0;
    bool again, found;

    /* No dirty page as there is zero RAM */
    if (!ram_bytes_total()) {
        return pages;
    }

    pss.block = rs->last_seen_block;
    pss.page = rs->last_page;
    pss.complete_round = false;

    if (!pss.block) {
        pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
    }

    do {
        again = true;
        found = get_queued_page(rs, &pss);

        if (!found) {
            /* priority queue empty, so just search for something dirty */
            found = find_dirty_block(rs, &pss, &again);
        }

        if (found) {
            pages = ram_save_host_page(rs, &pss, last_stage);
        }
    } while (!pages && again);

    rs->last_seen_block = pss.block;
    rs->last_page = pss.page;

    return pages;
}
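
/*
 * Note the priority above: queued postcopy requests are always tried
 * before the background dirty-bitmap scan, so an urgent page requested
 * by the destination never waits behind the sequential pass.
 */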

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;

    if (zero) {
        ram_counters.duplicate += pages;
    } else {
        ram_counters.normal += pages;
        ram_counters.transferred += size;
        qemu_update_position(f, size);
    }
}

static uint64_t ram_bytes_total_common(bool count_ignored)
{
    RAMBlock *block;
    uint64_t total = 0;

    rcu_read_lock();
    if (count_ignored) {
        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            total += block->used_length;
        }
    } else {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            total += block->used_length;
        }
    }
    rcu_read_unlock();
    return total;
}

uint64_t ram_bytes_total(void)
{
    return ram_bytes_total_common(false);
}

static void xbzrle_load_setup(void)
{
    XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
}

static void xbzrle_load_cleanup(void)
{
    g_free(XBZRLE.decoded_buf);
    XBZRLE.decoded_buf = NULL;
}

static void ram_state_cleanup(RAMState **rsp)
{
    if (*rsp) {
        migration_page_queue_free(*rsp);
        qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
        qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
        g_free(*rsp);
        *rsp = NULL;
    }
}

static void xbzrle_cleanup(void)
{
    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.zero_target_page);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
        XBZRLE.zero_target_page = NULL;
    }
    XBZRLE_cache_unlock();
}

static void ram_save_cleanup(void *opaque)
{
    RAMState **rsp = opaque;
    RAMBlock *block;

    /* the caller holds the iothread lock or is in a bh, so there is
     * no writing race against the migration bitmap
     */
    memory_global_dirty_log_stop();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        g_free(block->bmap);
        block->bmap = NULL;
        g_free(block->unsentmap);
        block->unsentmap = NULL;
    }

    xbzrle_cleanup();
    compress_threads_save_cleanup();
    ram_state_cleanup(rsp);
}

static void ram_state_reset(RAMState *rs)
{
    rs->last_seen_block = NULL;
    rs->last_sent_block = NULL;
    rs->last_page = 0;
    rs->last_version = ram_list.version;
    rs->ram_bulk_stage = true;
    rs->fpo_enabled = false;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

/*
 * 'expected' is the value you expect the bitmap mostly to be full
 * of; it won't bother printing lines that are all this value.
 * If 'todump' is null the migration bitmap is dumped.
 */
void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
                           unsigned long pages)
{
    int64_t cur;
    int64_t linelen = 128;
    char linebuf[129];

    for (cur = 0; cur < pages; cur += linelen) {
        int64_t curb;
        bool found = false;
        /*
         * Last line; catch the case where the line length
         * is longer than remaining ram
         */
        if (cur + linelen > pages) {
            linelen = pages - cur;
        }
        for (curb = 0; curb < linelen; curb++) {
            bool thisbit = test_bit(cur + curb, todump);
            linebuf[curb] = thisbit ? '1' : '.';
            found = found || (thisbit != expected);
        }
        if (found) {
            linebuf[curb] = '\0';
            fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
        }
    }
}

/* **** functions for postcopy ***** */

void ram_postcopy_migrated_memory_release(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        unsigned long *bitmap = block->bmap;
        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);

        while (run_start < range) {
            unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
            ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
                              (run_end - run_start) << TARGET_PAGE_BITS);
            run_start = find_next_zero_bit(bitmap, range, run_end + 1);
        }
    }
}

/**
 * postcopy_send_discard_bm_ram: discard a RAMBlock
 *
 * Returns zero on success
 *
 * Callback from postcopy_each_ram_send_discard for each RAMBlock
 * Note: At this point the 'unsentmap' is the processed bitmap combined
 *       with the dirtymap; so a '1' means it's either dirty or unsent.
 *
 * @ms: current migration state
 * @pds: state for postcopy
 * @block: RAMBlock whose unsent pages should be discarded
 */
static int postcopy_send_discard_bm_ram(MigrationState *ms,
                                        PostcopyDiscardState *pds,
                                        RAMBlock *block)
{
    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
    unsigned long current;
    unsigned long *unsentmap = block->unsentmap;

    for (current = 0; current < end; ) {
        unsigned long one = find_next_bit(unsentmap, end, current);

        if (one <= end) {
            unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
            unsigned long discard_length;

            if (zero >= end) {
                discard_length = end - one;
            } else {
                discard_length = zero - one;
            }
            if (discard_length) {
                postcopy_discard_send_range(ms, pds, one, discard_length);
            }
            current = one + discard_length;
        } else {
            current = one;
        }
    }

    return 0;
}
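
/*
 * Illustrative run, assuming unsentmap bits 5..8 are the only ones set
 * in a block: one = 5, zero = 9, so discard_length = 4 and a single
 * (start = 5, length = 4) range is sent before current jumps to 9.
 */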

/**
 * postcopy_each_ram_send_discard: discard all RAMBlocks
 *
 * Returns 0 for success or negative for error
 *
 * Utility for the outgoing postcopy code.
 *   Calls postcopy_send_discard_bm_ram for each RAMBlock
 *   passing it bitmap indexes and name.
 * (qemu_ram_foreach_block ends up passing unscaled lengths
 *  which would mean postcopy code would have to deal with target page)
 *
 * @ms: current migration state
 */
static int postcopy_each_ram_send_discard(MigrationState *ms)
{
    struct RAMBlock *block;
    int ret;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        PostcopyDiscardState *pds =
            postcopy_discard_send_init(ms, block->idstr);

        /*
         * Postcopy sends chunks of bitmap over the wire, but it
         * just needs indexes at this point, avoids it having
         * target page specific code.
         */
        ret = postcopy_send_discard_bm_ram(ms, pds, block);
        postcopy_discard_send_finish(ms, pds);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

/**
 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
 *
 * Helper for postcopy_chunk_hostpages; it's called twice to
 * canonicalize the two bitmaps, that are similar, but one is
 * inverted.
 *
 * Postcopy requires that all target pages in a hostpage are dirty or
 * clean, not a mix. This function canonicalizes the bitmaps.
 *
 * @ms: current migration state
 * @unsent_pass: if true we need to canonicalize partially unsent host pages
 *               otherwise we need to canonicalize partially dirty host pages
 * @block: block that contains the page we want to canonicalize
 * @pds: state for postcopy
 */
static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                          RAMBlock *block,
                                          PostcopyDiscardState *pds)
{
    RAMState *rs = ram_state;
    unsigned long *bitmap = block->bmap;
    unsigned long *unsentmap = block->unsentmap;
    unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
    unsigned long run_start;

    if (block->page_size == TARGET_PAGE_SIZE) {
        /* Easy case - TPS==HPS for a non-huge page RAMBlock */
        return;
    }

    if (unsent_pass) {
        /* Find a sent page */
        run_start = find_next_zero_bit(unsentmap, pages, 0);
    } else {
        /* Find a dirty page */
        run_start = find_next_bit(bitmap, pages, 0);
    }

    while (run_start < pages) {
        bool do_fixup = false;
        unsigned long fixup_start_addr;
        unsigned long host_offset;

        /*
         * If the start of this run of pages is in the middle of a host
         * page, then we need to fixup this host page.
         */
        host_offset = run_start % host_ratio;
        if (host_offset) {
            do_fixup = true;
            run_start -= host_offset;
            fixup_start_addr = run_start;
            /* For the next pass */
            run_start = run_start + host_ratio;
        } else {
            /* Find the end of this run */
            unsigned long run_end;
            if (unsent_pass) {
                run_end = find_next_bit(unsentmap, pages, run_start + 1);
            } else {
                run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
            }
            /*
             * If the end isn't at the start of a host page, then the
             * run doesn't finish at the end of a host page
             * and we need to discard.
             */
            host_offset = run_end % host_ratio;
            if (host_offset) {
                do_fixup = true;
                fixup_start_addr = run_end - host_offset;
                /*
                 * This host page has gone, the next loop iteration starts
                 * from after the fixup
                 */
                run_start = fixup_start_addr + host_ratio;
            } else {
                /*
                 * No discards on this iteration, next loop starts from
                 * next sent/dirty page
                 */
                run_start = run_end + 1;
            }
        }

        if (do_fixup) {
            unsigned long page;

            /* Tell the destination to discard this page */
            if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
                /* For the unsent_pass we:
                 *     discard partially sent pages
                 * For the !unsent_pass (dirty) we:
                 *     discard partially dirty pages that were sent
                 *     (any partially sent pages were already discarded
                 *     by the previous unsent_pass)
                 */
                postcopy_discard_send_range(ms, pds, fixup_start_addr,
                                            host_ratio);
            }

            /* Clean up the bitmap */
            for (page = fixup_start_addr;
                 page < fixup_start_addr + host_ratio; page++) {
                /* All pages in this host page are now not sent */
                set_bit(page, unsentmap);

                /*
                 * Remark them as dirty, updating the count for any pages
                 * that weren't previously dirty.
                 */
                rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
            }
        }

        if (unsent_pass) {
            /* Find the next sent page for the next iteration */
            run_start = find_next_zero_bit(unsentmap, pages, run_start);
        } else {
            /* Find the next dirty page for the next iteration */
            run_start = find_next_bit(bitmap, pages, run_start);
        }
    }
}
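
/*
 * Worked example, assuming host_ratio = 4 (16KiB host pages, 4KiB
 * target pages): a dirty run starting at target page 6 has
 * host_offset = 2, so the fixup rewinds to page 4 and the whole host
 * page spanning target pages 4..7 is discarded and remarked dirty.
 */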

/**
 * postcopy_chunk_hostpages: discard any partially sent host page
 *
 * Utility for the outgoing postcopy code.
 *
 * Discard any partially sent host-page size chunks, mark any partially
 * dirty host-page size chunks as all dirty. In this case the host-page
 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
 *
 * Returns zero on success
 *
 * @ms: current migration state
 * @block: block we want to work with
 */
static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
{
    PostcopyDiscardState *pds =
        postcopy_discard_send_init(ms, block->idstr);

    /* First pass: Discard all partially sent host pages */
    postcopy_chunk_hostpages_pass(ms, true, block, pds);
    /*
     * Second pass: Ensure that all partially dirty host pages are made
     * fully dirty.
     */
    postcopy_chunk_hostpages_pass(ms, false, block, pds);

    postcopy_discard_send_finish(ms, pds);
    return 0;
}

/**
 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
 *
 * Returns zero on success
 *
 * Transmit the set of pages to be discarded after precopy to the target
 * these are pages that:
 *     a) Have been previously transmitted but are now dirty again
 *     b) Pages that have never been transmitted, this ensures that
 *        any pages on the destination that have been mapped by background
 *        tasks get discarded (transparent huge pages is the specific concern)
 * Hopefully this is pretty sparse
 *
 * @ms: current migration state
 */
int ram_postcopy_send_discard_bitmap(MigrationState *ms)
{
    RAMState *rs = ram_state;
    RAMBlock *block;
    int ret;

    rcu_read_lock();

    /* This should be our last sync, the src is now paused */
    migration_bitmap_sync(rs);

    /* Easiest way to make sure we don't resume in the middle of a host-page */
    rs->last_seen_block = NULL;
    rs->last_sent_block = NULL;
    rs->last_page = 0;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
        unsigned long *bitmap = block->bmap;
        unsigned long *unsentmap = block->unsentmap;

        if (!unsentmap) {
            /* We don't have a safe way to resize the sentmap, so
             * if the bitmap was resized it will be NULL at this
             * point.
             */
            error_report("migration ram resized during precopy phase");
            rcu_read_unlock();
            return -EINVAL;
        }
        /* Deal with TPS != HPS and huge pages */
        ret = postcopy_chunk_hostpages(ms, block);
        if (ret) {
            rcu_read_unlock();
            return ret;
        }

        /*
         * Update the unsentmap to be unsentmap = unsentmap | dirty
         */
        bitmap_or(unsentmap, unsentmap, bitmap, pages);
#ifdef DEBUG_POSTCOPY
        ram_debug_dump_bitmap(unsentmap, true, pages);
#endif
    }
    trace_ram_postcopy_send_discard_bitmap();

    ret = postcopy_each_ram_send_discard(ms);
    rcu_read_unlock();

    return ret;
}

/**
 * ram_discard_range: discard dirtied pages at the beginning of postcopy
 *
 * Returns zero on success
 *
 * @rbname: name of the RAMBlock of the request. NULL means the
 *          same as the last one.
 * @start: starting offset (in bytes) within the RAMBlock
 * @length: length (in bytes) to discard
 */
int ram_discard_range(const char *rbname, uint64_t start, size_t length)
{
    int ret = -1;

    trace_ram_discard_range(rbname, start, length);

    rcu_read_lock();
    RAMBlock *rb = qemu_ram_block_by_name(rbname);

    if (!rb) {
        error_report("ram_discard_range: Failed to find block '%s'", rbname);
        goto err;
    }

    /*
     * On source VM, we don't need to update the received bitmap since
     * we don't even have one.
     */
    if (rb->receivedmap) {
        bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
                     length >> qemu_target_page_bits());
    }

    ret = ram_block_discard_range(rb, start, length);

err:
    rcu_read_unlock();

    return ret;
}

/*
 * For every allocation, we will try not to crash the VM if the
 * allocation failed.
 */
static int xbzrle_init(void)
{
    Error *local_err = NULL;

    if (!migrate_use_xbzrle()) {
        return 0;
    }

    XBZRLE_cache_lock();

    XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
    if (!XBZRLE.zero_target_page) {
        error_report("%s: Error allocating zero page", __func__);
        goto err_out;
    }

    XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
                              TARGET_PAGE_SIZE, &local_err);
    if (!XBZRLE.cache) {
        error_report_err(local_err);
        goto free_zero_page;
    }

    XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
    if (!XBZRLE.encoded_buf) {
        error_report("%s: Error allocating encoded_buf", __func__);
        goto free_cache;
    }

    XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
    if (!XBZRLE.current_buf) {
        error_report("%s: Error allocating current_buf", __func__);
        goto free_encoded_buf;
    }

    /* We are all good */
    XBZRLE_cache_unlock();
    return 0;

free_encoded_buf:
    g_free(XBZRLE.encoded_buf);
    XBZRLE.encoded_buf = NULL;
free_cache:
    cache_fini(XBZRLE.cache);
    XBZRLE.cache = NULL;
free_zero_page:
    g_free(XBZRLE.zero_target_page);
    XBZRLE.zero_target_page = NULL;
err_out:
    XBZRLE_cache_unlock();
    return -ENOMEM;
}

static int ram_state_init(RAMState **rsp)
{
    *rsp = g_try_new0(RAMState, 1);

    if (!*rsp) {
        error_report("%s: Init ramstate fail", __func__);
        return -1;
    }

    qemu_mutex_init(&(*rsp)->bitmap_mutex);
    qemu_mutex_init(&(*rsp)->src_page_req_mutex);
    QSIMPLEQ_INIT(&(*rsp)->src_page_requests);

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;

    ram_state_reset(*rsp);

    return 0;
}

static void ram_list_init_bitmaps(void)
{
    RAMBlock *block;
    unsigned long pages;

    /* Skip setting bitmap if there is no RAM */
    if (ram_bytes_total()) {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            pages = block->max_length >> TARGET_PAGE_BITS;
            block->bmap = bitmap_new(pages);
            bitmap_set(block->bmap, 0, pages);
            if (migrate_postcopy_ram()) {
                block->unsentmap = bitmap_new(pages);
                bitmap_set(block->unsentmap, 0, pages);
            }
        }
    }
}

static void ram_init_bitmaps(RAMState *rs)
{
    /* For memory_global_dirty_log_start below. */
    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    rcu_read_lock();

    ram_list_init_bitmaps();
    memory_global_dirty_log_start();
    migration_bitmap_sync_precopy(rs);

    rcu_read_unlock();
    qemu_mutex_unlock_ramlist();
    qemu_mutex_unlock_iothread();
}

static int ram_init_all(RAMState **rsp)
{
    if (ram_state_init(rsp)) {
        return -1;
    }

    if (xbzrle_init()) {
        ram_state_cleanup(rsp);
        return -1;
    }

    ram_init_bitmaps(*rsp);

    return 0;
}

static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
{
    RAMBlock *block;
    uint64_t pages = 0;

    /*
     * Postcopy is not using xbzrle/compression, so no need for that.
     * Also, since the source is already halted, we don't need to care
     * about dirty page logging as well.
     */

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        pages += bitmap_count_one(block->bmap,
                                  block->used_length >> TARGET_PAGE_BITS);
    }

    /* This may not be aligned with current bitmaps. Recalculate. */
    rs->migration_dirty_pages = pages;

    rs->last_seen_block = NULL;
    rs->last_sent_block = NULL;
    rs->last_page = 0;
    rs->last_version = ram_list.version;
    /*
     * Disable the bulk stage, otherwise we'll resend the whole RAM no
     * matter what we have sent.
     */
    rs->ram_bulk_stage = false;

    /* Update RAMState cache of output QEMUFile */
    rs->f = out;

    trace_ram_state_resume_prepare(pages);
}

/*
 * This function clears bits of the free pages reported by the caller from the
 * migration dirty bitmap. @addr is the host address corresponding to the
 * start of the continuous guest free pages, and @len is the total bytes of
 * those free pages.
 */
void qemu_guest_free_page_hint(void *addr, size_t len)
{
    RAMBlock *block;
    ram_addr_t offset;
    size_t used_len, start, npages;
    MigrationState *s = migrate_get_current();

    /* This function is currently expected to be used during live migration */
    if (!migration_is_setup_or_active(s->state)) {
        return;
    }

    for (; len > 0; len -= used_len, addr += used_len) {
        block = qemu_ram_block_from_host(addr, false, &offset);
        if (unlikely(!block || offset >= block->used_length)) {
            /*
             * The implementation might not support RAMBlock resize during
             * live migration, but it could happen in theory with future
             * updates. So we add a check here to capture that case.
             */
            error_report_once("%s unexpected error", __func__);
            return;
        }

        if (len <= block->used_length - offset) {
            used_len = len;
        } else {
            used_len = block->used_length - offset;
        }

        start = offset >> TARGET_PAGE_BITS;
        npages = used_len >> TARGET_PAGE_BITS;

        qemu_mutex_lock(&ram_state->bitmap_mutex);
        ram_state->migration_dirty_pages -=
                      bitmap_count_one_with_offset(block->bmap, start, npages);
        bitmap_clear(block->bmap, start, npages);
        qemu_mutex_unlock(&ram_state->bitmap_mutex);
    }
}
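
/*
 * Illustrative arithmetic: a 1MiB hint entirely inside one block with
 * 4KiB target pages gives npages = 256; those bits are cleared and
 * migration_dirty_pages drops by however many of them were still set.
 */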

/*
 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * a long-running RCU critical section. When rcu-reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */

/**
 * ram_save_setup: Setup RAM for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMState **rsp = opaque;
    RAMBlock *block;

    if (compress_threads_save_setup()) {
        return -1;
    }

    /* migration has already setup the bitmap, reuse it. */
    if (!migration_in_colo_state()) {
        if (ram_init_all(rsp) != 0) {
            compress_threads_save_cleanup();
            return -1;
        }
    }
    (*rsp)->f = f;

    rcu_read_lock();

    qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->used_length);
        if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
            qemu_put_be64(f, block->page_size);
        }
        if (migrate_ignore_shared()) {
            qemu_put_be64(f, block->mr->addr);
            qemu_put_byte(f, ramblock_is_ignored(block) ? 1 : 0);
        }
    }

    rcu_read_unlock();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    multifd_send_sync_main();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    qemu_fflush(f);

    return 0;
}

/**
 * ram_save_iterate: iterative stage for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    int ret;
    int i;
    int64_t t0;
    int done = 0;

    if (blk_mig_bulk_active()) {
        /* Avoid transferring ram during bulk phase of block migration as
         * the bulk phase will usually take a long time and transferring
         * ram updates during that time is pointless. */
        goto out;
    }

    rcu_read_lock();
    if (ram_list.version != rs->last_version) {
        ram_state_reset(rs);
    }

    /* Read version before ram_list.blocks */
    smp_rmb();

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0 ||
            !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
        int pages;

        if (qemu_file_get_error(f)) {
            break;
        }

        pages = ram_find_and_save_block(rs, false);
        /* no more pages to send */
        if (pages == 0) {
            done = 1;
            break;
        }

        if (pages < 0) {
            qemu_file_set_error(f, pages);
            break;
        }

        rs->target_page_count += pages;

        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_clock_get_ns() is a bit expensive, so we only check every few
           iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                trace_ram_save_iterate_big_wait(t1, i);
                break;
            }
        }
        i++;
    }
    rcu_read_unlock();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    multifd_send_sync_main();
out:
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    qemu_fflush(f);
    ram_counters.transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return done;
}

/**
 * ram_save_complete: function called to send the remaining amount of ram
 *
 * Returns zero to indicate success or negative on error
 *
 * Called with iothread lock
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;

    rcu_read_lock();

    if (!migration_in_postcopy()) {
        migration_bitmap_sync_precopy(rs);
    }

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int pages;

        pages = ram_find_and_save_block(rs, !migration_in_colo_state());
        /* no more blocks to send */
        if (pages == 0) {
            break;
        }
    }

    flush_compressed_data(rs);
    ram_control_after_iterate(f, RAM_CONTROL_FINISH);

    rcu_read_unlock();

    multifd_send_sync_main();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    qemu_fflush(f);

    return 0;
}

static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                             uint64_t *res_precopy_only,
                             uint64_t *res_compatible,
                             uint64_t *res_postcopy_only)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    uint64_t remaining_size;

    remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

    if (!migration_in_postcopy() &&
        remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        rcu_read_lock();
        migration_bitmap_sync_precopy(rs);
        rcu_read_unlock();
        qemu_mutex_unlock_iothread();
        remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
    }

    if (migrate_postcopy_ram()) {
        /* We can do postcopy, and all the data is postcopiable */
        *res_compatible += remaining_size;
    } else {
        *res_precopy_only += remaining_size;
    }
}

static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;
    uint8_t *loaded_data;

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    loaded_data = XBZRLE.decoded_buf;
    /* load data and decode */
    /* it can change loaded_data to point to an internal buffer */
    qemu_get_buffer_in_place(f, &loaded_data, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(loaded_data, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}

/**
 * ram_block_from_stream: read a RAMBlock id from the migration stream
 *
 * Must be called from within a rcu critical section.
 *
 * Returns a pointer from within the RCU-protected ram_list.
 *
 * @f: QEMUFile where to read the data from
 * @flags: Page flags (mostly to see if it's a continuation of previous block)
 */
static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }
        return block;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    block = qemu_ram_block_by_name(id);
    if (!block) {
        error_report("Can't find block %s", id);
        return NULL;
    }

    if (ramblock_is_ignored(block)) {
        error_report("block %s should not be migrated !", id);
        return NULL;
    }

    return block;
}

static inline void *host_from_ram_block_offset(RAMBlock *block,
                                               ram_addr_t offset)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }

    return block->host + offset;
}

static inline void *colo_cache_from_block_offset(RAMBlock *block,
                                                 ram_addr_t offset)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }
    if (!block->colo_cache) {
        error_report("%s: colo_cache is NULL in block :%s",
                     __func__, block->idstr);
        return NULL;
    }

    /*
     * During colo checkpoint, we need a bitmap of these migrated pages.
     * It helps us to decide which pages in ram cache should be flushed
     * into VM's RAM later.
     */
    if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
        ram_state->migration_dirty_pages++;
    }
    return block->colo_cache + offset;
}

/**
 * ram_handle_compressed: handle the zero page case
 *
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 *
 * @host: host address for the zero page
 * @ch: what the page is filled from. We only support zero
 * @size: size of the zero page
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}

/* return the size after decompression, or negative value on error */
static int
qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
                     const uint8_t *source, size_t source_len)
{
    int err;

    err = inflateReset(stream);
    if (err != Z_OK) {
        return -1;
    }

    stream->avail_in = source_len;
    stream->next_in = (uint8_t *)source;
    stream->avail_out = dest_len;
    stream->next_out = dest;

    err = inflate(stream, Z_NO_FLUSH);
    if (err != Z_STREAM_END) {
        return -1;
    }

    return stream->total_out;
}
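
/*
 * Each compressed page is an independent deflate stream: inflate() is
 * expected to consume the whole input and return Z_STREAM_END in a
 * single call, which is why anything else is treated as an error above.
 */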

static void *do_data_decompress(void *opaque)
{
    DecompressParam *param = opaque;
    unsigned long pagesize;
    uint8_t *des;
    int len, ret;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->des) {
            des = param->des;
            len = param->len;
            param->des = 0;
            qemu_mutex_unlock(&param->mutex);

            pagesize = TARGET_PAGE_SIZE;

            ret = qemu_uncompress_data(&param->stream, des, pagesize,
                                       param->compbuf, len);
            if (ret < 0 && migrate_get_current()->decompress_error_check) {
                error_report("decompress data failed");
                qemu_file_set_error(decomp_file, ret);
            }

            qemu_mutex_lock(&decomp_done_lock);
            param->done = true;
            qemu_cond_signal(&decomp_done_cond);
            qemu_mutex_unlock(&decomp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}

static int wait_for_decompress_done(void)
{
    int idx, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!decomp_param[idx].done) {
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
        }
    }
    qemu_mutex_unlock(&decomp_done_lock);
    return qemu_file_get_error(decomp_file);
}

static void compress_threads_load_cleanup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_decompress_threads();
    for (i = 0; i < thread_count; i++) {
        /*
         * we use it as an indicator which shows if the thread is
         * properly init'd or not
         */
        if (!decomp_param[i].compbuf) {
            break;
        }

        qemu_mutex_lock(&decomp_param[i].mutex);
        decomp_param[i].quit = true;
        qemu_cond_signal(&decomp_param[i].cond);
        qemu_mutex_unlock(&decomp_param[i].mutex);
    }
    for (i = 0; i < thread_count; i++) {
        if (!decomp_param[i].compbuf) {
            break;
        }

        qemu_thread_join(decompress_threads + i);
        qemu_mutex_destroy(&decomp_param[i].mutex);
        qemu_cond_destroy(&decomp_param[i].cond);
        inflateEnd(&decomp_param[i].stream);
        g_free(decomp_param[i].compbuf);
        decomp_param[i].compbuf = NULL;
    }
    g_free(decompress_threads);
    g_free(decomp_param);
    decompress_threads = NULL;
    decomp_param = NULL;
    decomp_file = NULL;
}

static int compress_threads_load_setup(QEMUFile *f)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }

    thread_count = migrate_decompress_threads();
    decompress_threads = g_new0(QemuThread, thread_count);
    decomp_param = g_new0(DecompressParam, thread_count);
    qemu_mutex_init(&decomp_done_lock);
    qemu_cond_init(&decomp_done_cond);
    decomp_file = f;
    for (i = 0; i < thread_count; i++) {
        if (inflateInit(&decomp_param[i].stream) != Z_OK) {
            goto exit;
        }

        decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
        qemu_mutex_init(&decomp_param[i].mutex);
        qemu_cond_init(&decomp_param[i].cond);
        decomp_param[i].done = true;
        decomp_param[i].quit = false;
        qemu_thread_create(decompress_threads + i, "decompress",
                           do_data_decompress, decomp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
    return 0;
exit:
    compress_threads_load_cleanup();
    return -1;
}

static void decompress_data_with_multi_threads(QEMUFile *f,
                                               void *host, int len)
{
    int idx, thread_count;

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (decomp_param[idx].done) {
                decomp_param[idx].done = false;
                qemu_mutex_lock(&decomp_param[idx].mutex);
                qemu_get_buffer(f, decomp_param[idx].compbuf, len);
                decomp_param[idx].des = host;
                decomp_param[idx].len = len;
                qemu_cond_signal(&decomp_param[idx].cond);
                qemu_mutex_unlock(&decomp_param[idx].mutex);
                break;
            }
        }
        if (idx < thread_count) {
            break;
        } else {
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
        }
    }
    qemu_mutex_unlock(&decomp_done_lock);
}
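
/*
 * If every decompression thread is busy, the loop above blocks on
 * decomp_done_cond until one signals completion, so this function only
 * returns once the compressed page has been handed to some thread.
 */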

/*
 * colo cache: this is for secondary VM, we cache the whole
 * memory of the secondary VM. The global lock must be held
 * to call this helper.
 */
int colo_init_ram_cache(void)
{
    RAMBlock *block;

    rcu_read_lock();
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        block->colo_cache = qemu_anon_ram_alloc(block->used_length,
                                                NULL,
                                                false);
        if (!block->colo_cache) {
            error_report("%s: Can't alloc memory for COLO cache of block %s,"
                         "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
                         block->used_length);
            goto out_locked;
        }
        memcpy(block->colo_cache, block->host, block->used_length);
    }
    rcu_read_unlock();
    /*
     * Record the dirty pages that were sent by the PVM; we use this dirty
     * bitmap to decide which pages in the cache should be flushed into the
     * SVM's RAM. Here we use the same name 'ram_bitmap' as for migration.
     */
    if (ram_bytes_total()) {
        RAMBlock *block;

        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;

            block->bmap = bitmap_new(pages);
            bitmap_set(block->bmap, 0, pages);
        }
    }
    ram_state = g_new0(RAMState, 1);
    ram_state->migration_dirty_pages = 0;
    qemu_mutex_init(&ram_state->bitmap_mutex);
    memory_global_dirty_log_start();

    return 0;

out_locked:

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        if (block->colo_cache) {
            qemu_anon_ram_free(block->colo_cache, block->used_length);
            block->colo_cache = NULL;
        }
    }

    rcu_read_unlock();
    return -errno;
}

/* The global lock must be held to call this helper */
void colo_release_ram_cache(void)
{
    RAMBlock *block;

    memory_global_dirty_log_stop();
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        g_free(block->bmap);
        block->bmap = NULL;
    }

    rcu_read_lock();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        if (block->colo_cache) {
            qemu_anon_ram_free(block->colo_cache, block->used_length);
            block->colo_cache = NULL;
        }
    }

    rcu_read_unlock();
    qemu_mutex_destroy(&ram_state->bitmap_mutex);
    g_free(ram_state);
    ram_state = NULL;
}

/**
 * ram_load_setup: Setup RAM for migration incoming side
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to receive the data
 * @opaque: RAMState pointer
 */
static int ram_load_setup(QEMUFile *f, void *opaque)
{
    if (compress_threads_load_setup(f)) {
        return -1;
    }

    xbzrle_load_setup();
    ramblock_recv_map_init();

    return 0;
}

static int ram_load_cleanup(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        if (ramblock_is_pmem(rb)) {
            pmem_persist(rb->host, rb->used_length);
        }
    }

    xbzrle_load_cleanup();
    compress_threads_load_cleanup();

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        g_free(rb->receivedmap);
        rb->receivedmap = NULL;
    }

    return 0;
}

/**
 * ram_postcopy_incoming_init: allocate postcopy data structures
 *
 * Returns 0 for success and negative if there was one error
 *
 * @mis: current migration incoming state
 *
 * Allocate data structures etc needed by incoming migration with
 * postcopy-ram. postcopy-ram's similarly named
 * postcopy_ram_incoming_init does the work.
 */
int ram_postcopy_incoming_init(MigrationIncomingState *mis)
{
    return postcopy_ram_incoming_init(mis);
}

/**
 * ram_load_postcopy: load a page in postcopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in postcopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile where to receive the data
 */
static int ram_load_postcopy(QEMUFile *f)
{
    int flags = 0, ret = 0;
    bool place_needed = false;
    bool matches_target_page_size = false;
    MigrationIncomingState *mis = migration_incoming_get_current();
    /* Temporary page that is later 'placed' */
    void *postcopy_host_page = postcopy_get_tmp_page(mis);
    void *last_host = NULL;
    bool all_zero = false;

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *host = NULL;
        void *page_buffer = NULL;
        void *place_source = NULL;
        RAMBlock *block = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);

        /*
         * If qemu file error, we should stop here, and then "addr"
         * may be invalid.
         */
        ret = qemu_file_get_error(f);
        if (ret) {
            break;
        }

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        trace_ram_load_postcopy_loop((uint64_t)addr, flags);
        place_needed = false;
        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
            block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
            /*
             * Postcopy requires that we place whole host pages atomically;
             * these may be huge pages for RAMBlocks that are backed by
             * hugetlbfs.
             * To make it atomic, the data is read into a temporary page
             * that's moved into place later.
             * The migration protocol uses, possibly smaller, target-pages
             * however the source ensures it always sends all the components
             * of a host page in order.
             */
            page_buffer = postcopy_host_page +
                          ((uintptr_t)host & (block->page_size - 1));
            /* If all TP are zero then we can optimise the place */
            if (!((uintptr_t)host & (block->page_size - 1))) {
                all_zero = true;
            } else {
                /* not the 1st TP within the HP */
                if (host != (last_host + TARGET_PAGE_SIZE)) {
                    error_report("Non-sequential target page %p/%p",
                                 host, last_host);
                    ret = -EINVAL;
                    break;
                }
            }

            /*
             * If it's the last part of a host page then we place the host
             * page
             */
            place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
                            (block->page_size - 1)) == 0;
            place_source = postcopy_host_page;
        }
        last_host = host;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            memset(page_buffer, ch, TARGET_PAGE_SIZE);
            if (ch) {
                all_zero = false;
            }
            break;

        case RAM_SAVE_FLAG_PAGE:
            all_zero = false;
            if (!matches_target_page_size) {
                /* For huge pages, we always use temporary buffer */
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
            } else {
                /*
                 * For small pages that match the target page size, we
                 * avoid the qemu_file copy. Instead we directly use
                 * the buffer of QEMUFile to place the page. Note: we
                 * cannot do any QEMUFile operation before using that
                 * buffer to make sure the buffer is valid when
                 * placing the page.
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
                                         TARGET_PAGE_SIZE);
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            multifd_recv_sync_main();
            break;
        default:
            error_report("Unknown combination of migration flags: %#x"
                         " (postcopy mode)", flags);
            ret = -EINVAL;
            break;
        }

        /* Detect for any possible file errors */
        if (!ret && qemu_file_get_error(f)) {
            ret = qemu_file_get_error(f);
        }

        if (!ret && place_needed) {
            /* This gets called at the last target page in the host page */
            void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;

            if (all_zero) {
                ret = postcopy_place_page_zero(mis, place_dest,
                                               block);
            } else {
                ret = postcopy_place_page(mis, place_dest,
                                          place_source, block);
            }
        }
    }

    return ret;
}

static bool postcopy_is_advised(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
}

static bool postcopy_is_running(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
}

/*
 * Flush content of RAM cache into SVM's memory.
 * Only flush the pages that are dirtied by PVM or SVM or both.
 */
static void colo_flush_ram_cache(void)
{
    RAMBlock *block = NULL;
    void *dst_host;
    void *src_host;
    unsigned long offset = 0;

    memory_global_dirty_log_sync();
    rcu_read_lock();
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        migration_bitmap_sync_range(ram_state, block, block->used_length);
    }
    rcu_read_unlock();

    trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
    rcu_read_lock();
    block = QLIST_FIRST_RCU(&ram_list.blocks);

    while (block) {
        offset = migration_bitmap_find_dirty(ram_state, block, offset);

        if (offset << TARGET_PAGE_BITS >= block->used_length) {
            offset = 0;
            block = QLIST_NEXT_RCU(block, next);
        } else {
            migration_bitmap_clear_dirty(ram_state, block, offset);
            dst_host = block->host + (offset << TARGET_PAGE_BITS);
            src_host = block->colo_cache + (offset << TARGET_PAGE_BITS);
            memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
        }
    }

    rcu_read_unlock();
    trace_colo_flush_ram_cache_end();
}

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0, invalid_flags = 0;
    static uint64_t seq_iter;
    int len = 0;
    /*
     * If system is running in postcopy mode, page inserts to host memory must
     * be atomic
     */
    bool postcopy_running = postcopy_is_running();
    /* ADVISE is earlier, it shows the source has the postcopy capability on */
    bool postcopy_advised = postcopy_is_advised();

    seq_iter++;

    if (version_id != 4) {
        ret = -EINVAL;
    }

    if (!migrate_use_compression()) {
        invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
    }
    /* This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    rcu_read_lock();

    if (postcopy_running) {
        ret = ram_load_postcopy(f);
    }

    while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & invalid_flags) {
            if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
                error_report("Received an unexpected compressed page");
            }

            ret = -EINVAL;
            break;
        }

        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(f, flags);

            /*
             * After going into COLO, we should load the Page into colo_cache.
             */
            if (migration_incoming_in_colo_state()) {
                host = colo_cache_from_block_offset(block, addr);
            } else {
                host = host_from_ram_block_offset(block, addr);
            }
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }

            if (!migration_incoming_in_colo_state()) {
                ramblock_recv_bitmap_set(block, host);
            }

            trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                block = qemu_ram_block_by_name(id);
                if (block && !qemu_ram_is_migratable(block)) {
                    error_report("block %s should not be migrated !", id);
                    ret = -EINVAL;
                } else if (block) {
                    if (length != block->used_length) {
                        Error *local_err = NULL;

                        ret = qemu_ram_resize(block, length,
                                              &local_err);
                        if (local_err) {
                            error_report_err(local_err);
                        }
                    }
                    /* For postcopy we need to check hugepage sizes match */
                    if (postcopy_advised &&
                        block->page_size != qemu_host_page_size) {
                        uint64_t remote_page_size = qemu_get_be64(f);
                        if (remote_page_size != block->page_size) {
                            error_report("Mismatched RAM page size %s "
                                         "(local) %zd != %" PRId64,
                                         id, block->page_size,
                                         remote_page_size);
                            ret = -EINVAL;
                        }
                    }
                    if (migrate_ignore_shared()) {
                        hwaddr addr = qemu_get_be64(f);
                        bool ignored = qemu_get_byte(f);
                        if (ignored != ramblock_is_ignored(block)) {
                            error_report("RAM block %s should %s be migrated",
                                         id, ignored ? "" : "not");
                            ret = -EINVAL;
                        }
                        if (ramblock_is_ignored(block) &&
                            block->mr->addr != addr) {
                            error_report("Mismatched GPAs for block %s "
                                         "%" PRId64 "!= %" PRId64,
                                         id, (uint64_t)addr,
                                         (uint64_t)block->mr->addr);
                            ret = -EINVAL;
                        }
                    }
                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                                          block->idstr);
                } else {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;

        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, host, len);
            break;

        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            multifd_recv_sync_main();
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    ret |= wait_for_decompress_done();
    rcu_read_unlock();
    trace_ram_load_complete(ret, seq_iter);

    if (!ret && migration_incoming_in_colo_state()) {
        colo_flush_ram_cache();
    }
    return ret;
}

static bool ram_has_postcopy(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        if (ramblock_is_pmem(rb)) {
            info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
                        "is not supported now!", rb->idstr, rb->host);
            return false;
        }
    }

    return migrate_postcopy_ram();
}

/* Sync all the dirty bitmap with destination VM. */
static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
{
    RAMBlock *block;
    QEMUFile *file = s->to_dst_file;
    int ramblock_count = 0;

    trace_ram_dirty_bitmap_sync_start();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        qemu_savevm_send_recv_bitmap(file, block->idstr);
        trace_ram_dirty_bitmap_request(block->idstr);
        ramblock_count++;
    }

    trace_ram_dirty_bitmap_sync_wait();

    /* Wait until all the ramblocks' dirty bitmap synced */
    while (ramblock_count--) {
        qemu_sem_wait(&s->rp_state.rp_sem);
    }

    trace_ram_dirty_bitmap_sync_complete();

    return 0;
}

static void ram_dirty_bitmap_reload_notify(MigrationState *s)
{
    qemu_sem_post(&s->rp_state.rp_sem);
}

/*
 * Read the received bitmap, revert it as the initial dirty bitmap.
 * This is only used when the postcopy migration is paused but wants
 * to resume from a middle point.
 */
int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
{
    int ret = -EINVAL;
    QEMUFile *file = s->rp_state.from_dst_file;
    unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
    uint64_t local_size = DIV_ROUND_UP(nbits, 8);
    uint64_t size, end_mark;

    trace_ram_dirty_bitmap_reload_begin(block->idstr);

    if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: incorrect state %s", __func__,
                     MigrationStatus_str(s->state));
        return -EINVAL;
    }

    /*
     * Note: see comments in ramblock_recv_bitmap_send() on why we
     * need the endianness conversion, and the paddings.
     */
    local_size = ROUND_UP(local_size, 8);

    /* Add paddings */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    size = qemu_get_be64(file);

    /* The size of the bitmap should match with our ramblock */
    if (size != local_size) {
        error_report("%s: ramblock '%s' bitmap size mismatch "
                     "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
                     block->idstr, size, local_size);
        ret = -EINVAL;
        goto out;
    }

    size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
    end_mark = qemu_get_be64(file);

    ret = qemu_file_get_error(file);
    if (ret || size != local_size) {
        error_report("%s: read bitmap failed for ramblock '%s': %d"
                     " (size 0x%"PRIx64", got: 0x%"PRIx64")",
                     __func__, block->idstr, ret, local_size, size);
        ret = -EIO;
        goto out;
    }

    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
        error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64,
                     __func__, block->idstr, end_mark);
        ret = -EINVAL;
        goto out;
    }

    /*
     * Endianness conversion. We are during postcopy (though paused).
     * The dirty bitmap won't change. We can directly modify it.
     */
    bitmap_from_le(block->bmap, le_bitmap, nbits);

    /*
     * What we received is "received bitmap". Revert it as the initial
     * dirty bitmap for this ramblock.
     */
    bitmap_complement(block->bmap, block->bmap, nbits);

    trace_ram_dirty_bitmap_reload_complete(block->idstr);

    /*
     * We succeeded in syncing the bitmap for the current ramblock. If this
     * is the last one to sync, we need to notify the main send thread.
     */
    ram_dirty_bitmap_reload_notify(s);

    ret = 0;
out:
    g_free(le_bitmap);
    return ret;
}
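
/*
 * Illustrative bit flow: a page the destination reports as received
 * (bit set in le_bitmap) ends up clear in block->bmap after the
 * complement, i.e. not dirty, so the resumed migration does not
 * resend it.
 */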

static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;
    int ret;

    ret = ram_dirty_bitmap_sync_all(s, rs);
    if (ret) {
        return ret;
    }

    ram_state_resume_prepare(rs, s->to_dst_file);

    return 0;
}

static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
}