 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/main-loop.h"
#include "qemu/pmem.h"
#include "migration.h"
#include "migration/register.h"
#include "migration/misc.h"
#include "qemu-file.h"
#include "postcopy-ram.h"
#include "page_cache.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "exec/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "sysemu/sysemu.h"
#include "qemu/uuid.h"
/***********************************************************/
/* ram save/restore */

/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
 * worked for pages that were filled with the same char. We switched
 * it to only search for the zero value. And to avoid confusion with
 * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it.
 */
#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
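
/*
 * Illustrative sketch (not part of the original file): each page record on
 * the wire begins with a be64 word that carries the page offset with one of
 * the flags above OR'ed into its low bits, e.g.
 *
 *     qemu_put_be64(f, offset | RAM_SAVE_FLAG_PAGE);
 *
 * Offsets are target-page aligned, so the low bits are free for flags; the
 * receiver masks them off to recover the offset.
 */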
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}

XBZRLECacheStats xbzrle_counters;
/* struct contains XBZRLE cache and a static page
   used by the compression */
    /* buffer used for XBZRLE encoding */
    /* buffer for storing page content */
    /* Cache for XBZRLE, Protected by lock. */
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_lock(&XBZRLE.lock);
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_unlock(&XBZRLE.lock);
}
/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from qmp_migrate_set_cache_size in main
 * thread, possibly while a migration is in progress. A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock().
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(int64_t new_size, Error **errp)
{
    PageCache *new_cache;
    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");

    if (new_size == migrate_xbzrle_cache_size()) {

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;

    XBZRLE_cache_unlock();
/* Should be holding either ram_list.mutex, or the RCU lock. */
#define RAMBLOCK_FOREACH_MIGRATABLE(block) \
    INTERNAL_RAMBLOCK_FOREACH(block)       \
        if (!qemu_ram_is_migratable(block)) {} else

#undef RAMBLOCK_FOREACH
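
/*
 * Usage sketch (illustrative only): walking just the migratable blocks under
 * the RCU read lock, e.g. to sum their sizes:
 *
 *     RAMBlock *rb;
 *     uint64_t total = 0;
 *
 *     rcu_read_lock();
 *     RAMBLOCK_FOREACH_MIGRATABLE(rb) {
 *         total += rb->used_length;
 *     }
 *     rcu_read_unlock();
 */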
static void ramblock_recv_map_init(void)
{
    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}
int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}
void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}
void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
#define RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)

/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns >0 if success with sent bytes, or <0 if error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;

        error_report("%s: invalid block name: %s", __func__, block_name);

    nbits = block->used_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment). So extend it a bit before hand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap. This is
     * required when the source and destination VMs are not using the
     * same endianness. (Note: big endian won't work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = DIV_ROUND_UP(nbits, 8);

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines. We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit machines.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);

    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    return size + sizeof(size);
}
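
/*
 * Worked example (illustrative): for a 1 GiB block with 4 KiB target pages,
 * nbits is 262144, so the bitmap body is 32768 bytes (already 8-byte
 * aligned) and the stream carries:
 *
 *     be64: 32768                      (bitmap size in bytes)
 *     32768 bytes of little-endian bitmap
 *     be64: RAMBLOCK_RECV_BITMAP_ENDING
 *
 * and the function returns 32768 + 8.
 */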
/*
 * An outstanding page request, on the source, having been received
 */
struct RAMSrcPageRequest {
    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
/* State of RAM for migration */
struct RAMState {
    /* QEMUFile used for this migration */
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* We are in the first round */
    /* How many times we have dirty too many pages */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* compression statistics since the beginning of the period */
    /* number of times that no free thread was available to compress data */
    uint64_t compress_thread_busy_prev;
    /* amount of bytes after compression */
    uint64_t compressed_size_prev;
    /* amount of compressed pages */
    uint64_t compress_pages_prev;
    /* total handled target pages at the beginning of period */
    uint64_t target_page_count_prev;
    /* total handled target pages since start */
    uint64_t target_page_count;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /* protects modification of the bitmap */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
};
typedef struct RAMState RAMState;
static RAMState *ram_state;

uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}

MigrationStats ram_counters;
/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    /* Current page to search from */
    /* Set once we wrap around */
};
typedef struct PageSearchStatus PageSearchStatus;

CompressionStats compression_counters;
struct CompressParam {
    /* internally used fields */
};
typedef struct CompressParam CompressParam;

struct DecompressParam {
};
typedef struct DecompressParam DecompressParam;
static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static QEMUFile *decomp_file;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;

static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
                                 ram_addr_t offset, uint8_t *source_buf);
static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        block = param->block;
        offset = param->offset;
        qemu_mutex_unlock(&param->mutex);

        zero_page = do_compress_ram_page(param->file, &param->stream,
                                         block, offset, param->originbuf);

        qemu_mutex_lock(&comp_done_lock);
        param->zero_page = zero_page;
        qemu_cond_signal(&comp_done_cond);
        qemu_mutex_unlock(&comp_done_lock);

        qemu_mutex_lock(&param->mutex);
        qemu_cond_wait(&param->cond, &param->mutex);
    }
    qemu_mutex_unlock(&param->mutex);
static void compress_threads_save_cleanup(void)
{
    if (!migrate_use_compression() || !comp_param) {

    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        /*
         * we use it as an indicator which shows if the thread is
         * properly init'd or not
         */
        if (!comp_param[i].file) {

        qemu_mutex_lock(&comp_param[i].mutex);
        comp_param[i].quit = true;
        qemu_cond_signal(&comp_param[i].cond);
        qemu_mutex_unlock(&comp_param[i].mutex);

        qemu_thread_join(compress_threads + i);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
        deflateEnd(&comp_param[i].stream);
        g_free(comp_param[i].originbuf);
        qemu_fclose(comp_param[i].file);
        comp_param[i].file = NULL;
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    compress_threads = NULL;
static int compress_threads_save_setup(void)
{
    if (!migrate_use_compression()) {

    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!comp_param[i].originbuf) {

        if (deflateInit(&comp_param[i].stream,
                        migrate_compress_level()) != Z_OK) {
            g_free(comp_param[i].originbuf);

        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }

    compress_threads_save_cleanup();
#define MULTIFD_MAGIC 0x11223344U
#define MULTIFD_VERSION 1

#define MULTIFD_FLAG_SYNC (1 << 0)

    unsigned char uuid[16]; /* QemuUUID */
} __attribute__((packed)) MultiFDInit_t;

} __attribute__((packed)) MultiFDPacket_t;

    /* number of used pages */
    /* number of allocated pages */
    /* global number of generated multifd packets */
    /* offset of each page */
    /* pointer to each page */
    /* these fields are not changed once the thread is created */
    /* channel thread name */
    /* channel thread id */
    /* communication channel */
    /* sem where to wait for more work */
    /* this mutex protects the following parameters */
    /* is this channel thread running */
    /* should this thread finish */
    /* thread has work to do */
    /* array of pages to send */
    MultiFDPages_t *pages;
    /* packet allocated len */
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    /* global number of generated multifd packets */
    /* thread local variables */
    /* packets sent through this channel */
    uint64_t num_packets;
    /* pages sent through this channel */
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
    /* these fields are not changed once the thread is created */
    /* channel thread name */
    /* channel thread id */
    /* communication channel */
    /* this mutex protects the following parameters */
    /* is this channel thread running */
    /* array of pages to receive */
    MultiFDPages_t *pages;
    /* packet allocated len */
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    /* global number of generated multifd packets */
    /* thread local variables */
    /* packets received through this channel */
    uint64_t num_packets;
    /* pages received through this channel */
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
{
    msg.magic = cpu_to_be32(MULTIFD_MAGIC);
    msg.version = cpu_to_be32(MULTIFD_VERSION);
    memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));

    ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
{
    ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);

    msg.magic = be32_to_cpu(msg.magic);
    msg.version = be32_to_cpu(msg.version);

    if (msg.magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet magic %x "
                   "expected %x", msg.magic, MULTIFD_MAGIC);

    if (msg.version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet version %d "
                   "expected %d", msg.version, MULTIFD_VERSION);

    if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
        char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
        char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);

        error_setg(errp, "multifd: received uuid '%s' and expected "
                   "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);

    if (msg.id > migrate_multifd_channels()) {
        error_setg(errp, "multifd: received channel version %d "
                   "expected %d", msg.version, MULTIFD_VERSION);
static MultiFDPages_t *multifd_pages_init(size_t size)
{
    MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);

    pages->allocated = size;
    pages->iov = g_new0(struct iovec, size);
    pages->offset = g_new0(ram_addr_t, size);

static void multifd_pages_clear(MultiFDPages_t *pages)
{
    pages->allocated = 0;
    pages->packet_num = 0;

    g_free(pages->offset);
    pages->offset = NULL;
static void multifd_send_fill_packet(MultiFDSendParams *p)
{
    MultiFDPacket_t *packet = p->packet;

    packet->magic = cpu_to_be32(MULTIFD_MAGIC);
    packet->version = cpu_to_be32(MULTIFD_VERSION);
    packet->flags = cpu_to_be32(p->flags);
    packet->size = cpu_to_be32(migrate_multifd_page_count());
    packet->used = cpu_to_be32(p->pages->used);
    packet->packet_num = cpu_to_be64(p->packet_num);

    if (p->pages->block) {
        strncpy(packet->ramblock, p->pages->block->idstr, 256);
    }

    for (i = 0; i < p->pages->used; i++) {
        packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
    }
}
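
/*
 * Rough packet layout implied by the fill/unfill helpers (sketch, all
 * integers big-endian on the wire):
 *
 *     uint32 magic        MULTIFD_MAGIC
 *     uint32 version      MULTIFD_VERSION
 *     uint32 flags        e.g. MULTIFD_FLAG_SYNC
 *     uint32 size         pages allocated per packet
 *     uint32 used         pages actually carried
 *     uint64 packet_num
 *     char   ramblock[256]
 *     uint64 offset[used]
 *
 * followed on the channel by the page contents themselves.
 */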
static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
{
    MultiFDPacket_t *packet = p->packet;

    packet->magic = be32_to_cpu(packet->magic);
    if (packet->magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet "
                   "magic %x and expected magic %x",
                   packet->magic, MULTIFD_MAGIC);

    packet->version = be32_to_cpu(packet->version);
    if (packet->version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet "
                   "version %d and expected version %d",
                   packet->version, MULTIFD_VERSION);

    p->flags = be32_to_cpu(packet->flags);

    packet->size = be32_to_cpu(packet->size);
    if (packet->size > migrate_multifd_page_count()) {
        error_setg(errp, "multifd: received packet "
                   "with size %d and expected maximum size %d",
                   packet->size, migrate_multifd_page_count());

    p->pages->used = be32_to_cpu(packet->used);
    if (p->pages->used > packet->size) {
        error_setg(errp, "multifd: received packet "
                   "with size %d and expected maximum size %d",
                   p->pages->used, packet->size);

    p->packet_num = be64_to_cpu(packet->packet_num);

    if (p->pages->used) {
        /* make sure that ramblock is 0 terminated */
        packet->ramblock[255] = 0;
        block = qemu_ram_block_by_name(packet->ramblock);
            error_setg(errp, "multifd: unknown ram block %s",

    for (i = 0; i < p->pages->used; i++) {
        ram_addr_t offset = be64_to_cpu(packet->offset[i]);

        if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
            error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
                       " (max " RAM_ADDR_FMT ")",
                       offset, block->max_length);

        p->pages->iov[i].iov_base = block->host + offset;
        p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
    MultiFDSendParams *params;
    /* number of created threads */
    /* array of pages to send */
    MultiFDPages_t *pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
    /* global number of generated multifd packets */
    /* send channels ready */
    QemuSemaphore channels_ready;
} *multifd_send_state;
/*
 * How we use multifd_send_state->pages and channel->pages?
 *
 * We create a "pages" struct for each channel, and a main one. Each time
 * that we need to send a batch of pages we interchange the ones between
 * multifd_send_state and the channel that is sending it. There are
 * two reasons for that:
 *    - to not have to do so many mallocs during migration
 *    - to make it easier to know what to free at the end of migration
 *
 * This way we always know who is the owner of each "pages" struct,
 * and we don't need any locking. It belongs to the migration thread
 * or to the channel thread. Switching is safe because the migration
 * thread is using the channel mutex when changing it, and the channel
 * has to have finished with its own, otherwise pending_job can't be
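
/*
 * A minimal sketch of the swap described above (illustrative only; the real
 * code below also updates packet numbers and byte counters):
 *
 *     qemu_mutex_lock(&p->mutex);
 *     tmp = multifd_send_state->pages;       // batch filled by main thread
 *     multifd_send_state->pages = p->pages;  // hand main thread an empty one
 *     p->pages = tmp;                        // channel now owns the batch
 *     qemu_mutex_unlock(&p->mutex);
 */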
static void multifd_send_pages(void)
{
    static int next_channel;
    MultiFDSendParams *p = NULL; /* make gcc happy */
    MultiFDPages_t *pages = multifd_send_state->pages;
    uint64_t transferred;

    qemu_sem_wait(&multifd_send_state->channels_ready);
    for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
        p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        if (!p->pending_job) {
            next_channel = (i + 1) % migrate_multifd_channels();

        qemu_mutex_unlock(&p->mutex);

    p->packet_num = multifd_send_state->packet_num++;
    p->pages->block = NULL;
    multifd_send_state->pages = p->pages;

    transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
    ram_counters.multifd_bytes += transferred;
    ram_counters.transferred += transferred;
    qemu_mutex_unlock(&p->mutex);
    qemu_sem_post(&p->sem);
}
static void multifd_queue_page(RAMBlock *block, ram_addr_t offset)
{
    MultiFDPages_t *pages = multifd_send_state->pages;

        pages->block = block;

    if (pages->block == block) {
        pages->offset[pages->used] = offset;
        pages->iov[pages->used].iov_base = block->host + offset;
        pages->iov[pages->used].iov_len = TARGET_PAGE_SIZE;

        if (pages->used < pages->allocated) {

    multifd_send_pages();

    if (pages->block != block) {
        multifd_queue_page(block, offset);
static void multifd_send_terminate_threads(Error *err)
{
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
            s->state == MIGRATION_STATUS_DEVICE ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        qemu_sem_post(&p->sem);
        qemu_mutex_unlock(&p->mutex);
    }
}
int multifd_save_cleanup(Error **errp)
{
    if (!migrate_use_multifd()) {

    multifd_send_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_thread_join(&p->thread);
        socket_send_channel_destroy(p->c);
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem);
        qemu_sem_destroy(&p->sem_sync);
        multifd_pages_clear(p->pages);
    }
    qemu_sem_destroy(&multifd_send_state->channels_ready);
    qemu_sem_destroy(&multifd_send_state->sem_sync);
    g_free(multifd_send_state->params);
    multifd_send_state->params = NULL;
    multifd_pages_clear(multifd_send_state->pages);
    multifd_send_state->pages = NULL;
    g_free(multifd_send_state);
    multifd_send_state = NULL;
static void multifd_send_sync_main(void)
{
    if (!migrate_use_multifd()) {

    if (multifd_send_state->pages->used) {
        multifd_send_pages();
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_signal(p->id);

        qemu_mutex_lock(&p->mutex);
        p->packet_num = multifd_send_state->packet_num++;
        p->flags |= MULTIFD_FLAG_SYNC;
        qemu_mutex_unlock(&p->mutex);
        qemu_sem_post(&p->sem);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_wait(p->id);
        qemu_sem_wait(&multifd_send_state->sem_sync);
    }
    trace_multifd_send_sync_main(multifd_send_state->packet_num);
}
static void *multifd_send_thread(void *opaque)
{
    MultiFDSendParams *p = opaque;
    Error *local_err = NULL;

    trace_multifd_send_thread_start(p->id);
    rcu_register_thread();

    if (multifd_send_initial_packet(p, &local_err) < 0) {

    /* initial packet */

        qemu_sem_wait(&p->sem);
        qemu_mutex_lock(&p->mutex);

        if (p->pending_job) {
            uint32_t used = p->pages->used;
            uint64_t packet_num = p->packet_num;
            uint32_t flags = p->flags;

            multifd_send_fill_packet(p);
            p->num_pages += used;
            qemu_mutex_unlock(&p->mutex);

            trace_multifd_send(p->id, packet_num, used, flags);

            ret = qio_channel_write_all(p->c, (void *)p->packet,
                                        p->packet_len, &local_err);

            ret = qio_channel_writev_all(p->c, p->pages->iov, used, &local_err);

            qemu_mutex_lock(&p->mutex);
            qemu_mutex_unlock(&p->mutex);

            if (flags & MULTIFD_FLAG_SYNC) {
                qemu_sem_post(&multifd_send_state->sem_sync);
            }
            qemu_sem_post(&multifd_send_state->channels_ready);
        } else if (p->quit) {
            qemu_mutex_unlock(&p->mutex);

            qemu_mutex_unlock(&p->mutex);
            /* sometimes there are spurious wakeups */

    multifd_send_terminate_threads(local_err);

    qemu_mutex_lock(&p->mutex);
    qemu_mutex_unlock(&p->mutex);

    rcu_unregister_thread();
    trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);
static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
{
    MultiFDSendParams *p = opaque;
    QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
    Error *local_err = NULL;

    if (qio_task_propagate_error(task, &local_err)) {
        if (multifd_save_cleanup(&local_err) != 0) {
            migrate_set_error(migrate_get_current(), local_err);

        p->c = QIO_CHANNEL(sioc);
        qio_channel_set_delay(p->c, false);

        qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
                           QEMU_THREAD_JOINABLE);
        atomic_inc(&multifd_send_state->count);
int multifd_save_setup(void)
{
    uint32_t page_count = migrate_multifd_page_count();

    if (!migrate_use_multifd()) {

    thread_count = migrate_multifd_channels();
    multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
    multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
    atomic_set(&multifd_send_state->count, 0);
    multifd_send_state->pages = multifd_pages_init(page_count);
    qemu_sem_init(&multifd_send_state->sem_sync, 0);
    qemu_sem_init(&multifd_send_state->channels_ready, 0);

    for (i = 0; i < thread_count; i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem, 0);
        qemu_sem_init(&p->sem_sync, 0);
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(ram_addr_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->name = g_strdup_printf("multifdsend_%d", i);
        socket_send_channel_create(multifd_new_send_channel_async, p);
    }
    MultiFDRecvParams *params;
    /* number of created threads */
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
    /* global number of generated multifd packets */
    uint64_t packet_num;
} *multifd_recv_state;
static void multifd_recv_terminate_threads(Error *err)
{
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_lock(&p->mutex);
        /* We could arrive here for two reasons:
           - normal quit, i.e. everything went fine, just finished
           - error quit: We close the channels so the channel threads
             finish the qio_channel_read_all_eof() */
        qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        qemu_mutex_unlock(&p->mutex);
    }
}
int multifd_load_cleanup(Error **errp)
{
    if (!migrate_use_multifd()) {

    multifd_recv_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_thread_join(&p->thread);
        object_unref(OBJECT(p->c));
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem_sync);
        multifd_pages_clear(p->pages);
    }
    qemu_sem_destroy(&multifd_recv_state->sem_sync);
    g_free(multifd_recv_state->params);
    multifd_recv_state->params = NULL;
    g_free(multifd_recv_state);
    multifd_recv_state = NULL;
static void multifd_recv_sync_main(void)
{
    if (!migrate_use_multifd()) {

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        trace_multifd_recv_sync_main_wait(p->id);
        qemu_sem_wait(&multifd_recv_state->sem_sync);
        qemu_mutex_lock(&p->mutex);
        if (multifd_recv_state->packet_num < p->packet_num) {
            multifd_recv_state->packet_num = p->packet_num;
        }
        qemu_mutex_unlock(&p->mutex);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        trace_multifd_recv_sync_main_signal(p->id);
        qemu_sem_post(&p->sem_sync);
    }
    trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
}
static void *multifd_recv_thread(void *opaque)
{
    MultiFDRecvParams *p = opaque;
    Error *local_err = NULL;

    trace_multifd_recv_thread_start(p->id);
    rcu_register_thread();

        ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
                                       p->packet_len, &local_err);
        if (ret == 0) {   /* EOF */

        if (ret == -1) {   /* Error */

        qemu_mutex_lock(&p->mutex);
        ret = multifd_recv_unfill_packet(p, &local_err);
            qemu_mutex_unlock(&p->mutex);

        used = p->pages->used;

        trace_multifd_recv(p->id, p->packet_num, used, flags);
        p->num_pages += used;
        qemu_mutex_unlock(&p->mutex);

        ret = qio_channel_readv_all(p->c, p->pages->iov, used, &local_err);

        if (flags & MULTIFD_FLAG_SYNC) {
            qemu_sem_post(&multifd_recv_state->sem_sync);
            qemu_sem_wait(&p->sem_sync);
        }

    multifd_recv_terminate_threads(local_err);

    qemu_mutex_lock(&p->mutex);
    qemu_mutex_unlock(&p->mutex);

    rcu_unregister_thread();
    trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);
int multifd_load_setup(void)
{
    uint32_t page_count = migrate_multifd_page_count();

    if (!migrate_use_multifd()) {

    thread_count = migrate_multifd_channels();
    multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
    multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
    atomic_set(&multifd_recv_state->count, 0);
    qemu_sem_init(&multifd_recv_state->sem_sync, 0);

    for (i = 0; i < thread_count; i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem_sync, 0);
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(ram_addr_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->name = g_strdup_printf("multifdrecv_%d", i);
    }
bool multifd_recv_all_channels_created(void)
{
    int thread_count = migrate_multifd_channels();

    if (!migrate_use_multifd()) {

    return thread_count == atomic_read(&multifd_recv_state->count);
}
/*
 * Try to receive all multifd channels to get ready for the migration.
 * - Return true and do not set @errp when correctly receiving all channels;
 * - Return false and do not set @errp when correctly receiving the current one;
 * - Return false and set @errp when failing to receive the current channel.
 */
bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
{
    MultiFDRecvParams *p;
    Error *local_err = NULL;

    id = multifd_recv_initial_packet(ioc, &local_err);
        multifd_recv_terminate_threads(local_err);
        error_propagate_prepend(errp, local_err,
                                "failed to receive packet"
                                " via multifd channel %d: ",
                                atomic_read(&multifd_recv_state->count));

    p = &multifd_recv_state->params[id];
        error_setg(&local_err, "multifd: received id '%d' already setup'",
        multifd_recv_terminate_threads(local_err);
        error_propagate(errp, local_err);

    object_ref(OBJECT(ioc));
    /* initial packet */

    qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
                       QEMU_THREAD_JOINABLE);
    atomic_inc(&multifd_recv_state->count);
    return atomic_read(&multifd_recv_state->count) ==
           migrate_multifd_channels();
}
/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
                               ram_addr_t offset)
{
    if (block == rs->last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        rs->last_sent_block = block;
/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes. If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(void)
{
    MigrationState *s = migrate_get_current();
    uint64_t pct_initial = s->parameters.cpu_throttle_initial;
    uint64_t pct_icrement = s->parameters.cpu_throttle_increment;
    int pct_max = s->parameters.max_cpu_throttle;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_icrement,
/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @rs: current RAM state
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
{
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
                 ram_counters.dirty_sync_count);
}
#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 */
static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr,
                         ram_counters.dirty_sync_count)) {
        xbzrle_counters.cache_miss++;
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             ram_counters.dirty_sync_count) == -1) {

                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,

    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        xbzrle_counters.overflow++;
        /* update data in the cache */
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;

    /* we need to update the data in the cache, in order to get the same data */
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(rs, rs->f, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(rs->f, encoded_len);
    qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    xbzrle_counters.pages++;
    xbzrle_counters.bytes += bytes_xbzrle;
    ram_counters.transferred += bytes_xbzrle;
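
/*
 * Byte accounting sketch (illustrative): an XBZRLE page costs the page
 * header written by save_page_header() plus 1 byte for ENCODING_FLAG_XBZRLE,
 * 2 bytes for the be16 encoded length and encoded_len bytes of payload,
 * which is exactly the "encoded_len + 1 + 2" added above.
 */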
/**
 * migration_bitmap_find_dirty: find the next dirty page from start
 *
 * Called with rcu_read_lock() to protect migration_bitmap
 *
 * Returns the byte offset within memory region of the start of a dirty page
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 */
unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                          unsigned long start)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;

    if (!qemu_ram_is_migratable(rb)) {

    if (rs->ram_bulk_stage && start > 0) {

    next = find_next_bit(bitmap, size, start);
static inline bool migration_bitmap_clear_dirty(RAMState *rs,
{
    ret = test_and_clear_bit(page, rb->bmap);

        rs->migration_dirty_pages--;
static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
                                        ram_addr_t start, ram_addr_t length)
{
    rs->migration_dirty_pages +=
        cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
                                              &rs->num_dirty_pages_period);
}
/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size. If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    uint64_t summary = 0;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        summary |= block->page_size;
    }
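
/*
 * Example (illustrative): a guest whose blocks use 4 KiB pages plus one
 * 2 MiB hugepage-backed block yields 0x1000 | 0x200000 = 0x201000, so a
 * caller can tell at a glance that more than one page size is in use.
 */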
static void migration_update_rates(RAMState *rs, int64_t end_time)
{
    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
    double compressed_size;

    /* calculate period counters */
    ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
                / (end_time - rs->time_last_bitmap_sync);

    if (migrate_use_xbzrle()) {
        xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
            rs->xbzrle_cache_miss_prev) / page_count;
        rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
    }

    if (migrate_use_compression()) {
        compression_counters.busy_rate = (double)(compression_counters.busy -
            rs->compress_thread_busy_prev) / page_count;
        rs->compress_thread_busy_prev = compression_counters.busy;

        compressed_size = compression_counters.compressed_size -
                          rs->compressed_size_prev;
        if (compressed_size) {
            double uncompressed_size = (compression_counters.pages -
                                    rs->compress_pages_prev) * TARGET_PAGE_SIZE;

            /* Compression-Ratio = Uncompressed-size / Compressed-size */
            compression_counters.compression_rate =
                                        uncompressed_size / compressed_size;

            rs->compress_pages_prev = compression_counters.pages;
            rs->compressed_size_prev = compression_counters.compressed_size;
        }
    }
}
static void migration_bitmap_sync(RAMState *rs)
{
    uint64_t bytes_xfer_now;

    ram_counters.dirty_sync_count++;

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync();

    qemu_mutex_lock(&rs->bitmap_mutex);
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        migration_bitmap_sync_range(rs, block, 0, block->used_length);
    }
    ram_counters.remaining = ram_bytes_remaining();
    qemu_mutex_unlock(&rs->bitmap_mutex);

    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        bytes_xfer_now = ram_counters.transferred;

        /* During block migration the auto-converge logic incorrectly detects
         * that ram migration makes no progress. Avoid this by disabling the
         * throttling logic during the bulk phase of block migration. */
        if (migrate_auto_converge() && !blk_mig_bulk_active()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes is 50% more than the approx.
               amount of bytes that just got transferred since the last time we
               were in this routine. If that happens twice, start or increase
               throttling. */
            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
                   (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
                (++rs->dirty_rate_high_cnt >= 2)) {
                trace_migration_throttle();
                rs->dirty_rate_high_cnt = 0;
                mig_throttle_guest_down();
            }
        }

        migration_update_rates(rs, end_time);

        rs->target_page_count_prev = rs->target_page_count;

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
        rs->bytes_xfer_prev = bytes_xfer_now;
    }
    if (migrate_use_events()) {
        qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
    }
}
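
/*
 * Worked example (illustrative): if a sync period transferred about 1000 MB
 * while the guest dirtied about 600 MB, then 600 MB > 1000 MB / 2, so the
 * period counts as dirtying too fast; two such consecutive periods trigger
 * mig_throttle_guest_down().
 */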
/**
 * save_zero_page_to_file: send the zero page to the file
 *
 * Returns the size of data written to the file, 0 means the page is not
 * a zero page
 *
 * @rs: current RAM state
 * @file: the file where the data is saved
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
                                  RAMBlock *block, ram_addr_t offset)
{
    uint8_t *p = block->host + offset;

    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
        qemu_put_byte(file, 0);
/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
{
    int len = save_zero_page_to_file(rs, rs->f, block, offset);

        ram_counters.duplicate++;
        ram_counters.transferred += len;
static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
{
    if (!migrate_release_ram() || !migration_in_postcopy()) {

    ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
}
/*
 * @pages: the number of pages written by the control path,
 *       > 0 - number of pages written
 *
 * Return true if the page has been saved, otherwise false is returned.
 */
static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
{
    uint64_t bytes_xmit = 0;

    ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,

    if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {

        ram_counters.transferred += bytes_xmit;

    if (ret == RAM_SAVE_CONTROL_DELAYED) {

    if (bytes_xmit > 0) {
        ram_counters.normal++;
    } else if (bytes_xmit == 0) {
        ram_counters.duplicate++;
/**
 * directly send the page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @buf: the page to be sent
 * @async: send the page asynchronously
 */
static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
                            uint8_t *buf, bool async)
{
    ram_counters.transferred += save_page_header(rs, rs->f, block,
                                                 offset | RAM_SAVE_FLAG_PAGE);
        qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
                              migrate_release_ram() &
                              migration_in_postcopy());
        qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
    ram_counters.transferred += TARGET_PAGE_SIZE;
    ram_counters.normal++;
/**
 * ram_save_page: send the given page to the stream
 *
 * Returns the number of pages written.
 *          >=0 - Number of pages written - this might legally be 0
 *                if xbzrle noticed the page was the same.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 */
static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
{
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
    ram_addr_t current_addr = block->offset + offset;

    p = block->host + offset;
    trace_ram_save_page(block->idstr, (uint64_t)offset, p);

    XBZRLE_cache_lock();
    if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
        migrate_use_xbzrle()) {
        pages = save_xbzrle_page(rs, &p, current_addr, block,
                                 offset, last_stage);
            /* Can't send this cached data async, since the cache page
             * might get updated before it gets to the wire
             */

    /* XBZRLE overflow or normal page */
        pages = save_normal_page(rs, block, offset, p, send_async);

    XBZRLE_cache_unlock();
static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
{
    multifd_queue_page(block, offset);
    ram_counters.normal++;
static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
                                 ram_addr_t offset, uint8_t *source_buf)
{
    RAMState *rs = ram_state;
    uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
    bool zero_page = false;

    if (save_zero_page_to_file(rs, f, block, offset)) {

    save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);

    /*
     * copy it to an internal buffer to avoid it being modified by VM
     * so that we can catch up the error during compression and
     * decompression
     */
    memcpy(source_buf, p, TARGET_PAGE_SIZE);
    ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
        qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
        error_report("compressed data failed!");

    ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
static void
update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
{
    ram_counters.transferred += bytes_xmit;

    if (param->zero_page) {
        ram_counters.duplicate++;

    /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
    compression_counters.compressed_size += bytes_xmit - 8;
    compression_counters.pages++;
}
static bool save_page_use_compression(RAMState *rs);

static void flush_compressed_data(RAMState *rs)
{
    int idx, len, thread_count;

    if (!save_page_use_compression(rs)) {

    thread_count = migrate_compress_threads();

    qemu_mutex_lock(&comp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!comp_param[idx].done) {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        if (!comp_param[idx].quit) {
            len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
            /*
             * it's safe to fetch zero_page without holding comp_done_lock
             * as there is no further request submitted to the thread,
             * i.e., the thread should be waiting for a request at this point.
             */
            update_compress_thread_counts(&comp_param[idx], len);
        }
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}
static inline void set_compress_params(CompressParam *param, RAMBlock *block,
                                       ram_addr_t offset)
{
    param->block = block;
    param->offset = offset;
}
static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
{
    int idx, thread_count, bytes_xmit = -1, pages = -1;
    bool wait = migrate_compress_wait_thread();

    thread_count = migrate_compress_threads();
    qemu_mutex_lock(&comp_done_lock);

    for (idx = 0; idx < thread_count; idx++) {
        if (comp_param[idx].done) {
            comp_param[idx].done = false;
            bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
            qemu_mutex_lock(&comp_param[idx].mutex);
            set_compress_params(&comp_param[idx], block, offset);
            qemu_cond_signal(&comp_param[idx].cond);
            qemu_mutex_unlock(&comp_param[idx].mutex);

            update_compress_thread_counts(&comp_param[idx], bytes_xmit);
        }
    }

    /*
     * wait for the free thread if the user specifies 'compress-wait-thread',
     * otherwise we will post the page out in the main thread as normal page.
     */
    if (pages < 0 && wait) {
        qemu_cond_wait(&comp_done_cond, &comp_done_lock);
    }
    qemu_mutex_unlock(&comp_done_lock);
/**
 * find_dirty_block: find the next dirty page and update any state
 * associated with the search process.
 *
 * Returns if a page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 * @again: set to false if the search has scanned the whole of RAM
 */
static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
{
    pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
    if (pss->complete_round && pss->block == rs->last_seen_block &&
        pss->page >= rs->last_page) {
        /*
         * We've been once around the RAM and haven't found anything.
         */

    if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
        /* Didn't find anything in this RAM Block */
        pss->block = QLIST_NEXT_RCU(pss->block, next);
            /*
             * If memory migration starts over, we will meet a dirtied page
             * which may still exist in the compression threads' ring, so we
             * should flush the compressed data to make sure the new page
             * is not overwritten by the old one in the destination.
             *
             * Also, if xbzrle is on, stop using the data compression at this
             * point. In theory, xbzrle can do better than compression.
             */
            flush_compressed_data(rs);

            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            rs->ram_bulk_stage = false;
        /* Didn't find anything this time, but try again on the new block */
    /* Can go around again, but... */
    /* We've found something so probably don't need to */
2073 * Helper for 'get_queued_page' - gets a page off the queue
2075 * Returns the block of the page (or NULL if none available)
2077 * @rs: current RAM state
2078 * @offset: used to return the offset within the RAMBlock
2080 static RAMBlock
*unqueue_page(RAMState
*rs
, ram_addr_t
*offset
)
2082 RAMBlock
*block
= NULL
;
2084 if (QSIMPLEQ_EMPTY_ATOMIC(&rs
->src_page_requests
)) {
2088 qemu_mutex_lock(&rs
->src_page_req_mutex
);
2089 if (!QSIMPLEQ_EMPTY(&rs
->src_page_requests
)) {
2090 struct RAMSrcPageRequest
*entry
=
2091 QSIMPLEQ_FIRST(&rs
->src_page_requests
);
2093 *offset
= entry
->offset
;
2095 if (entry
->len
> TARGET_PAGE_SIZE
) {
2096 entry
->len
-= TARGET_PAGE_SIZE
;
2097 entry
->offset
+= TARGET_PAGE_SIZE
;
2099 memory_region_unref(block
->mr
);
2100 QSIMPLEQ_REMOVE_HEAD(&rs
->src_page_requests
, next_req
);
2102 migration_consume_urgent_request();
2105 qemu_mutex_unlock(&rs
->src_page_req_mutex
);
/**
 * get_queued_page: unqueue a page from the postcopy requests
 *
 * Skips pages that are already sent (!dirty)
 *
 * Returns if a queued page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 */
static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
{
        block = unqueue_page(rs, &offset);
            /*
             * We're sending this page, and since it's postcopy nothing else
             * will dirty it, and we must make sure it doesn't get sent again
             * even if this queue request was received after the background
             * search already sent it.
             */
            page = offset >> TARGET_PAGE_BITS;
            dirty = test_bit(page, block->bmap);
                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
                                                page, test_bit(page, block->unsentmap));
                trace_get_queued_page(block->idstr, (uint64_t)offset, page);

    } while (block && !dirty);

        /*
         * As soon as we start servicing pages out of order, then we have
         * to kill the bulk stage, since the bulk stage assumes
         * in (migration_bitmap_find_and_reset_dirty) that every page is
         * dirty, that's no longer true.
         */
        rs->ram_bulk_stage = false;

        /*
         * We want the background search to continue from the queued page
         * since the guest is likely to want other pages near to the page
         * it just requested.
         */
        pss->page = offset >> TARGET_PAGE_BITS;
/**
 * migration_page_queue_free: drop any remaining pages in the ram
 * request queue
 *
 * It should be empty at the end anyway, but in error cases there may
 * be some left. In case there is any page left, we drop it.
 */
static void migration_page_queue_free(RAMState *rs)
{
    struct RAMSrcPageRequest *mspr, *next_mspr;
    /* This queue generally should be empty - but in the case of a failed
     * migration might have some droppings in.
     */
    QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
        memory_region_unref(mspr->rb->mr);
        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
    }
}
/**
 * ram_save_queue_pages: queue the page for transmission
 *
 * A request from postcopy destination for example.
 *
 * Returns zero on success or negative on error
 *
 * @rbname: Name of the RAMBLock of the request. NULL means the
 *          same as the last one.
 * @start: starting address from the start of the RAMBlock
 * @len: length (in bytes) to send
 */
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
{
    RAMState *rs = ram_state;

    ram_counters.postcopy_requests++;

        /* Reuse last RAMBlock */
        ramblock = rs->last_req_rb;
            /*
             * Shouldn't happen, we can't reuse the last RAMBlock if
             * it's the 1st request.
             */
            error_report("ram_save_queue_pages no previous block");

        ramblock = qemu_ram_block_by_name(rbname);
            /* We shouldn't be asked for a non-existent RAMBlock */
            error_report("ram_save_queue_pages no block '%s'", rbname);

        rs->last_req_rb = ramblock;

    trace_ram_save_queue_pages(ramblock->idstr, start, len);
    if (start + len > ramblock->used_length) {
        error_report("%s request overrun start=" RAM_ADDR_FMT " len="
                     RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
                     __func__, start, len, ramblock->used_length);

    struct RAMSrcPageRequest *new_entry =
        g_malloc0(sizeof(struct RAMSrcPageRequest));
    new_entry->rb = ramblock;
    new_entry->offset = start;
    new_entry->len = len;

    memory_region_ref(ramblock->mr);
    qemu_mutex_lock(&rs->src_page_req_mutex);
    QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
    migration_make_urgent_request();
    qemu_mutex_unlock(&rs->src_page_req_mutex);
static bool save_page_use_compression(RAMState *rs)
{
    if (!migrate_use_compression()) {

    /*
     * If xbzrle is on, stop using the data compression after first
     * round of migration even if compression is enabled. In theory,
     * xbzrle can do better than compression.
     */
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
2281 * try to compress the page before posting it out, return true if the page
2282 * has been properly handled by compression, otherwise needs other
2283 * paths to handle it
2285 static bool save_compress_page(RAMState
*rs
, RAMBlock
*block
, ram_addr_t offset
)
2287 if (!save_page_use_compression(rs
)) {
2292 * When starting the process of a new block, the first page of
2293 * the block should be sent out before other pages in the same
2294 * block, and all the pages in last block should have been sent
2295 * out, keeping this order is important, because the 'cont' flag
2296 * is used to avoid resending the block name.
2298 * We post the fist page as normal page as compression will take
2299 * much CPU resource.
2301 if (block
!= rs
->last_sent_block
) {
2302 flush_compressed_data(rs
);
2306 if (compress_page_with_multi_thread(rs
, block
, offset
) > 0) {
2310 compression_counters
.busy
++;
/*
 * ram_save_target_page: save one target page
 *
 * Returns the number of pages written
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
                                bool last_stage)
{
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;

    if (control_save_page(rs, block, offset, &res)) {

    if (save_compress_page(rs, block, offset)) {

    res = save_zero_page(rs, block, offset);
        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
         * page would be stale
         */
        if (!save_page_use_compression(rs)) {
            XBZRLE_cache_lock();
            xbzrle_cache_zero_page(rs, block->offset + offset);
            XBZRLE_cache_unlock();
        ram_release_pages(block->idstr, offset, res);

    /*
     * do not use multifd for compression as the first page in the new
     * block should be posted out before sending the compressed page
     */
    if (!save_page_use_compression(rs) && migrate_use_multifd()) {
        return ram_save_multifd_page(rs, block, offset);

    return ram_save_page(rs, pss, last_stage);
/*
 * ram_save_host_page: save a whole host page
 *
 * Starting at *offset send pages up to the end of the current host
 * page. It's valid for the initial offset to point into the middle of
 * a host page in which case the remainder of the hostpage is sent.
 * Only dirty target pages are sent. Note that the host page size may
 * be a huge page for this block.
 * The saving stops at the boundary of the used_length of the block
 * if the RAMBlock isn't a multiple of the host page size.
 *
 * Returns the number of pages written or negative on error
 *
 * @rs: current RAM state
 * @ms: current migration state
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
                              bool last_stage)
{
    int tmppages, pages = 0;
    size_t pagesize_bits =
        qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;

    if (!qemu_ram_is_migratable(pss->block)) {
        error_report("block %s should not be migrated !", pss->block->idstr);

        /* Check if the page is dirty and, if so, send it */
        if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {

        tmppages = ram_save_target_page(rs, pss, last_stage);

        if (pss->block->unsentmap) {
            clear_bit(pss->page, pss->block->unsentmap);

    } while ((pss->page & (pagesize_bits - 1)) &&
             offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));

    /* The offset we leave with is the last one we looked at */
/*
 * ram_find_and_save_block: finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns the number of pages written where zero means no dirty pages,
 * or negative on error
 *
 * @rs: current RAM state
 * @last_stage: if we are at the completion stage
 *
 * On systems where host-page-size > target-page-size it will send all the
 * pages in a host page that are dirty.
 */
static int ram_find_and_save_block(RAMState *rs, bool last_stage)
{
    PageSearchStatus pss;

    /* No dirty page as there is zero RAM */
    if (!ram_bytes_total()) {

    pss.block = rs->last_seen_block;
    pss.page = rs->last_page;
    pss.complete_round = false;

        pss.block = QLIST_FIRST_RCU(&ram_list.blocks);

        found = get_queued_page(rs, &pss);

            /* priority queue empty, so just search for something dirty */
            found = find_dirty_block(rs, &pss, &again);

            pages = ram_save_host_page(rs, &pss, last_stage);
    } while (!pages && again);

    rs->last_seen_block = pss.block;
    rs->last_page = pss.page;
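
/*
 * acct_update_position: account for data written outside the normal RAM
 * save path: bump the duplicate (zero) or normal page counters, add the
 * bytes to the transferred total and advance the QEMUFile position.
 */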
void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;

        ram_counters.duplicate += pages;

        ram_counters.normal += pages;
        ram_counters.transferred += size;
        qemu_update_position(f, size);
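
/*
 * ram_bytes_total: total used size, in bytes, of all migratable RAM blocks.
 */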
uint64_t ram_bytes_total(void)
{
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        total += block->used_length;
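
/*
 * xbzrle_load_setup: allocate the buffer used to decode XBZRLE pages on
 * the incoming side; it is released again by xbzrle_load_cleanup().
 */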
static void xbzrle_load_setup(void)
{
    XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);

static void xbzrle_load_cleanup(void)
{
    g_free(XBZRLE.decoded_buf);
    XBZRLE.decoded_buf = NULL;
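
/*
 * ram_state_cleanup: free the page request queue and destroy the RAMState
 * mutexes before the state itself is released.
 */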
static void ram_state_cleanup(RAMState **rsp)
{
    migration_page_queue_free(*rsp);
    qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
    qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
static void xbzrle_cleanup(void)
{
    XBZRLE_cache_lock();
    cache_fini(XBZRLE.cache);
    g_free(XBZRLE.encoded_buf);
    g_free(XBZRLE.current_buf);
    g_free(XBZRLE.zero_target_page);
    XBZRLE.cache = NULL;
    XBZRLE.encoded_buf = NULL;
    XBZRLE.current_buf = NULL;
    XBZRLE.zero_target_page = NULL;
    XBZRLE_cache_unlock();
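
/*
 * ram_save_cleanup: tear down the saving side: stop dirty logging, free the
 * per-block migration bitmaps, and clean up compression threads and RAMState.
 */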
static void ram_save_cleanup(void *opaque)
{
    RAMState **rsp = opaque;

    /* The caller holds the iothread lock or is in a bottom half, so there
     * is no write race against this migration bitmap.
     */
    memory_global_dirty_log_stop();

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        g_free(block->bmap);
        g_free(block->unsentmap);
        block->unsentmap = NULL;

    compress_threads_save_cleanup();
    ram_state_cleanup(rsp);
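
/*
 * ram_state_reset: restart the dirty page scan from the beginning of the
 * block list and re-enter the bulk stage.
 */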
static void ram_state_reset(RAMState *rs)
{
    rs->last_seen_block = NULL;
    rs->last_sent_block = NULL;
    rs->last_version = ram_list.version;
    rs->ram_bulk_stage = true;
#define MAX_WAIT 50 /* ms, half buffered_file limit */

/*
 * 'expected' is the value you expect the bitmap mostly to be full
 * of; it won't bother printing lines that are all this value.
 * If 'todump' is null the migration bitmap is dumped.
 */
void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
                           unsigned long pages)
{
    int64_t linelen = 128;

    for (cur = 0; cur < pages; cur += linelen) {
        /*
         * Last line; catch the case where the line length
         * is longer than remaining ram
         */
        if (cur + linelen > pages) {
            linelen = pages - cur;
        for (curb = 0; curb < linelen; curb++) {
            bool thisbit = test_bit(cur + curb, todump);
            linebuf[curb] = thisbit ? '1' : '.';
            found = found || (thisbit != expected);
        linebuf[curb] = '\0';
        fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
/* **** functions for postcopy ***** */

void ram_postcopy_migrated_memory_release(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        unsigned long *bitmap = block->bmap;
        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);

        while (run_start < range) {
            unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
            ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
                              (run_end - run_start) << TARGET_PAGE_BITS);
            run_start = find_next_zero_bit(bitmap, range, run_end + 1);
/*
 * postcopy_send_discard_bm_ram: discard a RAMBlock
 *
 * Returns zero on success
 *
 * Callback from postcopy_each_ram_send_discard for each RAMBlock
 * Note: At this point the 'unsentmap' is the processed bitmap combined
 *       with the dirtymap; so a '1' means it's either dirty or unsent.
 *
 * @ms: current migration state
 * @pds: state for postcopy
 * @start: RAMBlock starting page
 * @length: RAMBlock size
 */
static int postcopy_send_discard_bm_ram(MigrationState *ms,
                                        PostcopyDiscardState *pds,
                                        RAMBlock *block)
{
    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
    unsigned long current;
    unsigned long *unsentmap = block->unsentmap;

    for (current = 0; current < end; ) {
        unsigned long one = find_next_bit(unsentmap, end, current);
            unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
            unsigned long discard_length;

                discard_length = end - one;
                discard_length = zero - one;
            if (discard_length) {
                postcopy_discard_send_range(ms, pds, one, discard_length);
            current = one + discard_length;
/*
 * postcopy_each_ram_send_discard: discard all RAMBlocks
 *
 * Returns 0 for success or negative for error
 *
 * Utility for the outgoing postcopy code.
 *   Calls postcopy_send_discard_bm_ram for each RAMBlock
 *   passing it bitmap indexes and name.
 * (qemu_ram_foreach_block ends up passing unscaled lengths
 *  which would mean postcopy code would have to deal with target page)
 *
 * @ms: current migration state
 */
static int postcopy_each_ram_send_discard(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        PostcopyDiscardState *pds =
            postcopy_discard_send_init(ms, block->idstr);

        /*
         * Postcopy sends chunks of bitmap over the wire, but it
         * just needs indexes at this point, avoids it having
         * target page specific code.
         */
        ret = postcopy_send_discard_bm_ram(ms, pds, block);
        postcopy_discard_send_finish(ms, pds);
/*
 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
 *
 * Helper for postcopy_chunk_hostpages; it's called twice to
 * canonicalize the two bitmaps, that are similar, but one is
 * inverted.
 *
 * Postcopy requires that all target pages in a hostpage are dirty or
 * clean, not a mix. This function canonicalizes the bitmaps.
 *
 * @ms: current migration state
 * @unsent_pass: if true we need to canonicalize partially unsent host pages
 *               otherwise we need to canonicalize partially dirty host pages
 * @block: block that contains the page we want to canonicalize
 * @pds: state for postcopy
 */
static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                          RAMBlock *block,
                                          PostcopyDiscardState *pds)
{
    RAMState *rs = ram_state;
    unsigned long *bitmap = block->bmap;
    unsigned long *unsentmap = block->unsentmap;
    unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
    unsigned long run_start;

    if (block->page_size == TARGET_PAGE_SIZE) {
        /* Easy case - TPS==HPS for a non-huge page RAMBlock */

        /* Find a sent page */
        run_start = find_next_zero_bit(unsentmap, pages, 0);
        /* Find a dirty page */
        run_start = find_next_bit(bitmap, pages, 0);

    while (run_start < pages) {
        bool do_fixup = false;
        unsigned long fixup_start_addr;
        unsigned long host_offset;

        /*
         * If the start of this run of pages is in the middle of a host
         * page, then we need to fixup this host page.
         */
        host_offset = run_start % host_ratio;
            run_start -= host_offset;
            fixup_start_addr = run_start;
            /* For the next pass */
            run_start = run_start + host_ratio;
            /* Find the end of this run */
            unsigned long run_end;
                run_end = find_next_bit(unsentmap, pages, run_start + 1);
                run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
            /*
             * If the end isn't at the start of a host page, then the
             * run doesn't finish at the end of a host page
             * and we need to discard.
             */
            host_offset = run_end % host_ratio;
                fixup_start_addr = run_end - host_offset;
                /*
                 * This host page has gone, the next loop iteration starts
                 * from after the fixup
                 */
                run_start = fixup_start_addr + host_ratio;
                /*
                 * No discards on this iteration, next loop starts from
                 * next sent/dirty page
                 */
                run_start = run_end + 1;

            /* Tell the destination to discard this page */
            if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
                /* For the unsent_pass we:
                 *     discard partially sent pages
                 * For the !unsent_pass (dirty) we:
                 *     discard partially dirty pages that were sent
                 *     (any partially sent pages were already discarded
                 *     by the previous unsent_pass)
                 */
                postcopy_discard_send_range(ms, pds, fixup_start_addr,

            /* Clean up the bitmap */
            for (page = fixup_start_addr;
                 page < fixup_start_addr + host_ratio; page++) {
                /* All pages in this host page are now not sent */
                set_bit(page, unsentmap);

                /*
                 * Remark them as dirty, updating the count for any pages
                 * that weren't previously dirty.
                 */
                rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);

            /* Find the next sent page for the next iteration */
            run_start = find_next_zero_bit(unsentmap, pages, run_start);
            /* Find the next dirty page for the next iteration */
            run_start = find_next_bit(bitmap, pages, run_start);
/*
 * postcopy_chunk_hostpages: discard any partially sent host page
 *
 * Utility for the outgoing postcopy code.
 *
 * Discard any partially sent host-page size chunks, mark any partially
 * dirty host-page size chunks as all dirty.  In this case the host-page
 * is the host-page for the particular RAMBlock, i.e. it might be a huge page.
 *
 * Returns zero on success
 *
 * @ms: current migration state
 * @block: block we want to work with
 */
static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
{
    PostcopyDiscardState *pds =
        postcopy_discard_send_init(ms, block->idstr);

    /* First pass: Discard all partially sent host pages */
    postcopy_chunk_hostpages_pass(ms, true, block, pds);
    /*
     * Second pass: Ensure that all partially dirty host pages are made
     * fully dirty.
     */
    postcopy_chunk_hostpages_pass(ms, false, block, pds);

    postcopy_discard_send_finish(ms, pds);
/*
 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
 *
 * Returns zero on success
 *
 * Transmit the set of pages to be discarded after precopy to the target;
 * these are pages that:
 *     a) Have been previously transmitted but are now dirty again
 *     b) Pages that have never been transmitted, this ensures that
 *        any pages on the destination that have been mapped by background
 *        tasks get discarded (transparent huge pages is the specific concern)
 * Hopefully this is pretty sparse
 *
 * @ms: current migration state
 */
int ram_postcopy_send_discard_bitmap(MigrationState *ms)
{
    RAMState *rs = ram_state;

    /* This should be our last sync, the src is now paused */
    migration_bitmap_sync(rs);

    /* Easiest way to make sure we don't resume in the middle of a host-page */
    rs->last_seen_block = NULL;
    rs->last_sent_block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
        unsigned long *bitmap = block->bmap;
        unsigned long *unsentmap = block->unsentmap;

            /* We don't have a safe way to resize the sentmap, so
             * if the bitmap was resized it will be NULL at this
             * point.
             */
            error_report("migration ram resized during precopy phase");

        /* Deal with TPS != HPS and huge pages */
        ret = postcopy_chunk_hostpages(ms, block);

        /*
         * Update the unsentmap to be unsentmap = unsentmap | dirty
         */
        bitmap_or(unsentmap, unsentmap, bitmap, pages);
#ifdef DEBUG_POSTCOPY
        ram_debug_dump_bitmap(unsentmap, true, pages);

    trace_ram_postcopy_send_discard_bitmap();

    ret = postcopy_each_ram_send_discard(ms);
/*
 * ram_discard_range: discard dirtied pages at the beginning of postcopy
 *
 * Returns zero on success
 *
 * @rbname: name of the RAMBlock of the request. NULL means the
 *          same as the last one.
 * @start: RAMBlock starting page
 * @length: RAMBlock size
 */
int ram_discard_range(const char *rbname, uint64_t start, size_t length)
{
    trace_ram_discard_range(rbname, start, length);

    RAMBlock *rb = qemu_ram_block_by_name(rbname);
        error_report("ram_discard_range: Failed to find block '%s'", rbname);

    /*
     * On source VM, we don't need to update the received bitmap since
     * we don't even have one.
     */
    if (rb->receivedmap) {
        bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
                     length >> qemu_target_page_bits());

    ret = ram_block_discard_range(rb, start, length);
/*
 * For every allocation, we will try not to crash the VM if the
 * allocation fails.
 */
static int xbzrle_init(void)
{
    Error *local_err = NULL;

    if (!migrate_use_xbzrle()) {

    XBZRLE_cache_lock();

    XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
    if (!XBZRLE.zero_target_page) {
        error_report("%s: Error allocating zero page", __func__);

    XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
                              TARGET_PAGE_SIZE, &local_err);
    if (!XBZRLE.cache) {
        error_report_err(local_err);
        goto free_zero_page;

    XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
    if (!XBZRLE.encoded_buf) {
        error_report("%s: Error allocating encoded_buf", __func__);

    XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
    if (!XBZRLE.current_buf) {
        error_report("%s: Error allocating current_buf", __func__);
        goto free_encoded_buf;

    /* We are all good */
    XBZRLE_cache_unlock();

    g_free(XBZRLE.encoded_buf);
    XBZRLE.encoded_buf = NULL;

    cache_fini(XBZRLE.cache);
    XBZRLE.cache = NULL;

    g_free(XBZRLE.zero_target_page);
    XBZRLE.zero_target_page = NULL;

    XBZRLE_cache_unlock();
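
/*
 * ram_state_init: allocate the RAMState, initialise its mutexes and page
 * request queue, and seed the dirty page count from the total RAM size.
 */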
static int ram_state_init(RAMState **rsp)
{
    *rsp = g_try_new0(RAMState, 1);

        error_report("%s: Init ramstate fail", __func__);

    qemu_mutex_init(&(*rsp)->bitmap_mutex);
    qemu_mutex_init(&(*rsp)->src_page_req_mutex);
    QSIMPLEQ_INIT(&(*rsp)->src_page_requests);

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;

    ram_state_reset(*rsp);
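
/*
 * ram_list_init_bitmaps: allocate the per-RAMBlock dirty bitmap (and, for
 * postcopy, the unsentmap) and mark every page dirty/unsent to start with.
 */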
static void ram_list_init_bitmaps(void)
{
    unsigned long pages;

    /* Skip setting bitmap if there is no RAM */
    if (ram_bytes_total()) {
        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            pages = block->max_length >> TARGET_PAGE_BITS;
            block->bmap = bitmap_new(pages);
            bitmap_set(block->bmap, 0, pages);
            if (migrate_postcopy_ram()) {
                block->unsentmap = bitmap_new(pages);
                bitmap_set(block->unsentmap, 0, pages);

static void ram_init_bitmaps(RAMState *rs)
{
    /* For memory_global_dirty_log_start below. */
    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();

    ram_list_init_bitmaps();
    memory_global_dirty_log_start();
    migration_bitmap_sync(rs);

    qemu_mutex_unlock_ramlist();
    qemu_mutex_unlock_iothread();

static int ram_init_all(RAMState **rsp)
{
    if (ram_state_init(rsp)) {

    if (xbzrle_init()) {
        ram_state_cleanup(rsp);

    ram_init_bitmaps(*rsp);
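
/*
 * ram_state_resume_prepare: called when a paused postcopy migration is
 * resumed; recount the dirty pages from the reloaded bitmaps and restart
 * the scan without re-entering the bulk stage.
 */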
static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
{
    /*
     * Postcopy is not using xbzrle/compression, so no need for that.
     * Also, since the source is already halted, we don't need to care
     * about dirty page logging either.
     */
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        pages += bitmap_count_one(block->bmap,
                                  block->used_length >> TARGET_PAGE_BITS);

    /* This may not be aligned with current bitmaps. Recalculate. */
    rs->migration_dirty_pages = pages;

    rs->last_seen_block = NULL;
    rs->last_sent_block = NULL;
    rs->last_version = ram_list.version;
    /*
     * Disable the bulk stage, otherwise we'll resend the whole RAM no
     * matter what we have sent.
     */
    rs->ram_bulk_stage = false;

    /* Update RAMState cache of output QEMUFile */

    trace_ram_state_resume_prepare(pages);
/*
 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * a long-running RCU critical section.  When rcu-reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */

/*
 * ram_save_setup: Setup RAM for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMState **rsp = opaque;

    if (compress_threads_save_setup()) {

    /* migration has already setup the bitmap, reuse it. */
    if (!migration_in_colo_state()) {
        if (ram_init_all(rsp) != 0) {
            compress_threads_save_cleanup();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->used_length);
        if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
            qemu_put_be64(f, block->page_size);

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    multifd_send_sync_main();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
/*
 * ram_save_iterate: iterative stage for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;

    if (blk_mig_bulk_active()) {
        /* Avoid transferring ram during bulk phase of block migration as
         * the bulk phase will usually take a long time and transferring
         * ram updates during that time is pointless. */

    if (ram_list.version != rs->last_version) {
        ram_state_reset(rs);

    /* Read version before ram_list.blocks */

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    while ((ret = qemu_file_rate_limit(f)) == 0 ||
           !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {

        if (qemu_file_get_error(f)) {

        pages = ram_find_and_save_block(rs, false);
        /* no more pages to send */

            qemu_file_set_error(f, pages);

        rs->target_page_count += pages;

        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_clock_get_ns() is a bit expensive, so we only check it every
           few iterations.
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                trace_ram_save_iterate_big_wait(t1, i);

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    multifd_send_sync_main();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    ram_counters.transferred += 8;

    ret = qemu_file_get_error(f);
/*
 * ram_save_complete: function called to send the remaining amount of ram
 *
 * Returns zero to indicate success or negative on error
 *
 * Called with iothread lock
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;

    if (!migration_in_postcopy()) {
        migration_bitmap_sync(rs);

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
        pages = ram_find_and_save_block(rs, !migration_in_colo_state());
        /* no more blocks to send */

    flush_compressed_data(rs);
    ram_control_after_iterate(f, RAM_CONTROL_FINISH);

    multifd_send_sync_main();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
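
/*
 * ram_save_pending: estimate the remaining dirty RAM.  A bitmap sync is done
 * first if the estimate falls below max_size; when postcopy is possible the
 * remainder is reported as res_compatible, otherwise as res_precopy_only.
 */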
static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                             uint64_t *res_precopy_only,
                             uint64_t *res_compatible,
                             uint64_t *res_postcopy_only)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    uint64_t remaining_size;

    remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

    if (!migration_in_postcopy() &&
        remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        migration_bitmap_sync(rs);
        qemu_mutex_unlock_iothread();
        remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

    if (migrate_postcopy_ram()) {
        /* We can do postcopy, and all the data is postcopiable */
        *res_compatible += remaining_size;
        *res_precopy_only += remaining_size;
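
/*
 * load_xbzrle: read one XBZRLE-encoded page from the stream and apply the
 * delta on top of the current contents of @host.
 */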
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    uint8_t *loaded_data;

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");

    loaded_data = XBZRLE.decoded_buf;
    /* load data and decode */
    /* it can change loaded_data to point to an internal buffer */
    qemu_get_buffer_in_place(f, &loaded_data, xh_len);

    if (xbzrle_decode_buffer(loaded_data, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
/*
 * ram_block_from_stream: read a RAMBlock id from the migration stream
 *
 * Must be called from within an RCU critical section.
 *
 * Returns a pointer from within the RCU-protected ram_list.
 *
 * @f: QEMUFile where to read the data from
 * @flags: Page flags (mostly to see if it's a continuation of previous block)
 */
static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
{
    static RAMBlock *block = NULL;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
            error_report("Ack, bad migration stream!");

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);

    block = qemu_ram_block_by_name(id);
        error_report("Can't find block %s", id);

    if (!qemu_ram_is_migratable(block)) {
        error_report("block %s should not be migrated !", id);

static inline void *host_from_ram_block_offset(RAMBlock *block,
                                               ram_addr_t offset)
{
    if (!offset_in_ramblock(block, offset)) {

    return block->host + offset;
static inline void *colo_cache_from_block_offset(RAMBlock *block,
                                                 ram_addr_t offset)
{
    if (!offset_in_ramblock(block, offset)) {

    if (!block->colo_cache) {
        error_report("%s: colo_cache is NULL in block :%s",
                     __func__, block->idstr);

    /*
     * During colo checkpoint, we need a bitmap of these migrated pages.
     * It helps us decide which pages in the ram cache should be flushed
     * into the VM's RAM later.
     */
    if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
        ram_state->migration_dirty_pages++;

    return block->colo_cache + offset;
/*
 * ram_handle_compressed: handle the zero page case
 *
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 *
 * @host: host address for the zero page
 * @ch: what the page is filled from.  We only support zero
 * @size: size of the zero page
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
/* return the size after decompression, or negative value on error */
static int
qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
                     const uint8_t *source, size_t source_len)
{
    err = inflateReset(stream);

    stream->avail_in = source_len;
    stream->next_in = (uint8_t *)source;
    stream->avail_out = dest_len;
    stream->next_out = dest;

    err = inflate(stream, Z_NO_FLUSH);
    if (err != Z_STREAM_END) {

    return stream->total_out;
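
/*
 * do_data_decompress: body of each decompression worker thread; waits for a
 * compressed page in its DecompressParam, inflates it into the destination
 * host page and signals completion on decomp_done_cond.
 */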
static void *do_data_decompress(void *opaque)
{
    DecompressParam *param = opaque;
    unsigned long pagesize;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
            qemu_mutex_unlock(&param->mutex);

            pagesize = TARGET_PAGE_SIZE;

            ret = qemu_uncompress_data(&param->stream, des, pagesize,
                                       param->compbuf, len);
            if (ret < 0 && migrate_get_current()->decompress_error_check) {
                error_report("decompress data failed");
                qemu_file_set_error(decomp_file, ret);

            qemu_mutex_lock(&decomp_done_lock);
            qemu_cond_signal(&decomp_done_cond);
            qemu_mutex_unlock(&decomp_done_lock);

            qemu_mutex_lock(&param->mutex);
            qemu_cond_wait(&param->cond, &param->mutex);
    qemu_mutex_unlock(&param->mutex);
static int wait_for_decompress_done(void)
{
    int idx, thread_count;

    if (!migrate_use_compression()) {

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!decomp_param[idx].done) {
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
    qemu_mutex_unlock(&decomp_done_lock);
    return qemu_file_get_error(decomp_file);
static void compress_threads_load_cleanup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {

    thread_count = migrate_decompress_threads();
    for (i = 0; i < thread_count; i++) {
        /*
         * we use it as an indicator which shows if the thread is
         * properly init'd or not
         */
        if (!decomp_param[i].compbuf) {

        qemu_mutex_lock(&decomp_param[i].mutex);
        decomp_param[i].quit = true;
        qemu_cond_signal(&decomp_param[i].cond);
        qemu_mutex_unlock(&decomp_param[i].mutex);
    for (i = 0; i < thread_count; i++) {
        if (!decomp_param[i].compbuf) {

        qemu_thread_join(decompress_threads + i);
        qemu_mutex_destroy(&decomp_param[i].mutex);
        qemu_cond_destroy(&decomp_param[i].cond);
        inflateEnd(&decomp_param[i].stream);
        g_free(decomp_param[i].compbuf);
        decomp_param[i].compbuf = NULL;
    g_free(decompress_threads);
    g_free(decomp_param);
    decompress_threads = NULL;
    decomp_param = NULL;
static int compress_threads_load_setup(QEMUFile *f)
{
    int i, thread_count;

    if (!migrate_use_compression()) {

    thread_count = migrate_decompress_threads();
    decompress_threads = g_new0(QemuThread, thread_count);
    decomp_param = g_new0(DecompressParam, thread_count);
    qemu_mutex_init(&decomp_done_lock);
    qemu_cond_init(&decomp_done_cond);
    for (i = 0; i < thread_count; i++) {
        if (inflateInit(&decomp_param[i].stream) != Z_OK) {

        decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
        qemu_mutex_init(&decomp_param[i].mutex);
        qemu_cond_init(&decomp_param[i].cond);
        decomp_param[i].done = true;
        decomp_param[i].quit = false;
        qemu_thread_create(decompress_threads + i, "decompress",
                           do_data_decompress, decomp_param + i,
                           QEMU_THREAD_JOINABLE);

    compress_threads_load_cleanup();
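
/*
 * decompress_data_with_multi_threads: hand one compressed page to an idle
 * decompression thread; if all threads are busy, wait on decomp_done_cond
 * until one finishes.
 */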
static void decompress_data_with_multi_threads(QEMUFile *f,
                                               void *host, int len)
{
    int idx, thread_count;

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
        for (idx = 0; idx < thread_count; idx++) {
            if (decomp_param[idx].done) {
                decomp_param[idx].done = false;
                qemu_mutex_lock(&decomp_param[idx].mutex);
                qemu_get_buffer(f, decomp_param[idx].compbuf, len);
                decomp_param[idx].des = host;
                decomp_param[idx].len = len;
                qemu_cond_signal(&decomp_param[idx].cond);
                qemu_mutex_unlock(&decomp_param[idx].mutex);
        if (idx < thread_count) {
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
    qemu_mutex_unlock(&decomp_done_lock);
/*
 * colo cache: this is for secondary VM, we cache the whole
 * memory of the secondary VM.  The global lock must be held
 * to call this helper.
 */
int colo_init_ram_cache(void)
{
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        block->colo_cache = qemu_anon_ram_alloc(block->used_length,
        if (!block->colo_cache) {
            error_report("%s: Can't alloc memory for COLO cache of block %s,"
                         "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
                         block->used_length);
        memcpy(block->colo_cache, block->host, block->used_length);

    /*
     * Record the dirty pages that were sent by the PVM; we use this dirty
     * bitmap to decide which pages in the cache should be flushed into the
     * SVM's RAM.  Here we use the same name 'ram_bitmap' as for migration.
     */
    if (ram_bytes_total()) {
        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;

            block->bmap = bitmap_new(pages);
            bitmap_set(block->bmap, 0, pages);

    ram_state = g_new0(RAMState, 1);
    ram_state->migration_dirty_pages = 0;
    memory_global_dirty_log_start();

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (block->colo_cache) {
            qemu_anon_ram_free(block->colo_cache, block->used_length);
            block->colo_cache = NULL;
/* The global lock must be held to call this helper */
void colo_release_ram_cache(void)
{
    memory_global_dirty_log_stop();
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        g_free(block->bmap);

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (block->colo_cache) {
            qemu_anon_ram_free(block->colo_cache, block->used_length);
            block->colo_cache = NULL;
/*
 * ram_load_setup: Setup RAM for migration incoming side
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to receive the data
 * @opaque: RAMState pointer
 */
static int ram_load_setup(QEMUFile *f, void *opaque)
{
    if (compress_threads_load_setup(f)) {

    xbzrle_load_setup();
    ramblock_recv_map_init();

static int ram_load_cleanup(void *opaque)
{
    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        if (ramblock_is_pmem(rb)) {
            pmem_persist(rb->host, rb->used_length);

    xbzrle_load_cleanup();
    compress_threads_load_cleanup();

    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        g_free(rb->receivedmap);
        rb->receivedmap = NULL;
/*
 * ram_postcopy_incoming_init: allocate postcopy data structures
 *
 * Returns 0 for success and negative if there was an error
 *
 * @mis: current migration incoming state
 *
 * Allocate data structures etc needed by incoming migration with
 * postcopy-ram.  postcopy-ram's similarly named
 * postcopy_ram_incoming_init does the work.
 */
int ram_postcopy_incoming_init(MigrationIncomingState *mis)
{
    return postcopy_ram_incoming_init(mis);
/*
 * ram_load_postcopy: load a page in postcopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in postcopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile where to send the data
 */
static int ram_load_postcopy(QEMUFile *f)
{
    int flags = 0, ret = 0;
    bool place_needed = false;
    bool matches_target_page_size = false;
    MigrationIncomingState *mis = migration_incoming_get_current();
    /* Temporary page that is later 'placed' */
    void *postcopy_host_page = postcopy_get_tmp_page(mis);
    void *last_host = NULL;
    bool all_zero = false;

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        void *page_buffer = NULL;
        void *place_source = NULL;
        RAMBlock *block = NULL;

        addr = qemu_get_be64(f);

        /*
         * If qemu file error, we should stop here, and then "addr"
         * may be invalid.
         */
        ret = qemu_file_get_error(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        trace_ram_load_postcopy_loop((uint64_t)addr, flags);
        place_needed = false;
        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
            block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
            matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
            /*
             * Postcopy requires that we place whole host pages atomically;
             * these may be huge pages for RAMBlocks that are backed by
             * hugetlbfs.
             * To make it atomic, the data is read into a temporary page
             * that's moved into place later.
             * The migration protocol uses, possibly smaller, target-pages
             * however the source ensures it always sends all the components
             * of a host page in order.
             */
            page_buffer = postcopy_host_page +
                          ((uintptr_t)host & (block->page_size - 1));
            /* If all TP are zero then we can optimise the place */
            if (!((uintptr_t)host & (block->page_size - 1))) {
                /* not the 1st TP within the HP */
                if (host != (last_host + TARGET_PAGE_SIZE)) {
                    error_report("Non-sequential target page %p/%p",

            /*
             * If it's the last part of a host page then we place the host
             * page.
             */
            place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
                            (block->page_size - 1)) == 0;
            place_source = postcopy_host_page;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            memset(page_buffer, ch, TARGET_PAGE_SIZE);

        case RAM_SAVE_FLAG_PAGE:
            if (!matches_target_page_size) {
                /* For huge pages, we always use temporary buffer */
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
                /*
                 * For small pages that match the target page size, we
                 * avoid the qemu_file copy.  Instead we directly use
                 * the buffer of QEMUFile to place the page.  Note: we
                 * cannot do any QEMUFile operation before using that
                 * buffer to make sure the buffer is valid when
                 * placing the page.
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,

        case RAM_SAVE_FLAG_EOS:
            multifd_recv_sync_main();
            error_report("Unknown combination of migration flags: %#x"
                         " (postcopy mode)", flags);

        /* Detect for any possible file errors */
        if (!ret && qemu_file_get_error(f)) {
            ret = qemu_file_get_error(f);

        if (!ret && place_needed) {
            /* This gets called at the last target page in the host page */
            void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;

                ret = postcopy_place_page_zero(mis, place_dest,
                ret = postcopy_place_page(mis, place_dest,
                                          place_source, block);
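
/*
 * postcopy_is_advised/postcopy_is_running: map the incoming PostcopyState
 * onto the two interesting phases: the source has advised postcopy, and
 * postcopy is actively handling pages.
 */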
static bool postcopy_is_advised(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;

static bool postcopy_is_running(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
/*
 * Flush content of RAM cache into SVM's memory.
 * Only flush the pages that were dirtied by the PVM, the SVM, or both.
 */
static void colo_flush_ram_cache(void)
{
    RAMBlock *block = NULL;
    unsigned long offset = 0;

    memory_global_dirty_log_sync();
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        migration_bitmap_sync_range(ram_state, block, 0, block->used_length);

    trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
        block = QLIST_FIRST_RCU(&ram_list.blocks);
            offset = migration_bitmap_find_dirty(ram_state, block, offset);
            if (offset << TARGET_PAGE_BITS >= block->used_length) {
                block = QLIST_NEXT_RCU(block, next);
                migration_bitmap_clear_dirty(ram_state, block, offset);
                dst_host = block->host + (offset << TARGET_PAGE_BITS);
                src_host = block->colo_cache + (offset << TARGET_PAGE_BITS);
                memcpy(dst_host, src_host, TARGET_PAGE_SIZE);

    trace_colo_flush_ram_cache_end();
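
/*
 * ram_load: main entry point for loading RAM state from the stream; either
 * hands off to ram_load_postcopy() or walks the precopy stream, dispatching
 * on the RAM_SAVE_FLAG_* of each chunk.
 */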
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0, invalid_flags = 0;
    static uint64_t seq_iter;
    /*
     * If system is running in postcopy mode, page inserts to host memory must
     * be atomic.
     */
    bool postcopy_running = postcopy_is_running();
    /* ADVISE is earlier, it shows the source has the postcopy capability on */
    bool postcopy_advised = postcopy_is_advised();

    if (version_id != 4) {

    if (!migrate_use_compression()) {
        invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;

    /* This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */

    if (postcopy_running) {
        ret = ram_load_postcopy(f);

    while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & invalid_flags) {
            if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
                error_report("Received an unexpected compressed page");

        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(f, flags);

            /*
             * After going into COLO, we should load the Page into colo_cache.
             */
            if (migration_incoming_in_colo_state()) {
                host = colo_cache_from_block_offset(block, addr);
                host = host_from_ram_block_offset(block, addr);
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);

            if (!migration_incoming_in_colo_state()) {
                ramblock_recv_bitmap_set(block, host);

            trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                length = qemu_get_be64(f);

                block = qemu_ram_block_by_name(id);
                if (block && !qemu_ram_is_migratable(block)) {
                    error_report("block %s should not be migrated !", id);
                    if (length != block->used_length) {
                        Error *local_err = NULL;

                        ret = qemu_ram_resize(block, length,
                            error_report_err(local_err);
                    /* For postcopy we need to check hugepage sizes match */
                    if (postcopy_advised &&
                        block->page_size != qemu_host_page_size) {
                        uint64_t remote_page_size = qemu_get_be64(f);
                        if (remote_page_size != block->page_size) {
                            error_report("Mismatched RAM page size %s "
                                         "(local) %zd != %" PRId64,
                                         id, block->page_size,
                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                total_ram_bytes -= length;

        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);

        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);

        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
            decompress_data_with_multi_threads(f, host, len);

        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);

        case RAM_SAVE_FLAG_EOS:
            multifd_recv_sync_main();
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
                error_report("Unknown combination of migration flags: %#x",

        ret = qemu_file_get_error(f);

    ret |= wait_for_decompress_done();

    trace_ram_load_complete(ret, seq_iter);

    if (!ret && migration_incoming_in_colo_state()) {
        colo_flush_ram_cache();
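
/*
 * ram_has_postcopy: postcopy is supported unless one of the migratable
 * RAMBlocks is backed by persistent memory (nvdimm).
 */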
static bool ram_has_postcopy(void *opaque)
{
    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        if (ramblock_is_pmem(rb)) {
            info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
                        "is not supported now!", rb->idstr, rb->host);

    return migrate_postcopy_ram();
/* Sync all the dirty bitmaps with the destination VM. */
static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
{
    QEMUFile *file = s->to_dst_file;
    int ramblock_count = 0;

    trace_ram_dirty_bitmap_sync_start();

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        qemu_savevm_send_recv_bitmap(file, block->idstr);
        trace_ram_dirty_bitmap_request(block->idstr);

    trace_ram_dirty_bitmap_sync_wait();

    /* Wait until all the ramblocks' dirty bitmaps are synced */
    while (ramblock_count--) {
        qemu_sem_wait(&s->rp_state.rp_sem);

    trace_ram_dirty_bitmap_sync_complete();

static void ram_dirty_bitmap_reload_notify(MigrationState *s)
{
    qemu_sem_post(&s->rp_state.rp_sem);
/*
 * Read the received bitmap, revert it as the initial dirty bitmap.
 * This is only used when the postcopy migration is paused but wants
 * to resume from a middle point.
 */
int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
{
    QEMUFile *file = s->rp_state.from_dst_file;
    unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
    uint64_t local_size = DIV_ROUND_UP(nbits, 8);
    uint64_t size, end_mark;

    trace_ram_dirty_bitmap_reload_begin(block->idstr);

    if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: incorrect state %s", __func__,
                     MigrationStatus_str(s->state));

    /*
     * Note: see comments in ramblock_recv_bitmap_send() on why we
     * need the endianness conversion, and the paddings.
     */
    local_size = ROUND_UP(local_size, 8);

    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    size = qemu_get_be64(file);

    /* The size of the bitmap should match with our ramblock */
    if (size != local_size) {
        error_report("%s: ramblock '%s' bitmap size mismatch "
                     "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
                     block->idstr, size, local_size);

    size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
    end_mark = qemu_get_be64(file);

    ret = qemu_file_get_error(file);
    if (ret || size != local_size) {
        error_report("%s: read bitmap failed for ramblock '%s': %d"
                     " (size 0x%"PRIx64", got: 0x%"PRIx64")",
                     __func__, block->idstr, ret, local_size, size);

    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
        error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64,
                     __func__, block->idstr, end_mark);

    /*
     * Endianness conversion.  We are in postcopy (though paused).
     * The dirty bitmap won't change.  We can directly modify it.
     */
    bitmap_from_le(block->bmap, le_bitmap, nbits);

    /*
     * What we received is "received bitmap".  Revert it as the initial
     * dirty bitmap for this ramblock.
     */
    bitmap_complement(block->bmap, block->bmap, nbits);

    trace_ram_dirty_bitmap_reload_complete(block->idstr);

    /*
     * We succeeded in syncing the bitmap for the current ramblock.  If this
     * is the last one to sync, we need to notify the main send thread.
     */
    ram_dirty_bitmap_reload_notify(s);
static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;

    ret = ram_dirty_bitmap_sync_all(s, rs);

    ram_state_resume_prepare(rs, s->to_dst_file);

static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
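
/*
 * ram_mig_init: register the "ram" savevm section (version 4) with the
 * handlers above and initialise the XBZRLE lock.
 */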
void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);