/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
29 #include "qemu/osdep.h"
32 #include "qemu/cutils.h"
33 #include "qemu/bitops.h"
34 #include "qemu/bitmap.h"
35 #include "qemu/main-loop.h"
36 #include "qemu/pmem.h"
39 #include "migration.h"
41 #include "migration/register.h"
42 #include "migration/misc.h"
43 #include "qemu-file.h"
44 #include "postcopy-ram.h"
45 #include "page_cache.h"
46 #include "qemu/error-report.h"
47 #include "qapi/error.h"
48 #include "qapi/qapi-events-migration.h"
49 #include "qapi/qmp/qerror.h"
51 #include "exec/ram_addr.h"
52 #include "exec/target_page.h"
53 #include "qemu/rcu_queue.h"
54 #include "migration/colo.h"
56 #include "sysemu/sysemu.h"
57 #include "qemu/uuid.h"
/***********************************************************/
/* ram save/restore */

/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
 * worked for pages that were filled with the same char. We switched
 * it to only search for the zero value. And to avoid confusion with
 * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it.
 */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
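
/*
 * Roughly speaking, these flags travel in the low bits of the 64-bit page
 * offset written by save_page_header() below, e.g. a normal page is
 * announced as
 *
 *     qemu_put_be64(f, offset | RAM_SAVE_FLAG_PAGE);
 *
 * which works because RAMBlock offsets are target-page aligned, leaving the
 * low bits free for flags.
 */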
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}

XBZRLECacheStats xbzrle_counters;
/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;
static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_lock(&XBZRLE.lock);
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_unlock(&XBZRLE.lock);
}
/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from qmp_migrate_set_cache_size in main
 * thread, possibly while a migration is in progress. A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock().
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(int64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}
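
/*
 * Typical caller (sketch for orientation only): the QMP handler named in the
 * comment above validates the requested size and then calls
 *
 *     xbzrle_cache_resize(new_size, errp);
 *
 * from the main thread, possibly while the migration thread is still using
 * the cache, which is why the resize is done under XBZRLE.lock.
 */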
/* Should be holding either ram_list.mutex, or the RCU lock. */
#define RAMBLOCK_FOREACH_MIGRATABLE(block)             \
    INTERNAL_RAMBLOCK_FOREACH(block)                   \
        if (!qemu_ram_is_migratable(block)) {} else

#undef RAMBLOCK_FOREACH
static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}
int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}

bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}

void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}

void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}
#define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)

/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns >0 if success with sent bytes, or <0 if error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;
    uint64_t size;

    if (!block) {
        error_report("%s: invalid block name: %s", __func__, block_name);
        return -1;
    }

    nbits = block->used_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment). So extend it a bit beforehand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap. This is
     * required so it works even when source and destination VMs are
     * not using the same endianness. (Note: big endian won't work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = DIV_ROUND_UP(nbits, 8);

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines. We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit machines.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
    qemu_fflush(file);

    g_free(le_bitmap);

    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    return size + sizeof(size);
}
/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};
/* State of RAM for migration */
struct RAMState {
    /* QEMUFile used for this migration */
    QEMUFile *f;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* We are in the first round */
    bool ram_bulk_stage;
    /* How many times we have dirty too many pages */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* total handled target pages at the beginning of period */
    uint64_t target_page_count_prev;
    /* total handled target pages since start */
    uint64_t target_page_count;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /* protects modification of the bitmap */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests;
};
typedef struct RAMState RAMState;

static RAMState *ram_state;
uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}

MigrationStats ram_counters;
/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock    *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool         complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;
struct CompressParam {
    bool done;
    bool quit;
    bool zero_page;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;

    /* internally used fields */
    z_stream stream;
    uint8_t *originbuf;
};
typedef struct CompressParam CompressParam;
struct DecompressParam {
    bool done;
    bool quit;
    QemuMutex mutex;
    QemuCond cond;
    void *des;
    uint8_t *compbuf;
    int len;
    z_stream stream;
};
typedef struct DecompressParam DecompressParam;
static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static QEMUFile *decomp_file;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;
static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
                                 ram_addr_t offset, uint8_t *source_buf);
static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;
    RAMBlock *block;
    ram_addr_t offset;
    bool zero_page;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->block) {
            block = param->block;
            offset = param->offset;
            param->block = NULL;
            qemu_mutex_unlock(&param->mutex);

            zero_page = do_compress_ram_page(param->file, &param->stream,
                                             block, offset, param->originbuf);

            qemu_mutex_lock(&comp_done_lock);
            param->done = true;
            param->zero_page = zero_page;
            qemu_cond_signal(&comp_done_cond);
            qemu_mutex_unlock(&comp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}
static inline void terminate_compression_threads(void)
{
    int idx, thread_count;

    thread_count = migrate_compress_threads();

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        comp_param[idx].quit = true;
        qemu_cond_signal(&comp_param[idx].cond);
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}
static void compress_threads_save_cleanup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        /*
         * we use it as an indicator which shows if the thread is
         * properly init'd or not
         */
        if (!comp_param[i].file) {
            break;
        }
        qemu_thread_join(compress_threads + i);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
        deflateEnd(&comp_param[i].stream);
        g_free(comp_param[i].originbuf);
        qemu_fclose(comp_param[i].file);
        comp_param[i].file = NULL;
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}
static int compress_threads_save_setup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!comp_param[i].originbuf) {
            goto exit;
        }

        if (deflateInit(&comp_param[i].stream,
                        migrate_compress_level()) != Z_OK) {
            g_free(comp_param[i].originbuf);
            goto exit;
        }

        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
    return 0;

exit:
    compress_threads_save_cleanup();
    return -1;
}
#define MULTIFD_MAGIC 0x11223344U
#define MULTIFD_VERSION 1

#define MULTIFD_FLAG_SYNC (1 << 0)

typedef struct {
    uint32_t magic;
    uint32_t version;
    unsigned char uuid[16]; /* QemuUUID */
    uint8_t id;
} __attribute__((packed)) MultiFDInit_t;

typedef struct {
    uint32_t magic;
    uint32_t version;
    uint32_t flags;
    uint32_t size;
    uint32_t used;
    uint64_t packet_num;
    char ramblock[256];
    uint64_t offset[];
} __attribute__((packed)) MultiFDPacket_t;
typedef struct {
    /* number of used pages */
    uint32_t used;
    /* number of allocated pages */
    uint32_t allocated;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* offset of each page */
    ram_addr_t *offset;
    /* pointer to each page */
    struct iovec *iov;
    RAMBlock *block;
} MultiFDPages_t;
typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* sem where to wait for more work */
    QemuSemaphore sem;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* should this thread finish */
    bool quit;
    /* thread has work to do */
    int pending_job;
    /* array of pages to send */
    MultiFDPages_t *pages;
    /* packet allocated len */
    uint32_t packet_len;
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    uint32_t flags;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* thread local variables */
    /* packets sent through this channel */
    uint64_t num_packets;
    /* pages sent through this channel */
    uint64_t num_pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
} MultiFDSendParams;
typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* array of pages to receive */
    MultiFDPages_t *pages;
    /* packet allocated len */
    uint32_t packet_len;
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    uint32_t flags;
    /* global number of received multifd packets */
    uint64_t packet_num;
    /* thread local variables */
    /* packets received through this channel */
    uint64_t num_packets;
    /* pages received through this channel */
    uint64_t num_pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
} MultiFDRecvParams;
624 static int multifd_send_initial_packet(MultiFDSendParams
*p
, Error
**errp
)
629 msg
.magic
= cpu_to_be32(MULTIFD_MAGIC
);
630 msg
.version
= cpu_to_be32(MULTIFD_VERSION
);
632 memcpy(msg
.uuid
, &qemu_uuid
.data
, sizeof(msg
.uuid
));
634 ret
= qio_channel_write_all(p
->c
, (char *)&msg
, sizeof(msg
), errp
);
641 static int multifd_recv_initial_packet(QIOChannel
*c
, Error
**errp
)
646 ret
= qio_channel_read_all(c
, (char *)&msg
, sizeof(msg
), errp
);
651 be32_to_cpus(&msg
.magic
);
652 be32_to_cpus(&msg
.version
);
654 if (msg
.magic
!= MULTIFD_MAGIC
) {
655 error_setg(errp
, "multifd: received packet magic %x "
656 "expected %x", msg
.magic
, MULTIFD_MAGIC
);
660 if (msg
.version
!= MULTIFD_VERSION
) {
661 error_setg(errp
, "multifd: received packet version %d "
662 "expected %d", msg
.version
, MULTIFD_VERSION
);
666 if (memcmp(msg
.uuid
, &qemu_uuid
, sizeof(qemu_uuid
))) {
667 char *uuid
= qemu_uuid_unparse_strdup(&qemu_uuid
);
668 char *msg_uuid
= qemu_uuid_unparse_strdup((const QemuUUID
*)msg
.uuid
);
670 error_setg(errp
, "multifd: received uuid '%s' and expected "
671 "uuid '%s' for channel %hhd", msg_uuid
, uuid
, msg
.id
);
    if (msg.id > migrate_multifd_channels()) {
        error_setg(errp, "multifd: received channel id %d, "
                   "expected a value below %d", msg.id,
                   migrate_multifd_channels());
686 static MultiFDPages_t
*multifd_pages_init(size_t size
)
688 MultiFDPages_t
*pages
= g_new0(MultiFDPages_t
, 1);
690 pages
->allocated
= size
;
691 pages
->iov
= g_new0(struct iovec
, size
);
692 pages
->offset
= g_new0(ram_addr_t
, size
);
697 static void multifd_pages_clear(MultiFDPages_t
*pages
)
700 pages
->allocated
= 0;
701 pages
->packet_num
= 0;
705 g_free(pages
->offset
);
706 pages
->offset
= NULL
;
710 static void multifd_send_fill_packet(MultiFDSendParams
*p
)
712 MultiFDPacket_t
*packet
= p
->packet
;
715 packet
->magic
= cpu_to_be32(MULTIFD_MAGIC
);
716 packet
->version
= cpu_to_be32(MULTIFD_VERSION
);
717 packet
->flags
= cpu_to_be32(p
->flags
);
718 packet
->size
= cpu_to_be32(migrate_multifd_page_count());
719 packet
->used
= cpu_to_be32(p
->pages
->used
);
720 packet
->packet_num
= cpu_to_be64(p
->packet_num
);
722 if (p
->pages
->block
) {
723 strncpy(packet
->ramblock
, p
->pages
->block
->idstr
, 256);
726 for (i
= 0; i
< p
->pages
->used
; i
++) {
727 packet
->offset
[i
] = cpu_to_be64(p
->pages
->offset
[i
]);
731 static int multifd_recv_unfill_packet(MultiFDRecvParams
*p
, Error
**errp
)
733 MultiFDPacket_t
*packet
= p
->packet
;
737 be32_to_cpus(&packet
->magic
);
738 if (packet
->magic
!= MULTIFD_MAGIC
) {
739 error_setg(errp
, "multifd: received packet "
740 "magic %x and expected magic %x",
741 packet
->magic
, MULTIFD_MAGIC
);
745 be32_to_cpus(&packet
->version
);
746 if (packet
->version
!= MULTIFD_VERSION
) {
747 error_setg(errp
, "multifd: received packet "
748 "version %d and expected version %d",
749 packet
->version
, MULTIFD_VERSION
);
753 p
->flags
= be32_to_cpu(packet
->flags
);
755 be32_to_cpus(&packet
->size
);
756 if (packet
->size
> migrate_multifd_page_count()) {
757 error_setg(errp
, "multifd: received packet "
758 "with size %d and expected maximum size %d",
759 packet
->size
, migrate_multifd_page_count()) ;
763 p
->pages
->used
= be32_to_cpu(packet
->used
);
764 if (p
->pages
->used
> packet
->size
) {
765 error_setg(errp
, "multifd: received packet "
766 "with size %d and expected maximum size %d",
767 p
->pages
->used
, packet
->size
) ;
771 p
->packet_num
= be64_to_cpu(packet
->packet_num
);
773 if (p
->pages
->used
) {
774 /* make sure that ramblock is 0 terminated */
775 packet
->ramblock
[255] = 0;
776 block
= qemu_ram_block_by_name(packet
->ramblock
);
778 error_setg(errp
, "multifd: unknown ram block %s",
784 for (i
= 0; i
< p
->pages
->used
; i
++) {
785 ram_addr_t offset
= be64_to_cpu(packet
->offset
[i
]);
787 if (offset
> (block
->used_length
- TARGET_PAGE_SIZE
)) {
788 error_setg(errp
, "multifd: offset too long " RAM_ADDR_FMT
789 " (max " RAM_ADDR_FMT
")",
790 offset
, block
->max_length
);
793 p
->pages
->iov
[i
].iov_base
= block
->host
+ offset
;
794 p
->pages
->iov
[i
].iov_len
= TARGET_PAGE_SIZE
;
801 MultiFDSendParams
*params
;
802 /* number of created threads */
804 /* array of pages to sent */
805 MultiFDPages_t
*pages
;
806 /* syncs main thread and channels */
807 QemuSemaphore sem_sync
;
808 /* global number of generated multifd packets */
810 /* send channels ready */
811 QemuSemaphore channels_ready
;
812 } *multifd_send_state
;
 * How we use multifd_send_state->pages and channel->pages?
 *
 * We create a pages struct for each channel, and a main one. Each time
 * that we need to send a batch of pages we interchange the ones between
 * multifd_send_state and the channel that is sending it. There are
 * two reasons for that:
 *    - to not have to do so many mallocs during migration
 *    - to make it easier to know what to free at the end of migration
 *
 * This way we always know who is the owner of each "pages" struct,
 * and we don't need any locking. It belongs to the migration thread
 * or to the channel thread. Switching is safe because the migration
 * thread is using the channel mutex when changing it, and the channel
 * has to have finished with its own before the switch; otherwise
 * pending_job can't be cleared.
 */
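
/*
 * Sketch of the exchange described above (assuming the elided assignment
 * "p->pages = pages" that pairs with the one visible below):
 *
 *     pages = multifd_send_state->pages;     // filled batch, main thread
 *     multifd_send_state->pages = p->pages;  // channel's empty struct
 *     p->pages = pages;                      // channel now owns the batch
 */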
832 static void multifd_send_pages(void)
835 static int next_channel
;
    MultiFDSendParams *p = NULL; /* make gcc happy */
837 MultiFDPages_t
*pages
= multifd_send_state
->pages
;
838 uint64_t transferred
;
840 qemu_sem_wait(&multifd_send_state
->channels_ready
);
841 for (i
= next_channel
;; i
= (i
+ 1) % migrate_multifd_channels()) {
842 p
= &multifd_send_state
->params
[i
];
844 qemu_mutex_lock(&p
->mutex
);
845 if (!p
->pending_job
) {
847 next_channel
= (i
+ 1) % migrate_multifd_channels();
850 qemu_mutex_unlock(&p
->mutex
);
854 p
->packet_num
= multifd_send_state
->packet_num
++;
855 p
->pages
->block
= NULL
;
856 multifd_send_state
->pages
= p
->pages
;
858 transferred
= ((uint64_t) pages
->used
) * TARGET_PAGE_SIZE
+ p
->packet_len
;
    ram_counters.multifd_bytes += transferred;
    ram_counters.transferred += transferred;
861 qemu_mutex_unlock(&p
->mutex
);
862 qemu_sem_post(&p
->sem
);
865 static void multifd_queue_page(RAMBlock
*block
, ram_addr_t offset
)
867 MultiFDPages_t
*pages
= multifd_send_state
->pages
;
870 pages
->block
= block
;
873 if (pages
->block
== block
) {
874 pages
->offset
[pages
->used
] = offset
;
875 pages
->iov
[pages
->used
].iov_base
= block
->host
+ offset
;
876 pages
->iov
[pages
->used
].iov_len
= TARGET_PAGE_SIZE
;
879 if (pages
->used
< pages
->allocated
) {
884 multifd_send_pages();
886 if (pages
->block
!= block
) {
887 multifd_queue_page(block
, offset
);
891 static void multifd_send_terminate_threads(Error
*err
)
896 MigrationState
*s
= migrate_get_current();
897 migrate_set_error(s
, err
);
898 if (s
->state
== MIGRATION_STATUS_SETUP
||
899 s
->state
== MIGRATION_STATUS_PRE_SWITCHOVER
||
900 s
->state
== MIGRATION_STATUS_DEVICE
||
901 s
->state
== MIGRATION_STATUS_ACTIVE
) {
902 migrate_set_state(&s
->state
, s
->state
,
903 MIGRATION_STATUS_FAILED
);
907 for (i
= 0; i
< migrate_multifd_channels(); i
++) {
908 MultiFDSendParams
*p
= &multifd_send_state
->params
[i
];
910 qemu_mutex_lock(&p
->mutex
);
912 qemu_sem_post(&p
->sem
);
913 qemu_mutex_unlock(&p
->mutex
);
917 int multifd_save_cleanup(Error
**errp
)
922 if (!migrate_use_multifd()) {
925 multifd_send_terminate_threads(NULL
);
926 for (i
= 0; i
< migrate_multifd_channels(); i
++) {
927 MultiFDSendParams
*p
= &multifd_send_state
->params
[i
];
930 qemu_thread_join(&p
->thread
);
932 socket_send_channel_destroy(p
->c
);
934 qemu_mutex_destroy(&p
->mutex
);
935 qemu_sem_destroy(&p
->sem
);
936 qemu_sem_destroy(&p
->sem_sync
);
939 multifd_pages_clear(p
->pages
);
945 qemu_sem_destroy(&multifd_send_state
->channels_ready
);
946 qemu_sem_destroy(&multifd_send_state
->sem_sync
);
947 g_free(multifd_send_state
->params
);
948 multifd_send_state
->params
= NULL
;
949 multifd_pages_clear(multifd_send_state
->pages
);
950 multifd_send_state
->pages
= NULL
;
951 g_free(multifd_send_state
);
952 multifd_send_state
= NULL
;
956 static void multifd_send_sync_main(void)
960 if (!migrate_use_multifd()) {
963 if (multifd_send_state
->pages
->used
) {
964 multifd_send_pages();
966 for (i
= 0; i
< migrate_multifd_channels(); i
++) {
967 MultiFDSendParams
*p
= &multifd_send_state
->params
[i
];
969 trace_multifd_send_sync_main_signal(p
->id
);
971 qemu_mutex_lock(&p
->mutex
);
973 p
->packet_num
= multifd_send_state
->packet_num
++;
974 p
->flags
|= MULTIFD_FLAG_SYNC
;
976 qemu_mutex_unlock(&p
->mutex
);
977 qemu_sem_post(&p
->sem
);
979 for (i
= 0; i
< migrate_multifd_channels(); i
++) {
980 MultiFDSendParams
*p
= &multifd_send_state
->params
[i
];
982 trace_multifd_send_sync_main_wait(p
->id
);
983 qemu_sem_wait(&multifd_send_state
->sem_sync
);
985 trace_multifd_send_sync_main(multifd_send_state
->packet_num
);
988 static void *multifd_send_thread(void *opaque
)
990 MultiFDSendParams
*p
= opaque
;
991 Error
*local_err
= NULL
;
994 trace_multifd_send_thread_start(p
->id
);
995 rcu_register_thread();
997 if (multifd_send_initial_packet(p
, &local_err
) < 0) {
1000 /* initial packet */
1004 qemu_sem_wait(&p
->sem
);
1005 qemu_mutex_lock(&p
->mutex
);
1007 if (p
->pending_job
) {
1008 uint32_t used
= p
->pages
->used
;
1009 uint64_t packet_num
= p
->packet_num
;
1010 uint32_t flags
= p
->flags
;
1012 multifd_send_fill_packet(p
);
1015 p
->num_pages
+= used
;
1017 qemu_mutex_unlock(&p
->mutex
);
1019 trace_multifd_send(p
->id
, packet_num
, used
, flags
);
1021 ret
= qio_channel_write_all(p
->c
, (void *)p
->packet
,
1022 p
->packet_len
, &local_err
);
1027 ret
= qio_channel_writev_all(p
->c
, p
->pages
->iov
, used
, &local_err
);
1032 qemu_mutex_lock(&p
->mutex
);
1034 qemu_mutex_unlock(&p
->mutex
);
1036 if (flags
& MULTIFD_FLAG_SYNC
) {
1037 qemu_sem_post(&multifd_send_state
->sem_sync
);
1039 qemu_sem_post(&multifd_send_state
->channels_ready
);
1040 } else if (p
->quit
) {
1041 qemu_mutex_unlock(&p
->mutex
);
1044 qemu_mutex_unlock(&p
->mutex
);
1045 /* sometimes there are spurious wakeups */
1051 multifd_send_terminate_threads(local_err
);
1054 qemu_mutex_lock(&p
->mutex
);
1056 qemu_mutex_unlock(&p
->mutex
);
1058 rcu_unregister_thread();
1059 trace_multifd_send_thread_end(p
->id
, p
->num_packets
, p
->num_pages
);
1064 static void multifd_new_send_channel_async(QIOTask
*task
, gpointer opaque
)
1066 MultiFDSendParams
*p
= opaque
;
1067 QIOChannel
*sioc
= QIO_CHANNEL(qio_task_get_source(task
));
1068 Error
*local_err
= NULL
;
1070 if (qio_task_propagate_error(task
, &local_err
)) {
1071 if (multifd_save_cleanup(&local_err
) != 0) {
1072 migrate_set_error(migrate_get_current(), local_err
);
1075 p
->c
= QIO_CHANNEL(sioc
);
1076 qio_channel_set_delay(p
->c
, false);
1078 qemu_thread_create(&p
->thread
, p
->name
, multifd_send_thread
, p
,
1079 QEMU_THREAD_JOINABLE
);
1081 atomic_inc(&multifd_send_state
->count
);
1085 int multifd_save_setup(void)
1088 uint32_t page_count
= migrate_multifd_page_count();
1091 if (!migrate_use_multifd()) {
1094 thread_count
= migrate_multifd_channels();
1095 multifd_send_state
= g_malloc0(sizeof(*multifd_send_state
));
1096 multifd_send_state
->params
= g_new0(MultiFDSendParams
, thread_count
);
1097 atomic_set(&multifd_send_state
->count
, 0);
1098 multifd_send_state
->pages
= multifd_pages_init(page_count
);
1099 qemu_sem_init(&multifd_send_state
->sem_sync
, 0);
1100 qemu_sem_init(&multifd_send_state
->channels_ready
, 0);
1102 for (i
= 0; i
< thread_count
; i
++) {
1103 MultiFDSendParams
*p
= &multifd_send_state
->params
[i
];
1105 qemu_mutex_init(&p
->mutex
);
1106 qemu_sem_init(&p
->sem
, 0);
1107 qemu_sem_init(&p
->sem_sync
, 0);
1111 p
->pages
= multifd_pages_init(page_count
);
1112 p
->packet_len
= sizeof(MultiFDPacket_t
)
1113 + sizeof(ram_addr_t
) * page_count
;
1114 p
->packet
= g_malloc0(p
->packet_len
);
1115 p
->name
= g_strdup_printf("multifdsend_%d", i
);
1116 socket_send_channel_create(multifd_new_send_channel_async
, p
);
1122 MultiFDRecvParams
*params
;
1123 /* number of created threads */
1125 /* syncs main thread and channels */
1126 QemuSemaphore sem_sync
;
1127 /* global number of generated multifd packets */
1128 uint64_t packet_num
;
1129 } *multifd_recv_state
;
1131 static void multifd_recv_terminate_threads(Error
*err
)
1136 MigrationState
*s
= migrate_get_current();
1137 migrate_set_error(s
, err
);
1138 if (s
->state
== MIGRATION_STATUS_SETUP
||
1139 s
->state
== MIGRATION_STATUS_ACTIVE
) {
1140 migrate_set_state(&s
->state
, s
->state
,
1141 MIGRATION_STATUS_FAILED
);
1145 for (i
= 0; i
< migrate_multifd_channels(); i
++) {
1146 MultiFDRecvParams
*p
= &multifd_recv_state
->params
[i
];
1148 qemu_mutex_lock(&p
->mutex
);
1149 /* We could arrive here for two reasons:
1150 - normal quit, i.e. everything went fine, just finished
1151 - error quit: We close the channels so the channel threads
1152 finish the qio_channel_read_all_eof() */
1153 qio_channel_shutdown(p
->c
, QIO_CHANNEL_SHUTDOWN_BOTH
, NULL
);
1154 qemu_mutex_unlock(&p
->mutex
);
1158 int multifd_load_cleanup(Error
**errp
)
1163 if (!migrate_use_multifd()) {
1166 multifd_recv_terminate_threads(NULL
);
1167 for (i
= 0; i
< migrate_multifd_channels(); i
++) {
1168 MultiFDRecvParams
*p
= &multifd_recv_state
->params
[i
];
1171 qemu_thread_join(&p
->thread
);
1173 object_unref(OBJECT(p
->c
));
1175 qemu_mutex_destroy(&p
->mutex
);
1176 qemu_sem_destroy(&p
->sem_sync
);
1179 multifd_pages_clear(p
->pages
);
1185 qemu_sem_destroy(&multifd_recv_state
->sem_sync
);
1186 g_free(multifd_recv_state
->params
);
1187 multifd_recv_state
->params
= NULL
;
1188 g_free(multifd_recv_state
);
1189 multifd_recv_state
= NULL
;
1194 static void multifd_recv_sync_main(void)
1198 if (!migrate_use_multifd()) {
1201 for (i
= 0; i
< migrate_multifd_channels(); i
++) {
1202 MultiFDRecvParams
*p
= &multifd_recv_state
->params
[i
];
1204 trace_multifd_recv_sync_main_wait(p
->id
);
1205 qemu_sem_wait(&multifd_recv_state
->sem_sync
);
1206 qemu_mutex_lock(&p
->mutex
);
1207 if (multifd_recv_state
->packet_num
< p
->packet_num
) {
1208 multifd_recv_state
->packet_num
= p
->packet_num
;
1210 qemu_mutex_unlock(&p
->mutex
);
1212 for (i
= 0; i
< migrate_multifd_channels(); i
++) {
1213 MultiFDRecvParams
*p
= &multifd_recv_state
->params
[i
];
1215 trace_multifd_recv_sync_main_signal(p
->id
);
1216 qemu_sem_post(&p
->sem_sync
);
1218 trace_multifd_recv_sync_main(multifd_recv_state
->packet_num
);
1221 static void *multifd_recv_thread(void *opaque
)
1223 MultiFDRecvParams
*p
= opaque
;
1224 Error
*local_err
= NULL
;
1227 trace_multifd_recv_thread_start(p
->id
);
1228 rcu_register_thread();
1234 ret
= qio_channel_read_all_eof(p
->c
, (void *)p
->packet
,
1235 p
->packet_len
, &local_err
);
1236 if (ret
== 0) { /* EOF */
1239 if (ret
== -1) { /* Error */
1243 qemu_mutex_lock(&p
->mutex
);
1244 ret
= multifd_recv_unfill_packet(p
, &local_err
);
1246 qemu_mutex_unlock(&p
->mutex
);
1250 used
= p
->pages
->used
;
1252 trace_multifd_recv(p
->id
, p
->packet_num
, used
, flags
);
1254 p
->num_pages
+= used
;
1255 qemu_mutex_unlock(&p
->mutex
);
1257 ret
= qio_channel_readv_all(p
->c
, p
->pages
->iov
, used
, &local_err
);
1262 if (flags
& MULTIFD_FLAG_SYNC
) {
1263 qemu_sem_post(&multifd_recv_state
->sem_sync
);
1264 qemu_sem_wait(&p
->sem_sync
);
1269 multifd_recv_terminate_threads(local_err
);
1271 qemu_mutex_lock(&p
->mutex
);
1273 qemu_mutex_unlock(&p
->mutex
);
1275 rcu_unregister_thread();
1276 trace_multifd_recv_thread_end(p
->id
, p
->num_packets
, p
->num_pages
);
1281 int multifd_load_setup(void)
1284 uint32_t page_count
= migrate_multifd_page_count();
1287 if (!migrate_use_multifd()) {
1290 thread_count
= migrate_multifd_channels();
1291 multifd_recv_state
= g_malloc0(sizeof(*multifd_recv_state
));
1292 multifd_recv_state
->params
= g_new0(MultiFDRecvParams
, thread_count
);
1293 atomic_set(&multifd_recv_state
->count
, 0);
1294 qemu_sem_init(&multifd_recv_state
->sem_sync
, 0);
1296 for (i
= 0; i
< thread_count
; i
++) {
1297 MultiFDRecvParams
*p
= &multifd_recv_state
->params
[i
];
1299 qemu_mutex_init(&p
->mutex
);
1300 qemu_sem_init(&p
->sem_sync
, 0);
1302 p
->pages
= multifd_pages_init(page_count
);
1303 p
->packet_len
= sizeof(MultiFDPacket_t
)
1304 + sizeof(ram_addr_t
) * page_count
;
1305 p
->packet
= g_malloc0(p
->packet_len
);
1306 p
->name
= g_strdup_printf("multifdrecv_%d", i
);
1311 bool multifd_recv_all_channels_created(void)
1313 int thread_count
= migrate_multifd_channels();
1315 if (!migrate_use_multifd()) {
1319 return thread_count
== atomic_read(&multifd_recv_state
->count
);
1322 /* Return true if multifd is ready for the migration, otherwise false */
1323 bool multifd_recv_new_channel(QIOChannel
*ioc
)
1325 MultiFDRecvParams
*p
;
1326 Error
*local_err
= NULL
;
1329 id
= multifd_recv_initial_packet(ioc
, &local_err
);
1331 multifd_recv_terminate_threads(local_err
);
1335 p
= &multifd_recv_state
->params
[id
];
        error_setg(&local_err, "multifd: received id '%d' which is already set up",
1339 multifd_recv_terminate_threads(local_err
);
1343 object_ref(OBJECT(ioc
));
1344 /* initial packet */
1348 qemu_thread_create(&p
->thread
, p
->name
, multifd_recv_thread
, p
,
1349 QEMU_THREAD_JOINABLE
);
1350 atomic_inc(&multifd_recv_state
->count
);
1351 return multifd_recv_state
->count
== migrate_multifd_channels();
/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
                               ram_addr_t offset)
{
    size_t size, len;

    if (block == rs->last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        rs->last_sent_block = block;
    }
    return size;
}
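
/*
 * On the wire this produces, per page (sketch derived from the code above):
 *
 *     be64:  offset | flags           (always)
 *     u8:    strlen(block->idstr)     (only without RAM_SAVE_FLAG_CONTINUE)
 *     bytes: block->idstr             (only without RAM_SAVE_FLAG_CONTINUE)
 */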
/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes. If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(void)
{
    MigrationState *s = migrate_get_current();
    uint64_t pct_initial = s->parameters.cpu_throttle_initial;
    uint64_t pct_increment = s->parameters.cpu_throttle_increment;
    int pct_max = s->parameters.max_cpu_throttle;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_increment,
                         pct_max));
    }
}
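
/*
 * Worked example (assuming the common defaults of roughly a 20% initial
 * throttle, 10% increments and a 99% cap; the actual values come from the
 * migration parameters read above): successive calls set the CPU throttle
 * to 20%, 30%, 40%, ... until either migration converges or pct_max stops
 * the growth.
 */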
1414 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
1416 * @rs: current RAM state
1417 * @current_addr: address for the zero page
1419 * Update the xbzrle cache to reflect a page that's been sent as all 0.
1420 * The important thing is that a stale (not-yet-0'd) page be replaced
1422 * As a bonus, if the page wasn't in the cache it gets added so that
1423 * when a small write is made into the 0'd page it gets XBZRLE sent.
1425 static void xbzrle_cache_zero_page(RAMState
*rs
, ram_addr_t current_addr
)
1427 if (rs
->ram_bulk_stage
|| !migrate_use_xbzrle()) {
1431 /* We don't care if this fails to allocate a new cache page
1432 * as long as it updated an old one */
1433 cache_insert(XBZRLE
.cache
, current_addr
, XBZRLE
.zero_target_page
,
1434 ram_counters
.dirty_sync_count
);
1437 #define ENCODING_FLAG_XBZRLE 0x1
1440 * save_xbzrle_page: compress and send current page
1442 * Returns: 1 means that we wrote the page
1443 * 0 means that page is identical to the one already sent
1444 * -1 means that xbzrle would be longer than normal
1446 * @rs: current RAM state
1447 * @current_data: pointer to the address of the page contents
1448 * @current_addr: addr of the page
1449 * @block: block that contains the page we want to send
1450 * @offset: offset inside the block for the page
1451 * @last_stage: if we are at the completion stage
1453 static int save_xbzrle_page(RAMState
*rs
, uint8_t **current_data
,
1454 ram_addr_t current_addr
, RAMBlock
*block
,
1455 ram_addr_t offset
, bool last_stage
)
1457 int encoded_len
= 0, bytes_xbzrle
;
1458 uint8_t *prev_cached_page
;
1460 if (!cache_is_cached(XBZRLE
.cache
, current_addr
,
1461 ram_counters
.dirty_sync_count
)) {
1462 xbzrle_counters
.cache_miss
++;
1464 if (cache_insert(XBZRLE
.cache
, current_addr
, *current_data
,
1465 ram_counters
.dirty_sync_count
) == -1) {
1468 /* update *current_data when the page has been
1469 inserted into cache */
1470 *current_data
= get_cached_data(XBZRLE
.cache
, current_addr
);
1476 prev_cached_page
= get_cached_data(XBZRLE
.cache
, current_addr
);
1478 /* save current buffer into memory */
1479 memcpy(XBZRLE
.current_buf
, *current_data
, TARGET_PAGE_SIZE
);
1481 /* XBZRLE encoding (if there is no overflow) */
1482 encoded_len
= xbzrle_encode_buffer(prev_cached_page
, XBZRLE
.current_buf
,
1483 TARGET_PAGE_SIZE
, XBZRLE
.encoded_buf
,
1485 if (encoded_len
== 0) {
1486 trace_save_xbzrle_page_skipping();
1488 } else if (encoded_len
== -1) {
1489 trace_save_xbzrle_page_overflow();
1490 xbzrle_counters
.overflow
++;
1491 /* update data in the cache */
1493 memcpy(prev_cached_page
, *current_data
, TARGET_PAGE_SIZE
);
1494 *current_data
= prev_cached_page
;
1499 /* we need to update the data in the cache, in order to get the same data */
1501 memcpy(prev_cached_page
, XBZRLE
.current_buf
, TARGET_PAGE_SIZE
);
1504 /* Send XBZRLE based compressed page */
1505 bytes_xbzrle
= save_page_header(rs
, rs
->f
, block
,
1506 offset
| RAM_SAVE_FLAG_XBZRLE
);
1507 qemu_put_byte(rs
->f
, ENCODING_FLAG_XBZRLE
);
1508 qemu_put_be16(rs
->f
, encoded_len
);
1509 qemu_put_buffer(rs
->f
, XBZRLE
.encoded_buf
, encoded_len
);
1510 bytes_xbzrle
+= encoded_len
+ 1 + 2;
1511 xbzrle_counters
.pages
++;
1512 xbzrle_counters
.bytes
+= bytes_xbzrle
;
    ram_counters.transferred += bytes_xbzrle;
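
    /*
     * The resulting XBZRLE record is therefore: the usual page header with
     * RAM_SAVE_FLAG_XBZRLE set, one ENCODING_FLAG_XBZRLE byte, a be16
     * encoded length and then encoded_len bytes of delta data, which is
     * where the "encoded_len + 1 + 2" accounting above comes from.
     */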
1519 * migration_bitmap_find_dirty: find the next dirty page from start
1521 * Called with rcu_read_lock() to protect migration_bitmap
1523 * Returns the byte offset within memory region of the start of a dirty page
1525 * @rs: current RAM state
1526 * @rb: RAMBlock where to search for dirty pages
1527 * @start: page where we start the search
1530 unsigned long migration_bitmap_find_dirty(RAMState
*rs
, RAMBlock
*rb
,
1531 unsigned long start
)
1533 unsigned long size
= rb
->used_length
>> TARGET_PAGE_BITS
;
1534 unsigned long *bitmap
= rb
->bmap
;
1537 if (!qemu_ram_is_migratable(rb
)) {
1541 if (rs
->ram_bulk_stage
&& start
> 0) {
1544 next
= find_next_bit(bitmap
, size
, start
);
1550 static inline bool migration_bitmap_clear_dirty(RAMState
*rs
,
1556 ret
= test_and_clear_bit(page
, rb
->bmap
);
1559 rs
->migration_dirty_pages
--;
1564 static void migration_bitmap_sync_range(RAMState
*rs
, RAMBlock
*rb
,
1565 ram_addr_t start
, ram_addr_t length
)
1567 rs
->migration_dirty_pages
+=
1568 cpu_physical_memory_sync_dirty_bitmap(rb
, start
, length
,
1569 &rs
->num_dirty_pages_period
);
1573 * ram_pagesize_summary: calculate all the pagesizes of a VM
1575 * Returns a summary bitmap of the page sizes of all RAMBlocks
1577 * For VMs with just normal pages this is equivalent to the host page
1578 * size. If it's got some huge pages then it's the OR of all the
1579 * different page sizes.
1581 uint64_t ram_pagesize_summary(void)
1584 uint64_t summary
= 0;
1586 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
1587 summary
|= block
->page_size
;
1593 static void migration_update_rates(RAMState
*rs
, int64_t end_time
)
1595 uint64_t page_count
= rs
->target_page_count
- rs
->target_page_count_prev
;
1597 /* calculate period counters */
1598 ram_counters
.dirty_pages_rate
= rs
->num_dirty_pages_period
* 1000
1599 / (end_time
- rs
->time_last_bitmap_sync
);
1605 if (migrate_use_xbzrle()) {
1606 xbzrle_counters
.cache_miss_rate
= (double)(xbzrle_counters
.cache_miss
-
1607 rs
->xbzrle_cache_miss_prev
) / page_count
;
1608 rs
->xbzrle_cache_miss_prev
= xbzrle_counters
.cache_miss
;
1612 static void migration_bitmap_sync(RAMState
*rs
)
1616 uint64_t bytes_xfer_now
;
1618 ram_counters
.dirty_sync_count
++;
1620 if (!rs
->time_last_bitmap_sync
) {
1621 rs
->time_last_bitmap_sync
= qemu_clock_get_ms(QEMU_CLOCK_REALTIME
);
1624 trace_migration_bitmap_sync_start();
1625 memory_global_dirty_log_sync();
1627 qemu_mutex_lock(&rs
->bitmap_mutex
);
1629 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
1630 migration_bitmap_sync_range(rs
, block
, 0, block
->used_length
);
1632 ram_counters
.remaining
= ram_bytes_remaining();
1634 qemu_mutex_unlock(&rs
->bitmap_mutex
);
1636 trace_migration_bitmap_sync_end(rs
->num_dirty_pages_period
);
1638 end_time
= qemu_clock_get_ms(QEMU_CLOCK_REALTIME
);
    /* more than 1 second = 1000 milliseconds */
1641 if (end_time
> rs
->time_last_bitmap_sync
+ 1000) {
1642 bytes_xfer_now
= ram_counters
.transferred
;
1644 /* During block migration the auto-converge logic incorrectly detects
1645 * that ram migration makes no progress. Avoid this by disabling the
1646 * throttling logic during the bulk phase of block migration. */
1647 if (migrate_auto_converge() && !blk_mig_bulk_active()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes are 50% more than the approx.
               amount of bytes that just got transferred since the last time
               we were in this routine. If that happens twice, start or
               increase throttling. */
            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
                   (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
                (++rs->dirty_rate_high_cnt >= 2)) {
                    trace_migration_throttle();
                    rs->dirty_rate_high_cnt = 0;
                    mig_throttle_guest_down();
            }
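
            /*
             * Numeric example of the check above: if roughly 600MB were
             * transferred during the last period and the guest dirtied more
             * than 300MB of RAM in the same time (i.e. more than half of
             * what we managed to send), the counter is bumped; two such
             * periods in a row trigger (or increase) throttling.
             */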
1663 migration_update_rates(rs
, end_time
);
1665 rs
->target_page_count_prev
= rs
->target_page_count
;
1667 /* reset period counters */
1668 rs
->time_last_bitmap_sync
= end_time
;
1669 rs
->num_dirty_pages_period
= 0;
1670 rs
->bytes_xfer_prev
= bytes_xfer_now
;
1672 if (migrate_use_events()) {
1673 qapi_event_send_migration_pass(ram_counters
.dirty_sync_count
);
1678 * save_zero_page_to_file: send the zero page to the file
 * Returns the size of data written to the file, 0 means the page is not
 * a zero page
 *
 * @rs: current RAM state
1684 * @file: the file where the data is saved
1685 * @block: block that contains the page we want to send
1686 * @offset: offset inside the block for the page
1688 static int save_zero_page_to_file(RAMState
*rs
, QEMUFile
*file
,
1689 RAMBlock
*block
, ram_addr_t offset
)
1691 uint8_t *p
= block
->host
+ offset
;
1694 if (is_zero_range(p
, TARGET_PAGE_SIZE
)) {
1695 len
+= save_page_header(rs
, file
, block
, offset
| RAM_SAVE_FLAG_ZERO
);
1696 qemu_put_byte(file
, 0);
1703 * save_zero_page: send the zero page to the stream
1705 * Returns the number of pages written.
1707 * @rs: current RAM state
1708 * @block: block that contains the page we want to send
1709 * @offset: offset inside the block for the page
1711 static int save_zero_page(RAMState
*rs
, RAMBlock
*block
, ram_addr_t offset
)
1713 int len
= save_zero_page_to_file(rs
, rs
->f
, block
, offset
);
1716 ram_counters
.duplicate
++;
1717 ram_counters
.transferred
+= len
;
1723 static void ram_release_pages(const char *rbname
, uint64_t offset
, int pages
)
1725 if (!migrate_release_ram() || !migration_in_postcopy()) {
1729 ram_discard_range(rbname
, offset
, pages
<< TARGET_PAGE_BITS
);
1733 * @pages: the number of pages written by the control path,
1735 * > 0 - number of pages written
 * Return true if the page has been saved, otherwise false is returned.
1739 static bool control_save_page(RAMState
*rs
, RAMBlock
*block
, ram_addr_t offset
,
1742 uint64_t bytes_xmit
= 0;
1746 ret
= ram_control_save_page(rs
->f
, block
->offset
, offset
, TARGET_PAGE_SIZE
,
1748 if (ret
== RAM_SAVE_CONTROL_NOT_SUPP
) {
1753 ram_counters
.transferred
+= bytes_xmit
;
1757 if (ret
== RAM_SAVE_CONTROL_DELAYED
) {
1761 if (bytes_xmit
> 0) {
1762 ram_counters
.normal
++;
1763 } else if (bytes_xmit
== 0) {
1764 ram_counters
.duplicate
++;
1771 * directly send the page to the stream
1773 * Returns the number of pages written.
1775 * @rs: current RAM state
1776 * @block: block that contains the page we want to send
1777 * @offset: offset inside the block for the page
1778 * @buf: the page to be sent
 * @async: send the page asynchronously
1781 static int save_normal_page(RAMState
*rs
, RAMBlock
*block
, ram_addr_t offset
,
1782 uint8_t *buf
, bool async
)
1784 ram_counters
.transferred
+= save_page_header(rs
, rs
->f
, block
,
1785 offset
| RAM_SAVE_FLAG_PAGE
);
1787 qemu_put_buffer_async(rs
->f
, buf
, TARGET_PAGE_SIZE
,
1788 migrate_release_ram() &
1789 migration_in_postcopy());
1791 qemu_put_buffer(rs
->f
, buf
, TARGET_PAGE_SIZE
);
1793 ram_counters
.transferred
+= TARGET_PAGE_SIZE
;
1794 ram_counters
.normal
++;
1799 * ram_save_page: send the given page to the stream
1801 * Returns the number of pages written.
1803 * >=0 - Number of pages written - this might legally be 0
1804 * if xbzrle noticed the page was the same.
1806 * @rs: current RAM state
1807 * @block: block that contains the page we want to send
1808 * @offset: offset inside the block for the page
1809 * @last_stage: if we are at the completion stage
1811 static int ram_save_page(RAMState
*rs
, PageSearchStatus
*pss
, bool last_stage
)
1815 bool send_async
= true;
1816 RAMBlock
*block
= pss
->block
;
1817 ram_addr_t offset
= pss
->page
<< TARGET_PAGE_BITS
;
1818 ram_addr_t current_addr
= block
->offset
+ offset
;
1820 p
= block
->host
+ offset
;
1821 trace_ram_save_page(block
->idstr
, (uint64_t)offset
, p
);
1823 XBZRLE_cache_lock();
1824 if (!rs
->ram_bulk_stage
&& !migration_in_postcopy() &&
1825 migrate_use_xbzrle()) {
1826 pages
= save_xbzrle_page(rs
, &p
, current_addr
, block
,
1827 offset
, last_stage
);
1829 /* Can't send this cached data async, since the cache page
1830 * might get updated before it gets to the wire
1836 /* XBZRLE overflow or normal page */
1838 pages
= save_normal_page(rs
, block
, offset
, p
, send_async
);
1841 XBZRLE_cache_unlock();
1846 static int ram_save_multifd_page(RAMState
*rs
, RAMBlock
*block
,
1849 multifd_queue_page(block
, offset
);
1850 ram_counters
.normal
++;
1855 static bool do_compress_ram_page(QEMUFile
*f
, z_stream
*stream
, RAMBlock
*block
,
1856 ram_addr_t offset
, uint8_t *source_buf
)
1858 RAMState
*rs
= ram_state
;
1859 uint8_t *p
= block
->host
+ (offset
& TARGET_PAGE_MASK
);
1860 bool zero_page
= false;
1863 if (save_zero_page_to_file(rs
, f
, block
, offset
)) {
1868 save_page_header(rs
, f
, block
, offset
| RAM_SAVE_FLAG_COMPRESS_PAGE
);
     * copy it to an internal buffer to avoid it being modified by the VM
     * so that we can catch the error during compression and
1875 memcpy(source_buf
, p
, TARGET_PAGE_SIZE
);
1876 ret
= qemu_put_compression_data(f
, stream
, source_buf
, TARGET_PAGE_SIZE
);
1878 qemu_file_set_error(migrate_get_current()->to_dst_file
, ret
);
1879 error_report("compressed data failed!");
1884 ram_release_pages(block
->idstr
, offset
& TARGET_PAGE_MASK
, 1);
1889 update_compress_thread_counts(const CompressParam
*param
, int bytes_xmit
)
1891 if (param
->zero_page
) {
1892 ram_counters
.duplicate
++;
1894 ram_counters
.transferred
+= bytes_xmit
;
1897 static void flush_compressed_data(RAMState
*rs
)
1899 int idx
, len
, thread_count
;
1901 if (!migrate_use_compression()) {
1904 thread_count
= migrate_compress_threads();
1906 qemu_mutex_lock(&comp_done_lock
);
1907 for (idx
= 0; idx
< thread_count
; idx
++) {
1908 while (!comp_param
[idx
].done
) {
1909 qemu_cond_wait(&comp_done_cond
, &comp_done_lock
);
1912 qemu_mutex_unlock(&comp_done_lock
);
1914 for (idx
= 0; idx
< thread_count
; idx
++) {
1915 qemu_mutex_lock(&comp_param
[idx
].mutex
);
1916 if (!comp_param
[idx
].quit
) {
1917 len
= qemu_put_qemu_file(rs
->f
, comp_param
[idx
].file
);
1919 * it's safe to fetch zero_page without holding comp_done_lock
1920 * as there is no further request submitted to the thread,
             * i.e., the thread should be waiting for a request at this point.
1923 update_compress_thread_counts(&comp_param
[idx
], len
);
1925 qemu_mutex_unlock(&comp_param
[idx
].mutex
);
1929 static inline void set_compress_params(CompressParam
*param
, RAMBlock
*block
,
1932 param
->block
= block
;
1933 param
->offset
= offset
;
1936 static int compress_page_with_multi_thread(RAMState
*rs
, RAMBlock
*block
,
1939 int idx
, thread_count
, bytes_xmit
= -1, pages
= -1;
1940 bool wait
= migrate_compress_wait_thread();
1942 thread_count
= migrate_compress_threads();
1943 qemu_mutex_lock(&comp_done_lock
);
1945 for (idx
= 0; idx
< thread_count
; idx
++) {
1946 if (comp_param
[idx
].done
) {
1947 comp_param
[idx
].done
= false;
1948 bytes_xmit
= qemu_put_qemu_file(rs
->f
, comp_param
[idx
].file
);
1949 qemu_mutex_lock(&comp_param
[idx
].mutex
);
1950 set_compress_params(&comp_param
[idx
], block
, offset
);
1951 qemu_cond_signal(&comp_param
[idx
].cond
);
1952 qemu_mutex_unlock(&comp_param
[idx
].mutex
);
1954 update_compress_thread_counts(&comp_param
[idx
], bytes_xmit
);
1960 * wait for the free thread if the user specifies 'compress-wait-thread',
     * otherwise we will post the page out in the main thread as a normal page.
1963 if (pages
< 0 && wait
) {
1964 qemu_cond_wait(&comp_done_cond
, &comp_done_lock
);
1967 qemu_mutex_unlock(&comp_done_lock
);
1973 * find_dirty_block: find the next dirty page and update any state
1974 * associated with the search process.
 * Returns true if a page is found
1978 * @rs: current RAM state
1979 * @pss: data about the state of the current dirty page scan
1980 * @again: set to false if the search has scanned the whole of RAM
1982 static bool find_dirty_block(RAMState
*rs
, PageSearchStatus
*pss
, bool *again
)
1984 pss
->page
= migration_bitmap_find_dirty(rs
, pss
->block
, pss
->page
);
1985 if (pss
->complete_round
&& pss
->block
== rs
->last_seen_block
&&
1986 pss
->page
>= rs
->last_page
) {
1988 * We've been once around the RAM and haven't found anything.
1994 if ((pss
->page
<< TARGET_PAGE_BITS
) >= pss
->block
->used_length
) {
1995 /* Didn't find anything in this RAM Block */
1997 pss
->block
= QLIST_NEXT_RCU(pss
->block
, next
);
             * If memory migration starts over, we will meet a dirtied page
             * which may still exist in the compression threads' ring, so we
             * should flush the compressed data to make sure the new page
             * is not overwritten by the old one in the destination.
             *
             * Also, if xbzrle is on, stop using the data compression at this
             * point. In theory, xbzrle can do better than compression.
2008 flush_compressed_data(rs
);
2010 /* Hit the end of the list */
2011 pss
->block
= QLIST_FIRST_RCU(&ram_list
.blocks
);
2012 /* Flag that we've looped */
2013 pss
->complete_round
= true;
2014 rs
->ram_bulk_stage
= false;
2016 /* Didn't find anything this time, but try again on the new block */
2020 /* Can go around again, but... */
2022 /* We've found something so probably don't need to */
2028 * unqueue_page: gets a page of the queue
2030 * Helper for 'get_queued_page' - gets a page off the queue
2032 * Returns the block of the page (or NULL if none available)
2034 * @rs: current RAM state
2035 * @offset: used to return the offset within the RAMBlock
2037 static RAMBlock
*unqueue_page(RAMState
*rs
, ram_addr_t
*offset
)
2039 RAMBlock
*block
= NULL
;
2041 if (QSIMPLEQ_EMPTY_ATOMIC(&rs
->src_page_requests
)) {
2045 qemu_mutex_lock(&rs
->src_page_req_mutex
);
2046 if (!QSIMPLEQ_EMPTY(&rs
->src_page_requests
)) {
2047 struct RAMSrcPageRequest
*entry
=
2048 QSIMPLEQ_FIRST(&rs
->src_page_requests
);
2050 *offset
= entry
->offset
;
2052 if (entry
->len
> TARGET_PAGE_SIZE
) {
2053 entry
->len
-= TARGET_PAGE_SIZE
;
2054 entry
->offset
+= TARGET_PAGE_SIZE
;
2056 memory_region_unref(block
->mr
);
2057 QSIMPLEQ_REMOVE_HEAD(&rs
->src_page_requests
, next_req
);
2059 migration_consume_urgent_request();
2062 qemu_mutex_unlock(&rs
->src_page_req_mutex
);
 * get_queued_page: unqueue a page from the postcopy requests
2070 * Skips pages that are already sent (!dirty)
 * Returns true if a queued page is found
2074 * @rs: current RAM state
2075 * @pss: data about the state of the current dirty page scan
2077 static bool get_queued_page(RAMState
*rs
, PageSearchStatus
*pss
)
2084 block
= unqueue_page(rs
, &offset
);
2086 * We're sending this page, and since it's postcopy nothing else
2087 * will dirty it, and we must make sure it doesn't get sent again
2088 * even if this queue request was received after the background
2089 * search already sent it.
2094 page
= offset
>> TARGET_PAGE_BITS
;
2095 dirty
= test_bit(page
, block
->bmap
);
2097 trace_get_queued_page_not_dirty(block
->idstr
, (uint64_t)offset
,
2098 page
, test_bit(page
, block
->unsentmap
));
2100 trace_get_queued_page(block
->idstr
, (uint64_t)offset
, page
);
2104 } while (block
&& !dirty
);
2108 * As soon as we start servicing pages out of order, then we have
2109 * to kill the bulk stage, since the bulk stage assumes
2110 * in (migration_bitmap_find_and_reset_dirty) that every page is
2111 * dirty, that's no longer true.
2113 rs
->ram_bulk_stage
= false;
2116 * We want the background search to continue from the queued page
2117 * since the guest is likely to want other pages near to the page
2118 * it just requested.
2121 pss
->page
= offset
>> TARGET_PAGE_BITS
;
2128 * migration_page_queue_free: drop any remaining pages in the ram
2131 * It should be empty at the end anyway, but in error cases there may
 * be some left. In case any page is left, we drop it.
2135 static void migration_page_queue_free(RAMState
*rs
)
2137 struct RAMSrcPageRequest
*mspr
, *next_mspr
;
2138 /* This queue generally should be empty - but in the case of a failed
2139 * migration might have some droppings in.
2142 QSIMPLEQ_FOREACH_SAFE(mspr
, &rs
->src_page_requests
, next_req
, next_mspr
) {
2143 memory_region_unref(mspr
->rb
->mr
);
2144 QSIMPLEQ_REMOVE_HEAD(&rs
->src_page_requests
, next_req
);
2151 * ram_save_queue_pages: queue the page for transmission
2153 * A request from postcopy destination for example.
2155 * Returns zero on success or negative on error
 * @rbname: Name of the RAMBlock of the request. NULL means the
 *          same as the last one.
2159 * @start: starting address from the start of the RAMBlock
2160 * @len: length (in bytes) to send
2162 int ram_save_queue_pages(const char *rbname
, ram_addr_t start
, ram_addr_t len
)
2165 RAMState
*rs
= ram_state
;
2167 ram_counters
.postcopy_requests
++;
2170 /* Reuse last RAMBlock */
2171 ramblock
= rs
->last_req_rb
;
2175 * Shouldn't happen, we can't reuse the last RAMBlock if
2176 * it's the 1st request.
2178 error_report("ram_save_queue_pages no previous block");
2182 ramblock
= qemu_ram_block_by_name(rbname
);
2185 /* We shouldn't be asked for a non-existent RAMBlock */
2186 error_report("ram_save_queue_pages no block '%s'", rbname
);
2189 rs
->last_req_rb
= ramblock
;
2191 trace_ram_save_queue_pages(ramblock
->idstr
, start
, len
);
2192 if (start
+len
> ramblock
->used_length
) {
2193 error_report("%s request overrun start=" RAM_ADDR_FMT
" len="
2194 RAM_ADDR_FMT
" blocklen=" RAM_ADDR_FMT
,
2195 __func__
, start
, len
, ramblock
->used_length
);
2199 struct RAMSrcPageRequest
*new_entry
=
2200 g_malloc0(sizeof(struct RAMSrcPageRequest
));
2201 new_entry
->rb
= ramblock
;
2202 new_entry
->offset
= start
;
2203 new_entry
->len
= len
;
2205 memory_region_ref(ramblock
->mr
);
2206 qemu_mutex_lock(&rs
->src_page_req_mutex
);
2207 QSIMPLEQ_INSERT_TAIL(&rs
->src_page_requests
, new_entry
, next_req
);
2208 migration_make_urgent_request();
2209 qemu_mutex_unlock(&rs
->src_page_req_mutex
);
2219 static bool save_page_use_compression(RAMState
*rs
)
2221 if (!migrate_use_compression()) {
     * If xbzrle is on, stop using the data compression after the first
2227 * round of migration even if compression is enabled. In theory,
2228 * xbzrle can do better than compression.
2230 if (rs
->ram_bulk_stage
|| !migrate_use_xbzrle()) {
2238 * try to compress the page before posting it out, return true if the page
 * has been properly handled by compression, otherwise it needs other
2240 * paths to handle it
2242 static bool save_compress_page(RAMState
*rs
, RAMBlock
*block
, ram_addr_t offset
)
2244 if (!save_page_use_compression(rs
)) {
2249 * When starting the process of a new block, the first page of
2250 * the block should be sent out before other pages in the same
2251 * block, and all the pages in last block should have been sent
2252 * out, keeping this order is important, because the 'cont' flag
2253 * is used to avoid resending the block name.
     * We post the first page as a normal page because compression will take
2256 * much CPU resource.
2258 if (block
!= rs
->last_sent_block
) {
2259 flush_compressed_data(rs
);
2263 if (compress_page_with_multi_thread(rs
, block
, offset
) > 0) {
/**
 * ram_save_target_page: save one target page
 *
 * Returns the number of pages written
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
                                bool last_stage)
{
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
    int res;

    if (control_save_page(rs, block, offset, &res)) {
        return res;
    }

    if (save_compress_page(rs, block, offset)) {
        return 1;
    }

    res = save_zero_page(rs, block, offset);
    if (res > 0) {
        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
         * page would be stale
         */
        if (!save_page_use_compression(rs)) {
            XBZRLE_cache_lock();
            xbzrle_cache_zero_page(rs, block->offset + offset);
            XBZRLE_cache_unlock();
        }
        ram_release_pages(block->idstr, offset, res);
        return res;
    }

    /*
     * do not use multifd for compression as the first page in the new
     * block should be posted out before sending the compressed page
     */
    if (!save_page_use_compression(rs) && migrate_use_multifd()) {
        return ram_save_multifd_page(rs, block, offset);
    }

    return ram_save_page(rs, pss, last_stage);
}
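/*
 * Note added for clarity: the dispatch order above is intentional - the
 * RDMA/control placement path is tried first, then compression, then the
 * zero-page check, then multifd, and finally the plain/xbzrle path in
 * ram_save_page(). Reordering these would break the rule documented in
 * save_compress_page() that the first page of a block goes out uncompressed.
 */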
/**
 * ram_save_host_page: save a whole host page
 *
 * Starting at *offset send pages up to the end of the current host
 * page. It's valid for the initial offset to point into the middle of
 * a host page in which case the remainder of the hostpage is sent.
 * Only dirty target pages are sent. Note that the host page size may
 * be a huge page for this block.
 * The saving stops at the boundary of the used_length of the block
 * if the RAMBlock isn't a multiple of the host page size.
 *
 * Returns the number of pages written or negative on error
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
                              bool last_stage)
{
    int tmppages, pages = 0;
    size_t pagesize_bits =
        qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;

    if (!qemu_ram_is_migratable(pss->block)) {
        error_report("block %s should not be migrated !", pss->block->idstr);
        return 0;
    }

    do {
        /* Check whether the page is dirty and if so send it */
        if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
            pss->page++;
            continue;
        }

        tmppages = ram_save_target_page(rs, pss, last_stage);
        if (tmppages < 0) {
            return tmppages;
        }

        pages += tmppages;
        if (pss->block->unsentmap) {
            clear_bit(pss->page, pss->block->unsentmap);
        }

        pss->page++;
    } while ((pss->page & (pagesize_bits - 1)) &&
             offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));

    /* The offset we leave with is the last one we looked at */
    pss->page--;
    return pages;
}
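/*
 * Worked example (added for clarity, assuming 4KiB target pages): for a
 * RAMBlock backed by 2MiB huge pages, qemu_ram_pagesize() returns 2MiB, so
 * pagesize_bits = 2MiB / 4KiB = 512 and the loop above walks up to 512
 * consecutive target pages, i.e. one whole host page, before returning.
 */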
/**
 * ram_find_and_save_block: finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns the number of pages written where zero means no dirty pages,
 * or negative on error
 *
 * @rs: current RAM state
 * @last_stage: if we are at the completion stage
 *
 * On systems where host-page-size > target-page-size it will send all the
 * pages in a host page that are dirty.
 */
static int ram_find_and_save_block(RAMState *rs, bool last_stage)
{
    PageSearchStatus pss;
    int pages = 0;
    bool again, found;

    /* No dirty page as there is zero RAM */
    if (!ram_bytes_total()) {
        return pages;
    }

    pss.block = rs->last_seen_block;
    pss.page = rs->last_page;
    pss.complete_round = false;

    if (!pss.block) {
        pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
    }

    do {
        again = true;
        found = get_queued_page(rs, &pss);

        if (!found) {
            /* priority queue empty, so just search for something dirty */
            found = find_dirty_block(rs, &pss, &again);
        }

        if (found) {
            pages = ram_save_host_page(rs, &pss, last_stage);
        }
    } while (!pages && again);

    rs->last_seen_block = pss.block;
    rs->last_page = pss.page;

    return pages;
}
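/*
 * Note added for clarity: the do/while above lets a single call site serve
 * both precopy and postcopy - queued (urgent) requests are drained first via
 * get_queued_page(), and only when that queue is empty does the linear
 * dirty-bitmap scan in find_dirty_block() resume from where the previous
 * call left off (rs->last_seen_block / rs->last_page).
 */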
void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;

    if (zero) {
        ram_counters.duplicate += pages;
    } else {
        ram_counters.normal += pages;
        ram_counters.transferred += size;
        qemu_update_position(f, size);
    }
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        total += block->used_length;
    }
    rcu_read_unlock();
    return total;
}
static void xbzrle_load_setup(void)
{
    XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
}

static void xbzrle_load_cleanup(void)
{
    g_free(XBZRLE.decoded_buf);
    XBZRLE.decoded_buf = NULL;
}

static void ram_state_cleanup(RAMState **rsp)
{
    migration_page_queue_free(*rsp);
    qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
    qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
    g_free(*rsp);
    *rsp = NULL;
}
static void xbzrle_cleanup(void)
{
    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.zero_target_page);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
        XBZRLE.zero_target_page = NULL;
    }
    XBZRLE_cache_unlock();
}
static void ram_save_cleanup(void *opaque)
{
    RAMState **rsp = opaque;
    RAMBlock *block;

    /* The caller holds the iothread lock or is in a bh, so there is
     * no writing race against this migration_bitmap
     */
    memory_global_dirty_log_stop();

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        g_free(block->bmap);
        block->bmap = NULL;
        g_free(block->unsentmap);
        block->unsentmap = NULL;
    }

    xbzrle_cleanup();
    compress_threads_save_cleanup();
    ram_state_cleanup(rsp);
}
static void ram_state_reset(RAMState *rs)
{
    rs->last_seen_block = NULL;
    rs->last_sent_block = NULL;
    rs->last_page = 0;
    rs->last_version = ram_list.version;
    rs->ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */
/*
 * 'expected' is the value you expect the bitmap mostly to be full of;
 * it won't bother printing lines that are all this value.
 * If 'todump' is null the migration bitmap is dumped.
 */
void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
                           unsigned long pages)
{
    int64_t cur;
    int64_t linelen = 128;
    char linebuf[129];

    for (cur = 0; cur < pages; cur += linelen) {
        int64_t curb;
        bool found = false;
        /*
         * Last line; catch the case where the line length
         * is longer than remaining ram
         */
        if (cur + linelen > pages) {
            linelen = pages - cur;
        }
        for (curb = 0; curb < linelen; curb++) {
            bool thisbit = test_bit(cur + curb, todump);
            linebuf[curb] = thisbit ? '1' : '.';
            found = found || (thisbit != expected);
        }
        if (found) {
            linebuf[curb] = '\0';
            fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
        }
    }
}
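/*
 * Example of the output format above (illustrative only): with expected ==
 * false, a line is printed whenever a 128-page chunk contains at least one
 * set bit, e.g.
 *
 *     0x00000080 : ....1...1111....  (truncated)
 *
 * where '1' marks set target pages and '.' marks pages with the expected
 * value.
 */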
/* **** functions for postcopy ***** */

void ram_postcopy_migrated_memory_release(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        unsigned long *bitmap = block->bmap;
        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);

        while (run_start < range) {
            unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
            ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
                              (run_end - run_start) << TARGET_PAGE_BITS);
            run_start = find_next_zero_bit(bitmap, range, run_end + 1);
        }
    }
}
/**
 * postcopy_send_discard_bm_ram: discard a RAMBlock
 *
 * Returns zero on success
 *
 * Callback from postcopy_each_ram_send_discard for each RAMBlock
 * Note: At this point the 'unsentmap' is the processed bitmap combined
 *       with the dirtymap; so a '1' means it's either dirty or unsent.
 *
 * @ms: current migration state
 * @pds: state for postcopy
 * @block: RAMBlock whose unsentmap is converted into discard ranges
 */
static int postcopy_send_discard_bm_ram(MigrationState *ms,
                                        PostcopyDiscardState *pds,
                                        RAMBlock *block)
{
    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
    unsigned long current;
    unsigned long *unsentmap = block->unsentmap;

    for (current = 0; current < end; ) {
        unsigned long one = find_next_bit(unsentmap, end, current);

        if (one <= end) {
            unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
            unsigned long discard_length;

            if (zero >= end) {
                discard_length = end - one;
            } else {
                discard_length = zero - one;
            }
            if (discard_length) {
                postcopy_discard_send_range(ms, pds, one, discard_length);
            }
            current = one + discard_length;
        } else {
            current = one;
        }
    }

    return 0;
}
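/*
 * Worked example (illustrative): if a block's unsentmap looks like
 * 0 0 1 1 1 0 0 1 ... then the loop above emits a discard range starting at
 * page index 2 with length 3, continues scanning from index 5, emits another
 * range starting at index 7, and so on - i.e. it run-length encodes the '1'
 * runs into postcopy_discard_send_range() calls.
 */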
/**
 * postcopy_each_ram_send_discard: discard all RAMBlocks
 *
 * Returns 0 for success or negative for error
 *
 * Utility for the outgoing postcopy code.
 *   Calls postcopy_send_discard_bm_ram for each RAMBlock
 *   passing it bitmap indexes and name.
 * (qemu_ram_foreach_block ends up passing unscaled lengths
 *  which would mean postcopy code would have to deal with target page)
 *
 * @ms: current migration state
 */
static int postcopy_each_ram_send_discard(MigrationState *ms)
{
    struct RAMBlock *block;
    int ret;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        PostcopyDiscardState *pds =
            postcopy_discard_send_init(ms, block->idstr);

        /*
         * Postcopy sends chunks of bitmap over the wire, but it
         * just needs indexes at this point, avoids it having
         * target page specific code.
         */
        ret = postcopy_send_discard_bm_ram(ms, pds, block);
        postcopy_discard_send_finish(ms, pds);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
/**
 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
 *
 * Helper for postcopy_chunk_hostpages; it's called twice to
 * canonicalize the two bitmaps, that are similar, but one is
 * inverted.
 *
 * Postcopy requires that all target pages in a hostpage are dirty or
 * clean, not a mix. This function canonicalizes the bitmaps.
 *
 * @ms: current migration state
 * @unsent_pass: if true we need to canonicalize partially unsent host pages
 *               otherwise we need to canonicalize partially dirty host pages
 * @block: block that contains the page we want to canonicalize
 * @pds: state for postcopy
 */
2679 static void postcopy_chunk_hostpages_pass(MigrationState
*ms
, bool unsent_pass
,
2681 PostcopyDiscardState
*pds
)
2683 RAMState
*rs
= ram_state
;
2684 unsigned long *bitmap
= block
->bmap
;
2685 unsigned long *unsentmap
= block
->unsentmap
;
2686 unsigned int host_ratio
= block
->page_size
/ TARGET_PAGE_SIZE
;
2687 unsigned long pages
= block
->used_length
>> TARGET_PAGE_BITS
;
2688 unsigned long run_start
;
2690 if (block
->page_size
== TARGET_PAGE_SIZE
) {
2691 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2696 /* Find a sent page */
2697 run_start
= find_next_zero_bit(unsentmap
, pages
, 0);
2699 /* Find a dirty page */
2700 run_start
= find_next_bit(bitmap
, pages
, 0);
2703 while (run_start
< pages
) {
2704 bool do_fixup
= false;
2705 unsigned long fixup_start_addr
;
2706 unsigned long host_offset
;
2709 * If the start of this run of pages is in the middle of a host
2710 * page, then we need to fixup this host page.
2712 host_offset
= run_start
% host_ratio
;
2715 run_start
-= host_offset
;
2716 fixup_start_addr
= run_start
;
2717 /* For the next pass */
2718 run_start
= run_start
+ host_ratio
;
2720 /* Find the end of this run */
2721 unsigned long run_end
;
2723 run_end
= find_next_bit(unsentmap
, pages
, run_start
+ 1);
2725 run_end
= find_next_zero_bit(bitmap
, pages
, run_start
+ 1);
2728 * If the end isn't at the start of a host page, then the
2729 * run doesn't finish at the end of a host page
2730 * and we need to discard.
2732 host_offset
= run_end
% host_ratio
;
2735 fixup_start_addr
= run_end
- host_offset
;
2737 * This host page has gone, the next loop iteration starts
2738 * from after the fixup
2740 run_start
= fixup_start_addr
+ host_ratio
;
2743 * No discards on this iteration, next loop starts from
2744 * next sent/dirty page
2746 run_start
= run_end
+ 1;
2753 /* Tell the destination to discard this page */
2754 if (unsent_pass
|| !test_bit(fixup_start_addr
, unsentmap
)) {
2755 /* For the unsent_pass we:
2756 * discard partially sent pages
2757 * For the !unsent_pass (dirty) we:
2758 * discard partially dirty pages that were sent
2759 * (any partially sent pages were already discarded
2760 * by the previous unsent_pass)
2762 postcopy_discard_send_range(ms
, pds
, fixup_start_addr
,
2766 /* Clean up the bitmap */
2767 for (page
= fixup_start_addr
;
2768 page
< fixup_start_addr
+ host_ratio
; page
++) {
2769 /* All pages in this host page are now not sent */
2770 set_bit(page
, unsentmap
);
2773 * Remark them as dirty, updating the count for any pages
2774 * that weren't previously dirty.
2776 rs
->migration_dirty_pages
+= !test_and_set_bit(page
, bitmap
);
2781 /* Find the next sent page for the next iteration */
2782 run_start
= find_next_zero_bit(unsentmap
, pages
, run_start
);
2784 /* Find the next dirty page for the next iteration */
2785 run_start
= find_next_bit(bitmap
, pages
, run_start
);
/**
 * postcopy_chunk_hostpages: discard any partially sent host page
 *
 * Utility for the outgoing postcopy code.
 *
 * Discard any partially sent host-page size chunks, mark any partially
 * dirty host-page size chunks as all dirty. In this case the host-page
 * is the host-page for the particular RAMBlock, i.e. it might be a huge page.
 *
 * Returns zero on success
 *
 * @ms: current migration state
 * @block: block we want to work with
 */
static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
{
    PostcopyDiscardState *pds =
        postcopy_discard_send_init(ms, block->idstr);

    /* First pass: Discard all partially sent host pages */
    postcopy_chunk_hostpages_pass(ms, true, block, pds);
    /*
     * Second pass: Ensure that all partially dirty host pages are made
     * fully dirty.
     */
    postcopy_chunk_hostpages_pass(ms, false, block, pds);

    postcopy_discard_send_finish(ms, pds);
    return 0;
}
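/*
 * Worked example (illustrative, assuming a 4-target-page host page): if
 * target pages 0-1 of a host page were sent but 2-3 were not, the first
 * (unsent) pass discards the whole host page on the destination and marks
 * all four pages as unsent and dirty; the second (dirty) pass then finds
 * nothing left to fix for that host page, and the whole page is resent later
 * as one atomic unit.
 */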
2822 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2824 * Returns zero on success
2826 * Transmit the set of pages to be discarded after precopy to the target
2827 * these are pages that:
2828 * a) Have been previously transmitted but are now dirty again
2829 * b) Pages that have never been transmitted, this ensures that
2830 * any pages on the destination that have been mapped by background
2831 * tasks get discarded (transparent huge pages is the specific concern)
2832 * Hopefully this is pretty sparse
2834 * @ms: current migration state
2836 int ram_postcopy_send_discard_bitmap(MigrationState
*ms
)
2838 RAMState
*rs
= ram_state
;
2844 /* This should be our last sync, the src is now paused */
2845 migration_bitmap_sync(rs
);
2847 /* Easiest way to make sure we don't resume in the middle of a host-page */
2848 rs
->last_seen_block
= NULL
;
2849 rs
->last_sent_block
= NULL
;
2852 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
2853 unsigned long pages
= block
->used_length
>> TARGET_PAGE_BITS
;
2854 unsigned long *bitmap
= block
->bmap
;
2855 unsigned long *unsentmap
= block
->unsentmap
;
            /* We don't have a safe way to resize the sentmap, so
             * if the bitmap was resized it will be NULL at this
             * point.
             */
2862 error_report("migration ram resized during precopy phase");
2866 /* Deal with TPS != HPS and huge pages */
2867 ret
= postcopy_chunk_hostpages(ms
, block
);
2874 * Update the unsentmap to be unsentmap = unsentmap | dirty
2876 bitmap_or(unsentmap
, unsentmap
, bitmap
, pages
);
2877 #ifdef DEBUG_POSTCOPY
2878 ram_debug_dump_bitmap(unsentmap
, true, pages
);
2881 trace_ram_postcopy_send_discard_bitmap();
2883 ret
= postcopy_each_ram_send_discard(ms
);
/**
 * ram_discard_range: discard dirtied pages at the beginning of postcopy
 *
 * Returns zero on success
 *
 * @rbname: name of the RAMBlock of the request. NULL means the
 *          same as the last one.
 * @start: RAMBlock starting page
 * @length: RAMBlock size
 */
int ram_discard_range(const char *rbname, uint64_t start, size_t length)
{
    int ret = -1;

    trace_ram_discard_range(rbname, start, length);

    rcu_read_lock();
    RAMBlock *rb = qemu_ram_block_by_name(rbname);

    if (!rb) {
        error_report("ram_discard_range: Failed to find block '%s'", rbname);
        goto err;
    }

    /*
     * On source VM, we don't need to update the received bitmap since
     * we don't even have one.
     */
    if (rb->receivedmap) {
        bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
                     length >> qemu_target_page_bits());
    }

    ret = ram_block_discard_range(rb, start, length);

err:
    rcu_read_unlock();

    return ret;
}
/*
 * For every allocation, we will try not to crash the VM if the
 * allocation fails.
 */
2934 static int xbzrle_init(void)
2936 Error
*local_err
= NULL
;
2938 if (!migrate_use_xbzrle()) {
2942 XBZRLE_cache_lock();
2944 XBZRLE
.zero_target_page
= g_try_malloc0(TARGET_PAGE_SIZE
);
2945 if (!XBZRLE
.zero_target_page
) {
2946 error_report("%s: Error allocating zero page", __func__
);
2950 XBZRLE
.cache
= cache_init(migrate_xbzrle_cache_size(),
2951 TARGET_PAGE_SIZE
, &local_err
);
2952 if (!XBZRLE
.cache
) {
2953 error_report_err(local_err
);
2954 goto free_zero_page
;
2957 XBZRLE
.encoded_buf
= g_try_malloc0(TARGET_PAGE_SIZE
);
2958 if (!XBZRLE
.encoded_buf
) {
2959 error_report("%s: Error allocating encoded_buf", __func__
);
2963 XBZRLE
.current_buf
= g_try_malloc(TARGET_PAGE_SIZE
);
2964 if (!XBZRLE
.current_buf
) {
2965 error_report("%s: Error allocating current_buf", __func__
);
2966 goto free_encoded_buf
;
2969 /* We are all good */
2970 XBZRLE_cache_unlock();
2974 g_free(XBZRLE
.encoded_buf
);
2975 XBZRLE
.encoded_buf
= NULL
;
2977 cache_fini(XBZRLE
.cache
);
2978 XBZRLE
.cache
= NULL
;
2980 g_free(XBZRLE
.zero_target_page
);
2981 XBZRLE
.zero_target_page
= NULL
;
2983 XBZRLE_cache_unlock();
2987 static int ram_state_init(RAMState
**rsp
)
2989 *rsp
= g_try_new0(RAMState
, 1);
2992 error_report("%s: Init ramstate fail", __func__
);
2996 qemu_mutex_init(&(*rsp
)->bitmap_mutex
);
2997 qemu_mutex_init(&(*rsp
)->src_page_req_mutex
);
2998 QSIMPLEQ_INIT(&(*rsp
)->src_page_requests
);
3001 * Count the total number of pages used by ram blocks not including any
3002 * gaps due to alignment or unplugs.
3004 (*rsp
)->migration_dirty_pages
= ram_bytes_total() >> TARGET_PAGE_BITS
;
3006 ram_state_reset(*rsp
);
3011 static void ram_list_init_bitmaps(void)
3014 unsigned long pages
;
3016 /* Skip setting bitmap if there is no RAM */
3017 if (ram_bytes_total()) {
3018 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
3019 pages
= block
->max_length
>> TARGET_PAGE_BITS
;
3020 block
->bmap
= bitmap_new(pages
);
3021 bitmap_set(block
->bmap
, 0, pages
);
3022 if (migrate_postcopy_ram()) {
3023 block
->unsentmap
= bitmap_new(pages
);
3024 bitmap_set(block
->unsentmap
, 0, pages
);
3030 static void ram_init_bitmaps(RAMState
*rs
)
3032 /* For memory_global_dirty_log_start below. */
3033 qemu_mutex_lock_iothread();
3034 qemu_mutex_lock_ramlist();
3037 ram_list_init_bitmaps();
3038 memory_global_dirty_log_start();
3039 migration_bitmap_sync(rs
);
3042 qemu_mutex_unlock_ramlist();
3043 qemu_mutex_unlock_iothread();
3046 static int ram_init_all(RAMState
**rsp
)
3048 if (ram_state_init(rsp
)) {
3052 if (xbzrle_init()) {
3053 ram_state_cleanup(rsp
);
3057 ram_init_bitmaps(*rsp
);
3062 static void ram_state_resume_prepare(RAMState
*rs
, QEMUFile
*out
)
    /*
     * Postcopy is not using xbzrle/compression, so no need for that.
     * Also, since the source is already halted, we don't need to care
     * about dirty page logging either.
     */
3073 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
3074 pages
+= bitmap_count_one(block
->bmap
,
3075 block
->used_length
>> TARGET_PAGE_BITS
);
3078 /* This may not be aligned with current bitmaps. Recalculate. */
3079 rs
->migration_dirty_pages
= pages
;
3081 rs
->last_seen_block
= NULL
;
3082 rs
->last_sent_block
= NULL
;
3084 rs
->last_version
= ram_list
.version
;
3086 * Disable the bulk stage, otherwise we'll resend the whole RAM no
3087 * matter what we have sent.
3089 rs
->ram_bulk_stage
= false;
3091 /* Update RAMState cache of output QEMUFile */
3094 trace_ram_state_resume_prepare(pages
);
/*
 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * a long-running RCU critical section.  When rcu-reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */
3105 * ram_save_setup: Setup RAM for migration
3107 * Returns zero to indicate success and negative for error
3109 * @f: QEMUFile where to send the data
3110 * @opaque: RAMState pointer
3112 static int ram_save_setup(QEMUFile
*f
, void *opaque
)
3114 RAMState
**rsp
= opaque
;
3117 if (compress_threads_save_setup()) {
3121 /* migration has already setup the bitmap, reuse it. */
3122 if (!migration_in_colo_state()) {
3123 if (ram_init_all(rsp
) != 0) {
3124 compress_threads_save_cleanup();
3132 qemu_put_be64(f
, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE
);
3134 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
3135 qemu_put_byte(f
, strlen(block
->idstr
));
3136 qemu_put_buffer(f
, (uint8_t *)block
->idstr
, strlen(block
->idstr
));
3137 qemu_put_be64(f
, block
->used_length
);
3138 if (migrate_postcopy_ram() && block
->page_size
!= qemu_host_page_size
) {
3139 qemu_put_be64(f
, block
->page_size
);
3145 ram_control_before_iterate(f
, RAM_CONTROL_SETUP
);
3146 ram_control_after_iterate(f
, RAM_CONTROL_SETUP
);
3148 multifd_send_sync_main();
3149 qemu_put_be64(f
, RAM_SAVE_FLAG_EOS
);
3156 * ram_save_iterate: iterative stage for migration
3158 * Returns zero to indicate success and negative for error
3160 * @f: QEMUFile where to send the data
3161 * @opaque: RAMState pointer
3163 static int ram_save_iterate(QEMUFile
*f
, void *opaque
)
3165 RAMState
**temp
= opaque
;
3166 RAMState
*rs
= *temp
;
3172 if (blk_mig_bulk_active()) {
3173 /* Avoid transferring ram during bulk phase of block migration as
3174 * the bulk phase will usually take a long time and transferring
3175 * ram updates during that time is pointless. */
3180 if (ram_list
.version
!= rs
->last_version
) {
3181 ram_state_reset(rs
);
3184 /* Read version before ram_list.blocks */
3187 ram_control_before_iterate(f
, RAM_CONTROL_ROUND
);
3189 t0
= qemu_clock_get_ns(QEMU_CLOCK_REALTIME
);
3191 while ((ret
= qemu_file_rate_limit(f
)) == 0 ||
3192 !QSIMPLEQ_EMPTY(&rs
->src_page_requests
)) {
3195 if (qemu_file_get_error(f
)) {
3199 pages
= ram_find_and_save_block(rs
, false);
        /* no more pages to send */
3207 qemu_file_set_error(f
, pages
);
3211 rs
->target_page_count
+= pages
;
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_clock_get_ns() is a bit expensive, so we only check each some
           iterations
        */
3218 if ((i
& 63) == 0) {
3219 uint64_t t1
= (qemu_clock_get_ns(QEMU_CLOCK_REALTIME
) - t0
) / 1000000;
3220 if (t1
> MAX_WAIT
) {
3221 trace_ram_save_iterate_big_wait(t1
, i
);
3230 * Must occur before EOS (or any QEMUFile operation)
3231 * because of RDMA protocol.
3233 ram_control_after_iterate(f
, RAM_CONTROL_ROUND
);
3235 multifd_send_sync_main();
3237 qemu_put_be64(f
, RAM_SAVE_FLAG_EOS
);
3239 ram_counters
.transferred
+= 8;
3241 ret
= qemu_file_get_error(f
);
3250 * ram_save_complete: function called to send the remaining amount of ram
3252 * Returns zero to indicate success or negative on error
3254 * Called with iothread lock
3256 * @f: QEMUFile where to send the data
3257 * @opaque: RAMState pointer
3259 static int ram_save_complete(QEMUFile
*f
, void *opaque
)
3261 RAMState
**temp
= opaque
;
3262 RAMState
*rs
= *temp
;
3267 if (!migration_in_postcopy()) {
3268 migration_bitmap_sync(rs
);
3271 ram_control_before_iterate(f
, RAM_CONTROL_FINISH
);
3273 /* try transferring iterative blocks of memory */
3275 /* flush all remaining blocks regardless of rate limiting */
3279 pages
= ram_find_and_save_block(rs
, !migration_in_colo_state());
        /* no more blocks to send */
3290 flush_compressed_data(rs
);
3291 ram_control_after_iterate(f
, RAM_CONTROL_FINISH
);
3295 multifd_send_sync_main();
3296 qemu_put_be64(f
, RAM_SAVE_FLAG_EOS
);
3302 static void ram_save_pending(QEMUFile
*f
, void *opaque
, uint64_t max_size
,
3303 uint64_t *res_precopy_only
,
3304 uint64_t *res_compatible
,
3305 uint64_t *res_postcopy_only
)
3307 RAMState
**temp
= opaque
;
3308 RAMState
*rs
= *temp
;
3309 uint64_t remaining_size
;
3311 remaining_size
= rs
->migration_dirty_pages
* TARGET_PAGE_SIZE
;
3313 if (!migration_in_postcopy() &&
3314 remaining_size
< max_size
) {
3315 qemu_mutex_lock_iothread();
3317 migration_bitmap_sync(rs
);
3319 qemu_mutex_unlock_iothread();
3320 remaining_size
= rs
->migration_dirty_pages
* TARGET_PAGE_SIZE
;
3323 if (migrate_postcopy_ram()) {
3324 /* We can do postcopy, and all the data is postcopiable */
3325 *res_compatible
+= remaining_size
;
3327 *res_precopy_only
+= remaining_size
;
3331 static int load_xbzrle(QEMUFile
*f
, ram_addr_t addr
, void *host
)
3333 unsigned int xh_len
;
3335 uint8_t *loaded_data
;
3337 /* extract RLE header */
3338 xh_flags
= qemu_get_byte(f
);
3339 xh_len
= qemu_get_be16(f
);
3341 if (xh_flags
!= ENCODING_FLAG_XBZRLE
) {
3342 error_report("Failed to load XBZRLE page - wrong compression!");
3346 if (xh_len
> TARGET_PAGE_SIZE
) {
3347 error_report("Failed to load XBZRLE page - len overflow!");
3350 loaded_data
= XBZRLE
.decoded_buf
;
3351 /* load data and decode */
3352 /* it can change loaded_data to point to an internal buffer */
3353 qemu_get_buffer_in_place(f
, &loaded_data
, xh_len
);
3356 if (xbzrle_decode_buffer(loaded_data
, xh_len
, host
,
3357 TARGET_PAGE_SIZE
) == -1) {
3358 error_report("Failed to load XBZRLE page - decode error!");
3366 * ram_block_from_stream: read a RAMBlock id from the migration stream
3368 * Must be called from within a rcu critical section.
3370 * Returns a pointer from within the RCU-protected ram_list.
3372 * @f: QEMUFile where to read the data from
3373 * @flags: Page flags (mostly to see if it's a continuation of previous block)
3375 static inline RAMBlock
*ram_block_from_stream(QEMUFile
*f
, int flags
)
3377 static RAMBlock
*block
= NULL
;
3381 if (flags
& RAM_SAVE_FLAG_CONTINUE
) {
3383 error_report("Ack, bad migration stream!");
3389 len
= qemu_get_byte(f
);
3390 qemu_get_buffer(f
, (uint8_t *)id
, len
);
3393 block
= qemu_ram_block_by_name(id
);
3395 error_report("Can't find block %s", id
);
3399 if (!qemu_ram_is_migratable(block
)) {
3400 error_report("block %s should not be migrated !", id
);
3407 static inline void *host_from_ram_block_offset(RAMBlock
*block
,
3410 if (!offset_in_ramblock(block
, offset
)) {
3414 return block
->host
+ offset
;
3418 * ram_handle_compressed: handle the zero page case
3420 * If a page (or a whole RDMA chunk) has been
3421 * determined to be zero, then zap it.
3423 * @host: host address for the zero page
3424 * @ch: what the page is filled from. We only support zero
3425 * @size: size of the zero page
3427 void ram_handle_compressed(void *host
, uint8_t ch
, uint64_t size
)
3429 if (ch
!= 0 || !is_zero_range(host
, size
)) {
3430 memset(host
, ch
, size
);
3434 /* return the size after decompression, or negative value on error */
3436 qemu_uncompress_data(z_stream
*stream
, uint8_t *dest
, size_t dest_len
,
3437 const uint8_t *source
, size_t source_len
)
3441 err
= inflateReset(stream
);
3446 stream
->avail_in
= source_len
;
3447 stream
->next_in
= (uint8_t *)source
;
3448 stream
->avail_out
= dest_len
;
3449 stream
->next_out
= dest
;
3451 err
= inflate(stream
, Z_NO_FLUSH
);
3452 if (err
!= Z_STREAM_END
) {
3456 return stream
->total_out
;
3459 static void *do_data_decompress(void *opaque
)
3461 DecompressParam
*param
= opaque
;
3462 unsigned long pagesize
;
3466 qemu_mutex_lock(¶m
->mutex
);
3467 while (!param
->quit
) {
3472 qemu_mutex_unlock(¶m
->mutex
);
3474 pagesize
= TARGET_PAGE_SIZE
;
3476 ret
= qemu_uncompress_data(¶m
->stream
, des
, pagesize
,
3477 param
->compbuf
, len
);
3478 if (ret
< 0 && migrate_get_current()->decompress_error_check
) {
3479 error_report("decompress data failed");
3480 qemu_file_set_error(decomp_file
, ret
);
3483 qemu_mutex_lock(&decomp_done_lock
);
3485 qemu_cond_signal(&decomp_done_cond
);
3486 qemu_mutex_unlock(&decomp_done_lock
);
3488 qemu_mutex_lock(¶m
->mutex
);
3490 qemu_cond_wait(¶m
->cond
, ¶m
->mutex
);
3493 qemu_mutex_unlock(¶m
->mutex
);
3498 static int wait_for_decompress_done(void)
3500 int idx
, thread_count
;
3502 if (!migrate_use_compression()) {
3506 thread_count
= migrate_decompress_threads();
3507 qemu_mutex_lock(&decomp_done_lock
);
3508 for (idx
= 0; idx
< thread_count
; idx
++) {
3509 while (!decomp_param
[idx
].done
) {
3510 qemu_cond_wait(&decomp_done_cond
, &decomp_done_lock
);
3513 qemu_mutex_unlock(&decomp_done_lock
);
3514 return qemu_file_get_error(decomp_file
);
3517 static void compress_threads_load_cleanup(void)
3519 int i
, thread_count
;
3521 if (!migrate_use_compression()) {
3524 thread_count
= migrate_decompress_threads();
3525 for (i
= 0; i
< thread_count
; i
++) {
         * we use it as an indicator which shows if the thread is
         * properly initialized or not
3530 if (!decomp_param
[i
].compbuf
) {
3534 qemu_mutex_lock(&decomp_param
[i
].mutex
);
3535 decomp_param
[i
].quit
= true;
3536 qemu_cond_signal(&decomp_param
[i
].cond
);
3537 qemu_mutex_unlock(&decomp_param
[i
].mutex
);
3539 for (i
= 0; i
< thread_count
; i
++) {
3540 if (!decomp_param
[i
].compbuf
) {
3544 qemu_thread_join(decompress_threads
+ i
);
3545 qemu_mutex_destroy(&decomp_param
[i
].mutex
);
3546 qemu_cond_destroy(&decomp_param
[i
].cond
);
3547 inflateEnd(&decomp_param
[i
].stream
);
3548 g_free(decomp_param
[i
].compbuf
);
3549 decomp_param
[i
].compbuf
= NULL
;
3551 g_free(decompress_threads
);
3552 g_free(decomp_param
);
3553 decompress_threads
= NULL
;
3554 decomp_param
= NULL
;
3558 static int compress_threads_load_setup(QEMUFile
*f
)
3560 int i
, thread_count
;
3562 if (!migrate_use_compression()) {
3566 thread_count
= migrate_decompress_threads();
3567 decompress_threads
= g_new0(QemuThread
, thread_count
);
3568 decomp_param
= g_new0(DecompressParam
, thread_count
);
3569 qemu_mutex_init(&decomp_done_lock
);
3570 qemu_cond_init(&decomp_done_cond
);
3572 for (i
= 0; i
< thread_count
; i
++) {
3573 if (inflateInit(&decomp_param
[i
].stream
) != Z_OK
) {
3577 decomp_param
[i
].compbuf
= g_malloc0(compressBound(TARGET_PAGE_SIZE
));
3578 qemu_mutex_init(&decomp_param
[i
].mutex
);
3579 qemu_cond_init(&decomp_param
[i
].cond
);
3580 decomp_param
[i
].done
= true;
3581 decomp_param
[i
].quit
= false;
3582 qemu_thread_create(decompress_threads
+ i
, "decompress",
3583 do_data_decompress
, decomp_param
+ i
,
3584 QEMU_THREAD_JOINABLE
);
3588 compress_threads_load_cleanup();
3592 static void decompress_data_with_multi_threads(QEMUFile
*f
,
3593 void *host
, int len
)
3595 int idx
, thread_count
;
3597 thread_count
= migrate_decompress_threads();
3598 qemu_mutex_lock(&decomp_done_lock
);
3600 for (idx
= 0; idx
< thread_count
; idx
++) {
3601 if (decomp_param
[idx
].done
) {
3602 decomp_param
[idx
].done
= false;
3603 qemu_mutex_lock(&decomp_param
[idx
].mutex
);
3604 qemu_get_buffer(f
, decomp_param
[idx
].compbuf
, len
);
3605 decomp_param
[idx
].des
= host
;
3606 decomp_param
[idx
].len
= len
;
3607 qemu_cond_signal(&decomp_param
[idx
].cond
);
3608 qemu_mutex_unlock(&decomp_param
[idx
].mutex
);
3612 if (idx
< thread_count
) {
3615 qemu_cond_wait(&decomp_done_cond
, &decomp_done_lock
);
3618 qemu_mutex_unlock(&decomp_done_lock
);
3622 * ram_load_setup: Setup RAM for migration incoming side
3624 * Returns zero to indicate success and negative for error
3626 * @f: QEMUFile where to receive the data
3627 * @opaque: RAMState pointer
3629 static int ram_load_setup(QEMUFile
*f
, void *opaque
)
3631 if (compress_threads_load_setup(f
)) {
3635 xbzrle_load_setup();
3636 ramblock_recv_map_init();
3640 static int ram_load_cleanup(void *opaque
)
3644 RAMBLOCK_FOREACH_MIGRATABLE(rb
) {
3645 if (ramblock_is_pmem(rb
)) {
3646 pmem_persist(rb
->host
, rb
->used_length
);
3650 xbzrle_load_cleanup();
3651 compress_threads_load_cleanup();
3653 RAMBLOCK_FOREACH_MIGRATABLE(rb
) {
3654 g_free(rb
->receivedmap
);
3655 rb
->receivedmap
= NULL
;
/*
 * ram_postcopy_incoming_init: allocate postcopy data structures
 *
 * Returns 0 for success and negative if there was one error
 *
 * @mis: current migration incoming state
 *
 * Allocate data structures etc needed by incoming migration with
 * postcopy-ram. postcopy-ram's similarly named
 * postcopy_ram_incoming_init does the work.
 */
int ram_postcopy_incoming_init(MigrationIncomingState *mis)
{
    return postcopy_ram_incoming_init(mis);
}
3677 * ram_load_postcopy: load a page in postcopy case
3679 * Returns 0 for success or -errno in case of error
3681 * Called in postcopy mode by ram_load().
3682 * rcu_read_lock is taken prior to this being called.
3684 * @f: QEMUFile where to send the data
3686 static int ram_load_postcopy(QEMUFile
*f
)
3688 int flags
= 0, ret
= 0;
3689 bool place_needed
= false;
3690 bool matches_target_page_size
= false;
3691 MigrationIncomingState
*mis
= migration_incoming_get_current();
3692 /* Temporary page that is later 'placed' */
3693 void *postcopy_host_page
= postcopy_get_tmp_page(mis
);
3694 void *last_host
= NULL
;
3695 bool all_zero
= false;
3697 while (!ret
&& !(flags
& RAM_SAVE_FLAG_EOS
)) {
3700 void *page_buffer
= NULL
;
3701 void *place_source
= NULL
;
3702 RAMBlock
*block
= NULL
;
3705 addr
= qemu_get_be64(f
);
         * If qemu file error, we should stop here, and then "addr"
         * may be invalid.
3711 ret
= qemu_file_get_error(f
);
3716 flags
= addr
& ~TARGET_PAGE_MASK
;
3717 addr
&= TARGET_PAGE_MASK
;
3719 trace_ram_load_postcopy_loop((uint64_t)addr
, flags
);
3720 place_needed
= false;
3721 if (flags
& (RAM_SAVE_FLAG_ZERO
| RAM_SAVE_FLAG_PAGE
)) {
3722 block
= ram_block_from_stream(f
, flags
);
3724 host
= host_from_ram_block_offset(block
, addr
);
3726 error_report("Illegal RAM offset " RAM_ADDR_FMT
, addr
);
3730 matches_target_page_size
= block
->page_size
== TARGET_PAGE_SIZE
;
            /*
             * Postcopy requires that we place whole host pages atomically;
             * these may be huge pages for RAMBlocks that are backed by
             * hugetlbfs.
             * To make it atomic, the data is read into a temporary page
             * that's moved into place later.
             * The migration protocol uses, possibly smaller, target-pages
             * however the source ensures it always sends all the components
             * of a host page in order.
             */
3741 page_buffer
= postcopy_host_page
+
3742 ((uintptr_t)host
& (block
->page_size
- 1));
3743 /* If all TP are zero then we can optimise the place */
3744 if (!((uintptr_t)host
& (block
->page_size
- 1))) {
3747 /* not the 1st TP within the HP */
3748 if (host
!= (last_host
+ TARGET_PAGE_SIZE
)) {
3749 error_report("Non-sequential target page %p/%p",
3758 * If it's the last part of a host page then we place the host
3761 place_needed
= (((uintptr_t)host
+ TARGET_PAGE_SIZE
) &
3762 (block
->page_size
- 1)) == 0;
3763 place_source
= postcopy_host_page
;
3767 switch (flags
& ~RAM_SAVE_FLAG_CONTINUE
) {
3768 case RAM_SAVE_FLAG_ZERO
:
3769 ch
= qemu_get_byte(f
);
3770 memset(page_buffer
, ch
, TARGET_PAGE_SIZE
);
3776 case RAM_SAVE_FLAG_PAGE
:
3778 if (!matches_target_page_size
) {
3779 /* For huge pages, we always use temporary buffer */
3780 qemu_get_buffer(f
, page_buffer
, TARGET_PAGE_SIZE
);
                /*
                 * For small pages that match the target page size, we
                 * avoid the qemu_file copy. Instead we directly use
                 * the buffer of QEMUFile to place the page. Note: we
                 * cannot do any QEMUFile operation before using that
                 * buffer to make sure the buffer is valid when
                 * placing the page.
                 */
3790 qemu_get_buffer_in_place(f
, (uint8_t **)&place_source
,
3794 case RAM_SAVE_FLAG_EOS
:
3796 multifd_recv_sync_main();
3799 error_report("Unknown combination of migration flags: %#x"
3800 " (postcopy mode)", flags
);
3805 /* Detect for any possible file errors */
3806 if (!ret
&& qemu_file_get_error(f
)) {
3807 ret
= qemu_file_get_error(f
);
3810 if (!ret
&& place_needed
) {
3811 /* This gets called at the last target page in the host page */
3812 void *place_dest
= host
+ TARGET_PAGE_SIZE
- block
->page_size
;
3815 ret
= postcopy_place_page_zero(mis
, place_dest
,
3818 ret
= postcopy_place_page(mis
, place_dest
,
3819 place_source
, block
);
static bool postcopy_is_advised(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
}

static bool postcopy_is_running(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
}
3839 static int ram_load(QEMUFile
*f
, void *opaque
, int version_id
)
3841 int flags
= 0, ret
= 0, invalid_flags
= 0;
3842 static uint64_t seq_iter
;
    /*
     * If system is running in postcopy mode, page inserts to host memory must
     * be atomic.
     */
3848 bool postcopy_running
= postcopy_is_running();
3849 /* ADVISE is earlier, it shows the source has the postcopy capability on */
3850 bool postcopy_advised
= postcopy_is_advised();
3854 if (version_id
!= 4) {
3858 if (!migrate_use_compression()) {
3859 invalid_flags
|= RAM_SAVE_FLAG_COMPRESS_PAGE
;
    /* This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
3868 if (postcopy_running
) {
3869 ret
= ram_load_postcopy(f
);
3872 while (!postcopy_running
&& !ret
&& !(flags
& RAM_SAVE_FLAG_EOS
)) {
3873 ram_addr_t addr
, total_ram_bytes
;
3877 addr
= qemu_get_be64(f
);
3878 flags
= addr
& ~TARGET_PAGE_MASK
;
3879 addr
&= TARGET_PAGE_MASK
;
3881 if (flags
& invalid_flags
) {
3882 if (flags
& invalid_flags
& RAM_SAVE_FLAG_COMPRESS_PAGE
) {
3883 error_report("Received an unexpected compressed page");
3890 if (flags
& (RAM_SAVE_FLAG_ZERO
| RAM_SAVE_FLAG_PAGE
|
3891 RAM_SAVE_FLAG_COMPRESS_PAGE
| RAM_SAVE_FLAG_XBZRLE
)) {
3892 RAMBlock
*block
= ram_block_from_stream(f
, flags
);
3894 host
= host_from_ram_block_offset(block
, addr
);
3896 error_report("Illegal RAM offset " RAM_ADDR_FMT
, addr
);
3900 ramblock_recv_bitmap_set(block
, host
);
3901 trace_ram_load_loop(block
->idstr
, (uint64_t)addr
, flags
, host
);
3904 switch (flags
& ~RAM_SAVE_FLAG_CONTINUE
) {
3905 case RAM_SAVE_FLAG_MEM_SIZE
:
3906 /* Synchronize RAM block list */
3907 total_ram_bytes
= addr
;
3908 while (!ret
&& total_ram_bytes
) {
3913 len
= qemu_get_byte(f
);
3914 qemu_get_buffer(f
, (uint8_t *)id
, len
);
3916 length
= qemu_get_be64(f
);
3918 block
= qemu_ram_block_by_name(id
);
3919 if (block
&& !qemu_ram_is_migratable(block
)) {
3920 error_report("block %s should not be migrated !", id
);
3923 if (length
!= block
->used_length
) {
3924 Error
*local_err
= NULL
;
3926 ret
= qemu_ram_resize(block
, length
,
3929 error_report_err(local_err
);
3932 /* For postcopy we need to check hugepage sizes match */
3933 if (postcopy_advised
&&
3934 block
->page_size
!= qemu_host_page_size
) {
3935 uint64_t remote_page_size
= qemu_get_be64(f
);
3936 if (remote_page_size
!= block
->page_size
) {
3937 error_report("Mismatched RAM page size %s "
3938 "(local) %zd != %" PRId64
,
3939 id
, block
->page_size
,
3944 ram_control_load_hook(f
, RAM_CONTROL_BLOCK_REG
,
3947 error_report("Unknown ramblock \"%s\", cannot "
3948 "accept migration", id
);
3952 total_ram_bytes
-= length
;
3956 case RAM_SAVE_FLAG_ZERO
:
3957 ch
= qemu_get_byte(f
);
3958 ram_handle_compressed(host
, ch
, TARGET_PAGE_SIZE
);
3961 case RAM_SAVE_FLAG_PAGE
:
3962 qemu_get_buffer(f
, host
, TARGET_PAGE_SIZE
);
3965 case RAM_SAVE_FLAG_COMPRESS_PAGE
:
3966 len
= qemu_get_be32(f
);
3967 if (len
< 0 || len
> compressBound(TARGET_PAGE_SIZE
)) {
3968 error_report("Invalid compressed data length: %d", len
);
3972 decompress_data_with_multi_threads(f
, host
, len
);
3975 case RAM_SAVE_FLAG_XBZRLE
:
3976 if (load_xbzrle(f
, addr
, host
) < 0) {
3977 error_report("Failed to decompress XBZRLE page at "
3978 RAM_ADDR_FMT
, addr
);
3983 case RAM_SAVE_FLAG_EOS
:
3985 multifd_recv_sync_main();
3988 if (flags
& RAM_SAVE_FLAG_HOOK
) {
3989 ram_control_load_hook(f
, RAM_CONTROL_HOOK
, NULL
);
3991 error_report("Unknown combination of migration flags: %#x",
3997 ret
= qemu_file_get_error(f
);
4001 ret
|= wait_for_decompress_done();
4003 trace_ram_load_complete(ret
, seq_iter
);
static bool ram_has_postcopy(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        if (ramblock_is_pmem(rb)) {
            info_report("Block: %s, host: %p is a nvdimm memory, postcopy "
                        "is not supported now!", rb->idstr, rb->host);
            return false;
        }
    }

    return migrate_postcopy_ram();
}
/* Sync all the dirty bitmaps with the destination VM. */
static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
{
    RAMBlock *block;
    QEMUFile *file = s->to_dst_file;
    int ramblock_count = 0;

    trace_ram_dirty_bitmap_sync_start();

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        qemu_savevm_send_recv_bitmap(file, block->idstr);
        trace_ram_dirty_bitmap_request(block->idstr);
        ramblock_count++;
    }

    trace_ram_dirty_bitmap_sync_wait();

    /* Wait until all the ramblocks' dirty bitmap synced */
    while (ramblock_count--) {
        qemu_sem_wait(&s->rp_state.rp_sem);
    }

    trace_ram_dirty_bitmap_sync_complete();

    return 0;
}

static void ram_dirty_bitmap_reload_notify(MigrationState *s)
{
    qemu_sem_post(&s->rp_state.rp_sem);
}
4054 * Read the received bitmap, revert it as the initial dirty bitmap.
4055 * This is only used when the postcopy migration is paused but wants
4056 * to resume from a middle point.
4058 int ram_dirty_bitmap_reload(MigrationState
*s
, RAMBlock
*block
)
4061 QEMUFile
*file
= s
->rp_state
.from_dst_file
;
4062 unsigned long *le_bitmap
, nbits
= block
->used_length
>> TARGET_PAGE_BITS
;
4063 uint64_t local_size
= DIV_ROUND_UP(nbits
, 8);
4064 uint64_t size
, end_mark
;
4066 trace_ram_dirty_bitmap_reload_begin(block
->idstr
);
4068 if (s
->state
!= MIGRATION_STATUS_POSTCOPY_RECOVER
) {
4069 error_report("%s: incorrect state %s", __func__
,
4070 MigrationStatus_str(s
->state
));
     * Note: see comments in ramblock_recv_bitmap_send() on why we
     * need the endianness conversion, and the paddings.
4078 local_size
= ROUND_UP(local_size
, 8);
4081 le_bitmap
= bitmap_new(nbits
+ BITS_PER_LONG
);
4083 size
= qemu_get_be64(file
);
4085 /* The size of the bitmap should match with our ramblock */
4086 if (size
!= local_size
) {
4087 error_report("%s: ramblock '%s' bitmap size mismatch "
4088 "(0x%"PRIx64
" != 0x%"PRIx64
")", __func__
,
4089 block
->idstr
, size
, local_size
);
4094 size
= qemu_get_buffer(file
, (uint8_t *)le_bitmap
, local_size
);
4095 end_mark
= qemu_get_be64(file
);
4097 ret
= qemu_file_get_error(file
);
4098 if (ret
|| size
!= local_size
) {
4099 error_report("%s: read bitmap failed for ramblock '%s': %d"
4100 " (size 0x%"PRIx64
", got: 0x%"PRIx64
")",
4101 __func__
, block
->idstr
, ret
, local_size
, size
);
4106 if (end_mark
!= RAMBLOCK_RECV_BITMAP_ENDING
) {
4107 error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64
,
4108 __func__
, block
->idstr
, end_mark
);
     * Endianness conversion. We are during postcopy (though paused).
     * The dirty bitmap won't change. We can directly modify it.
4117 bitmap_from_le(block
->bmap
, le_bitmap
, nbits
);
4120 * What we received is "received bitmap". Revert it as the initial
4121 * dirty bitmap for this ramblock.
4123 bitmap_complement(block
->bmap
, block
->bmap
, nbits
);
4125 trace_ram_dirty_bitmap_reload_complete(block
->idstr
);
     * We succeeded in syncing the bitmap for the current ramblock. If this is
     * the last one to sync, we need to notify the main send thread.
4131 ram_dirty_bitmap_reload_notify(s
);
static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;
    int ret;

    ret = ram_dirty_bitmap_sync_all(s, rs);
    if (ret) {
        return ret;
    }

    ram_state_resume_prepare(rs, s->to_dst_file);

    return 0;
}
static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
}