/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/main-loop.h"
#include "qemu/pmem.h"
#include "migration.h"
#include "migration/register.h"
#include "migration/misc.h"
#include "qemu-file.h"
#include "postcopy-ram.h"
#include "page_cache.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "exec/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "sysemu/sysemu.h"
#include "qemu/uuid.h"
/***********************************************************/
/* ram save/restore */

/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
 * worked for pages that were filled with the same char. We switched
 * it to only search for the zero value, and renamed it to avoid
 * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
 */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}
XBZRLECacheStats xbzrle_counters;
/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;
static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_lock(&XBZRLE.lock);
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_unlock(&XBZRLE.lock);
}
/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from qmp_migrate_set_cache_size in the main
 * thread, possibly while a migration is in progress. A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock().
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(int64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}
/* Should be holding either ram_list.mutex, or the RCU lock. */
#define RAMBLOCK_FOREACH_MIGRATABLE(block) \
    INTERNAL_RAMBLOCK_FOREACH(block)       \
        if (!qemu_ram_is_migratable(block)) {} else

#undef RAMBLOCK_FOREACH
static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}
int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}

bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}

void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}
void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}
#define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)

/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns >0 if success with sent bytes, or <0 if error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;
    uint64_t size;

    if (!block) {
        error_report("%s: invalid block name: %s", __func__, block_name);
        return -1;
    }

    nbits = block->used_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment). So extend it a bit before hand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap. This is
     * required when source and destination VMs are not using the
     * same endianness. (Note: big endian won't work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = DIV_ROUND_UP(nbits, 8);

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines. We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit hosts.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
    qemu_fflush(file);

    g_free(le_bitmap);

    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    return size + sizeof(size);
}
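
/*
 * A minimal sketch of the matching load side, for illustration only: it
 * consumes the stream in the same order (size, bitmap, ending mark). The
 * helper name below is hypothetical; the real receiver lives in the
 * incoming-migration code, not in this file.
 */
#if 0 /* illustrative only, not compiled */
static int ramblock_recv_bitmap_load_sketch(QEMUFile *file)
{
    uint64_t size = qemu_get_be64(file);
    uint8_t *le_bitmap = g_malloc0(size);
    uint64_t end_mark;

    qemu_get_buffer(file, le_bitmap, size);
    end_mark = qemu_get_be64(file);
    /* a real receiver would convert from little endian and apply the bits */
    g_free(le_bitmap);
    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
        return -EINVAL; /* middle of the stream was corrupted */
    }
    return 0;
}
#endif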
/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};
/* State of RAM for migration */
struct RAMState {
    /* QEMUFile used for this migration */
    QEMUFile *f;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* We are in the first round */
    bool ram_bulk_stage;
    /* How many times we have dirty too many pages */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* number of iterations at the beginning of period */
    uint64_t iterations_prev;
    /* Iterations since start */
    uint64_t iterations;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /* protects modification of the bitmap */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests;
};
typedef struct RAMState RAMState;

static RAMState *ram_state;
uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}

MigrationStats ram_counters;
/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock    *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool         complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;
struct CompressParam {
    bool done;
    bool quit;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;

    /* internally used fields */
    z_stream stream;
    uint8_t *originbuf;
};
typedef struct CompressParam CompressParam;
struct DecompressParam {
    bool done;
    bool quit;
    QemuMutex mutex;
    QemuCond cond;
    void *des;
    uint8_t *compbuf;
    int len;
    z_stream stream;
};
typedef struct DecompressParam DecompressParam;
static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static QEMUFile *decomp_file;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;
static int do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
                                ram_addr_t offset, uint8_t *source_buf);
static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;
    RAMBlock *block;
    ram_addr_t offset;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->block) {
            block = param->block;
            offset = param->offset;
            param->block = NULL;
            qemu_mutex_unlock(&param->mutex);

            do_compress_ram_page(param->file, &param->stream, block, offset,
                                 param->originbuf);

            qemu_mutex_lock(&comp_done_lock);
            param->done = true;
            qemu_cond_signal(&comp_done_cond);
            qemu_mutex_unlock(&comp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}
static inline void terminate_compression_threads(void)
{
    int idx, thread_count;

    thread_count = migrate_compress_threads();

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        comp_param[idx].quit = true;
        qemu_cond_signal(&comp_param[idx].cond);
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}
static void compress_threads_save_cleanup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        /*
         * we use it as an indicator which shows if the thread is
         * properly init'd or not
         */
        if (!comp_param[i].file) {
            break;
        }
        qemu_thread_join(compress_threads + i);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
        deflateEnd(&comp_param[i].stream);
        g_free(comp_param[i].originbuf);
        qemu_fclose(comp_param[i].file);
        comp_param[i].file = NULL;
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}
static int compress_threads_save_setup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!comp_param[i].originbuf) {
            goto exit;
        }

        if (deflateInit(&comp_param[i].stream,
                        migrate_compress_level()) != Z_OK) {
            g_free(comp_param[i].originbuf);
            goto exit;
        }

        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
    return 0;

exit:
    compress_threads_save_cleanup();
    return -1;
}
#define MULTIFD_MAGIC 0x11223344U
#define MULTIFD_VERSION 1

#define MULTIFD_FLAG_SYNC (1 << 0)

typedef struct {
    uint32_t magic;
    uint32_t version;
    unsigned char uuid[16]; /* QemuUUID */
    uint8_t id;
} __attribute__((packed)) MultiFDInit_t;

typedef struct {
    uint32_t magic;
    uint32_t version;
    uint32_t flags;
    uint32_t size;
    uint32_t used;
    uint64_t packet_num;
    char ramblock[256];
    uint64_t offset[];
} __attribute__((packed)) MultiFDPacket_t;
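
/*
 * For orientation, the on-wire size of one multifd packet is the fixed
 * header above plus one offset per page. A minimal sketch (illustrative
 * helper only, mirroring how p->packet_len is computed in the setup code
 * further down):
 */
#if 0 /* illustrative only, not compiled */
static size_t multifd_packet_len_sketch(uint32_t page_count)
{
    return sizeof(MultiFDPacket_t) + sizeof(ram_addr_t) * page_count;
}
#endif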
typedef struct {
    /* number of used pages */
    uint32_t used;
    /* number of allocated pages */
    uint32_t allocated;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* offset of each page */
    ram_addr_t *offset;
    /* pointer to each page */
    struct iovec *iov;
    RAMBlock *block;
} MultiFDPages_t;
typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* sem where to wait for more work */
    QemuSemaphore sem;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* should this thread finish */
    bool quit;
    /* thread has work to do */
    int pending_job;
    /* array of pages to send */
    MultiFDPages_t *pages;
    /* packet allocated len */
    uint32_t packet_len;
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    uint32_t flags;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* thread local variables */
    /* packets sent through this channel */
    uint64_t num_packets;
    /* pages sent through this channel */
    uint64_t num_pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
} MultiFDSendParams;
typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* array of pages to receive */
    MultiFDPages_t *pages;
    /* packet allocated len */
    uint32_t packet_len;
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    uint32_t flags;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* thread local variables */
    /* packets received through this channel */
    uint64_t num_packets;
    /* pages received through this channel */
    uint64_t num_pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
} MultiFDRecvParams;
static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    msg.magic = cpu_to_be32(MULTIFD_MAGIC);
    msg.version = cpu_to_be32(MULTIFD_VERSION);
    msg.id = p->id;
    memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));

    ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }
    return 0;
}
static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }

    be32_to_cpus(&msg.magic);
    be32_to_cpus(&msg.version);

    if (msg.magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet magic %x "
                   "expected %x", msg.magic, MULTIFD_MAGIC);
        return -1;
    }

    if (msg.version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet version %d "
                   "expected %d", msg.version, MULTIFD_VERSION);
        return -1;
    }

    if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
        char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
        char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);

        error_setg(errp, "multifd: received uuid '%s' and expected "
                   "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
        g_free(uuid);
        g_free(msg_uuid);
        return -1;
    }

    if (msg.id > migrate_multifd_channels()) {
        error_setg(errp, "multifd: received channel id %d is "
                   "above the number of channels %d", msg.id,
                   migrate_multifd_channels());
        return -1;
    }

    return msg.id;
}
static MultiFDPages_t *multifd_pages_init(size_t size)
{
    MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);

    pages->allocated = size;
    pages->iov = g_new0(struct iovec, size);
    pages->offset = g_new0(ram_addr_t, size);

    return pages;
}
static void multifd_pages_clear(MultiFDPages_t *pages)
{
    pages->used = 0;
    pages->allocated = 0;
    pages->packet_num = 0;
    pages->block = NULL;
    g_free(pages->iov);
    pages->iov = NULL;
    g_free(pages->offset);
    pages->offset = NULL;
    g_free(pages);
}
static void multifd_send_fill_packet(MultiFDSendParams *p)
{
    MultiFDPacket_t *packet = p->packet;
    int i;

    packet->magic = cpu_to_be32(MULTIFD_MAGIC);
    packet->version = cpu_to_be32(MULTIFD_VERSION);
    packet->flags = cpu_to_be32(p->flags);
    packet->size = cpu_to_be32(migrate_multifd_page_count());
    packet->used = cpu_to_be32(p->pages->used);
    packet->packet_num = cpu_to_be64(p->packet_num);

    if (p->pages->block) {
        strncpy(packet->ramblock, p->pages->block->idstr, 256);
    }

    for (i = 0; i < p->pages->used; i++) {
        packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
    }
}
static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
{
    MultiFDPacket_t *packet = p->packet;
    RAMBlock *block;
    int i;

    be32_to_cpus(&packet->magic);
    if (packet->magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet "
                   "magic %x and expected magic %x",
                   packet->magic, MULTIFD_MAGIC);
        return -1;
    }

    be32_to_cpus(&packet->version);
    if (packet->version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet "
                   "version %d and expected version %d",
                   packet->version, MULTIFD_VERSION);
        return -1;
    }

    p->flags = be32_to_cpu(packet->flags);

    be32_to_cpus(&packet->size);
    if (packet->size > migrate_multifd_page_count()) {
        error_setg(errp, "multifd: received packet "
                   "with size %d and expected maximum size %d",
                   packet->size, migrate_multifd_page_count());
        return -1;
    }

    p->pages->used = be32_to_cpu(packet->used);
    if (p->pages->used > packet->size) {
        error_setg(errp, "multifd: received packet "
                   "with %d pages and expected maximum of %d pages",
                   p->pages->used, packet->size);
        return -1;
    }

    p->packet_num = be64_to_cpu(packet->packet_num);

    if (p->pages->used) {
        /* make sure that ramblock is 0 terminated */
        packet->ramblock[255] = 0;
        block = qemu_ram_block_by_name(packet->ramblock);
        if (!block) {
            error_setg(errp, "multifd: unknown ram block %s",
                       packet->ramblock);
            return -1;
        }
    }

    for (i = 0; i < p->pages->used; i++) {
        ram_addr_t offset = be64_to_cpu(packet->offset[i]);

        if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
            error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
                       " (max " RAM_ADDR_FMT ")",
                       offset, block->used_length - TARGET_PAGE_SIZE);
            return -1;
        }
        p->pages->iov[i].iov_base = block->host + offset;
        p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
    }

    return 0;
}
struct {
    MultiFDSendParams *params;
    /* number of created threads */
    int count;
    /* array of pages to send */
    MultiFDPages_t *pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* send channels ready */
    QemuSemaphore channels_ready;
} *multifd_send_state;
/*
 * How do we use multifd_send_state->pages and channel->pages?
 *
 * We create a pages struct for each channel, and a main one. Each time
 * that we need to send a batch of pages we interchange the ones between
 * multifd_send_state and the channel that is sending it. There are
 * two reasons for that:
 *    - to not have to do so many mallocs during migration
 *    - to make it easier to know what to free at the end of migration
 *
 * This way we always know who is the owner of each "pages" struct,
 * and we don't need any locking. It belongs to the migration thread
 * or to the channel thread. Switching is safe because the migration
 * thread is using the channel mutex when changing it, and the channel
 * has to have finished with its own, otherwise pending_job can't be
 * false.
 */
static void multifd_send_pages(void)
{
    int i;
    static int next_channel;
    MultiFDSendParams *p = NULL; /* make gcc happy */
    MultiFDPages_t *pages = multifd_send_state->pages;
    uint64_t transferred;

    qemu_sem_wait(&multifd_send_state->channels_ready);
    for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
        p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        if (!p->pending_job) {
            p->pending_job++;
            next_channel = (i + 1) % migrate_multifd_channels();
            break;
        }
        qemu_mutex_unlock(&p->mutex);
    }
    p->pages->used = 0;
    p->packet_num = multifd_send_state->packet_num++;
    p->pages->block = NULL;
    multifd_send_state->pages = p->pages;
    p->pages = pages;
    transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
    ram_counters.multifd_bytes += transferred;
    ram_counters.transferred += transferred;
    qemu_mutex_unlock(&p->mutex);
    qemu_sem_post(&p->sem);
}
static void multifd_queue_page(RAMBlock *block, ram_addr_t offset)
{
    MultiFDPages_t *pages = multifd_send_state->pages;

    if (!pages->block) {
        pages->block = block;
    }

    if (pages->block == block) {
        pages->offset[pages->used] = offset;
        pages->iov[pages->used].iov_base = block->host + offset;
        pages->iov[pages->used].iov_len = TARGET_PAGE_SIZE;
        pages->used++;

        if (pages->used < pages->allocated) {
            return;
        }
    }

    multifd_send_pages();

    if (pages->block != block) {
        multifd_queue_page(block, offset);
    }
}
static void multifd_send_terminate_threads(Error *err)
{
    int i;

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
            s->state == MIGRATION_STATUS_DEVICE ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        p->quit = true;
        qemu_sem_post(&p->sem);
        qemu_mutex_unlock(&p->mutex);
    }
}
int multifd_save_cleanup(Error **errp)
{
    int i;
    int ret = 0;

    if (!migrate_use_multifd()) {
        return 0;
    }
    multifd_send_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        if (p->running) {
            qemu_thread_join(&p->thread);
        }
        socket_send_channel_destroy(p->c);
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem);
        qemu_sem_destroy(&p->sem_sync);
        g_free(p->name);
        p->name = NULL;
        multifd_pages_clear(p->pages);
        p->pages = NULL;
        p->packet_len = 0;
        g_free(p->packet);
        p->packet = NULL;
    }
    qemu_sem_destroy(&multifd_send_state->channels_ready);
    qemu_sem_destroy(&multifd_send_state->sem_sync);
    g_free(multifd_send_state->params);
    multifd_send_state->params = NULL;
    multifd_pages_clear(multifd_send_state->pages);
    multifd_send_state->pages = NULL;
    g_free(multifd_send_state);
    multifd_send_state = NULL;
    return ret;
}
static void multifd_send_sync_main(void)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    if (multifd_send_state->pages->used) {
        multifd_send_pages();
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_signal(p->id);

        qemu_mutex_lock(&p->mutex);

        p->packet_num = multifd_send_state->packet_num++;
        p->flags |= MULTIFD_FLAG_SYNC;
        p->pending_job++;
        qemu_mutex_unlock(&p->mutex);
        qemu_sem_post(&p->sem);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_wait(p->id);
        qemu_sem_wait(&multifd_send_state->sem_sync);
    }
    trace_multifd_send_sync_main(multifd_send_state->packet_num);
}
static void *multifd_send_thread(void *opaque)
{
    MultiFDSendParams *p = opaque;
    Error *local_err = NULL;
    int ret;

    trace_multifd_send_thread_start(p->id);
    rcu_register_thread();

    if (multifd_send_initial_packet(p, &local_err) < 0) {
        goto out;
    }
    /* initial packet */
    p->num_packets = 1;

    while (true) {
        qemu_sem_wait(&p->sem);
        qemu_mutex_lock(&p->mutex);

        if (p->pending_job) {
            uint32_t used = p->pages->used;
            uint64_t packet_num = p->packet_num;
            uint32_t flags = p->flags;

            multifd_send_fill_packet(p);
            p->flags = 0;
            p->num_packets++;
            p->num_pages += used;
            p->pages->used = 0;
            qemu_mutex_unlock(&p->mutex);

            trace_multifd_send(p->id, packet_num, used, flags);

            ret = qio_channel_write_all(p->c, (void *)p->packet,
                                        p->packet_len, &local_err);
            if (ret != 0) {
                break;
            }

            ret = qio_channel_writev_all(p->c, p->pages->iov, used, &local_err);
            if (ret != 0) {
                break;
            }

            qemu_mutex_lock(&p->mutex);
            p->pending_job--;
            qemu_mutex_unlock(&p->mutex);

            if (flags & MULTIFD_FLAG_SYNC) {
                qemu_sem_post(&multifd_send_state->sem_sync);
            }
            qemu_sem_post(&multifd_send_state->channels_ready);
        } else if (p->quit) {
            qemu_mutex_unlock(&p->mutex);
            break;
        } else {
            qemu_mutex_unlock(&p->mutex);
            /* sometimes there are spurious wakeups */
        }
    }

out:
    if (local_err) {
        multifd_send_terminate_threads(local_err);
    }

    qemu_mutex_lock(&p->mutex);
    p->running = false;
    qemu_mutex_unlock(&p->mutex);

    rcu_unregister_thread();
    trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);

    return NULL;
}
static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
{
    MultiFDSendParams *p = opaque;
    QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
    Error *local_err = NULL;

    if (qio_task_propagate_error(task, &local_err)) {
        if (multifd_save_cleanup(&local_err) != 0) {
            migrate_set_error(migrate_get_current(), local_err);
        }
    } else {
        p->c = QIO_CHANNEL(sioc);
        qio_channel_set_delay(p->c, false);
        p->running = true;
        qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
                           QEMU_THREAD_JOINABLE);

        atomic_inc(&multifd_send_state->count);
    }
}
int multifd_save_setup(void)
{
    int thread_count;
    uint32_t page_count = migrate_multifd_page_count();
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }
    thread_count = migrate_multifd_channels();
    multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
    multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
    atomic_set(&multifd_send_state->count, 0);
    multifd_send_state->pages = multifd_pages_init(page_count);
    qemu_sem_init(&multifd_send_state->sem_sync, 0);
    qemu_sem_init(&multifd_send_state->channels_ready, 0);

    for (i = 0; i < thread_count; i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem, 0);
        qemu_sem_init(&p->sem_sync, 0);
        p->quit = false;
        p->pending_job = 0;
        p->id = i;
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(ram_addr_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->name = g_strdup_printf("multifdsend_%d", i);
        socket_send_channel_create(multifd_new_send_channel_async, p);
    }
    return 0;
}
struct {
    MultiFDRecvParams *params;
    /* number of created threads */
    int count;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
    /* global number of generated multifd packets */
    uint64_t packet_num;
} *multifd_recv_state;
static void multifd_recv_terminate_threads(Error *err)
{
    int i;

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_lock(&p->mutex);
        /* We could arrive here for two reasons:
           - normal quit, i.e. everything went fine, just finished
           - error quit: We close the channels so the channel threads
             finish the qio_channel_read_all_eof() */
        qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        qemu_mutex_unlock(&p->mutex);
    }
}
int multifd_load_cleanup(Error **errp)
{
    int i;
    int ret = 0;

    if (!migrate_use_multifd()) {
        return 0;
    }
    multifd_recv_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        if (p->running) {
            qemu_thread_join(&p->thread);
        }
        object_unref(OBJECT(p->c));
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem_sync);
        g_free(p->name);
        p->name = NULL;
        multifd_pages_clear(p->pages);
        p->pages = NULL;
        p->packet_len = 0;
        g_free(p->packet);
        p->packet = NULL;
    }
    qemu_sem_destroy(&multifd_recv_state->sem_sync);
    g_free(multifd_recv_state->params);
    multifd_recv_state->params = NULL;
    g_free(multifd_recv_state);
    multifd_recv_state = NULL;

    return ret;
}
static void multifd_recv_sync_main(void)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        trace_multifd_recv_sync_main_wait(p->id);
        qemu_sem_wait(&multifd_recv_state->sem_sync);
        qemu_mutex_lock(&p->mutex);
        if (multifd_recv_state->packet_num < p->packet_num) {
            multifd_recv_state->packet_num = p->packet_num;
        }
        qemu_mutex_unlock(&p->mutex);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        trace_multifd_recv_sync_main_signal(p->id);
        qemu_sem_post(&p->sem_sync);
    }
    trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
}
static void *multifd_recv_thread(void *opaque)
{
    MultiFDRecvParams *p = opaque;
    Error *local_err = NULL;
    int ret;

    trace_multifd_recv_thread_start(p->id);
    rcu_register_thread();

    while (true) {
        uint32_t used;
        uint32_t flags;

        ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
                                       p->packet_len, &local_err);
        if (ret == 0) {   /* EOF */
            break;
        }
        if (ret == -1) {   /* Error */
            break;
        }

        qemu_mutex_lock(&p->mutex);
        ret = multifd_recv_unfill_packet(p, &local_err);
        if (ret) {
            qemu_mutex_unlock(&p->mutex);
            break;
        }

        used = p->pages->used;
        flags = p->flags;
        trace_multifd_recv(p->id, p->packet_num, used, flags);
        p->num_packets++;
        p->num_pages += used;
        qemu_mutex_unlock(&p->mutex);

        ret = qio_channel_readv_all(p->c, p->pages->iov, used, &local_err);
        if (ret != 0) {
            break;
        }

        if (flags & MULTIFD_FLAG_SYNC) {
            qemu_sem_post(&multifd_recv_state->sem_sync);
            qemu_sem_wait(&p->sem_sync);
        }
    }

    if (local_err) {
        multifd_recv_terminate_threads(local_err);
    }
    qemu_mutex_lock(&p->mutex);
    p->running = false;
    qemu_mutex_unlock(&p->mutex);

    rcu_unregister_thread();
    trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);

    return NULL;
}
int multifd_load_setup(void)
{
    int thread_count;
    uint32_t page_count = migrate_multifd_page_count();
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }
    thread_count = migrate_multifd_channels();
    multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
    multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
    atomic_set(&multifd_recv_state->count, 0);
    qemu_sem_init(&multifd_recv_state->sem_sync, 0);

    for (i = 0; i < thread_count; i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem_sync, 0);
        p->id = i;
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(ram_addr_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->name = g_strdup_printf("multifdrecv_%d", i);
    }
    return 0;
}
bool multifd_recv_all_channels_created(void)
{
    int thread_count = migrate_multifd_channels();

    if (!migrate_use_multifd()) {
        return true;
    }

    return thread_count == atomic_read(&multifd_recv_state->count);
}
/* Return true if multifd is ready for the migration, otherwise false */
bool multifd_recv_new_channel(QIOChannel *ioc)
{
    MultiFDRecvParams *p;
    Error *local_err = NULL;
    int id;

    id = multifd_recv_initial_packet(ioc, &local_err);
    if (id < 0) {
        multifd_recv_terminate_threads(local_err);
        return false;
    }

    p = &multifd_recv_state->params[id];
    if (p->c != NULL) {
        error_setg(&local_err, "multifd: received id '%d' already setup",
                   id);
        multifd_recv_terminate_threads(local_err);
        return false;
    }
    p->c = ioc;
    object_ref(OBJECT(ioc));
    /* initial packet */
    p->num_packets = 1;

    p->running = true;
    qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
                       QEMU_THREAD_JOINABLE);
    atomic_inc(&multifd_recv_state->count);
    return multifd_recv_state->count == migrate_multifd_channels();
}
/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
                               ram_addr_t offset)
{
    size_t size, len;

    if (block == rs->last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        rs->last_sent_block = block;
    }
    return size;
}
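
/*
 * A minimal sketch of how the load side splits the header word back into
 * flags (low bits) and page-aligned address (high bits). Illustrative
 * only; the real decoder is in the ram_load path and this helper name is
 * hypothetical:
 */
#if 0 /* illustrative only, not compiled */
static void load_page_header_sketch(QEMUFile *f)
{
    uint64_t addr = qemu_get_be64(f);
    int flags = addr & ~TARGET_PAGE_MASK; /* e.g. RAM_SAVE_FLAG_PAGE */

    addr &= TARGET_PAGE_MASK; /* page-aligned offset inside the block */
    if (!(flags & RAM_SAVE_FLAG_CONTINUE)) {
        /* a length byte plus the block idstr string follows */
    }
}
#endif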
/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes. If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(void)
{
    MigrationState *s = migrate_get_current();
    uint64_t pct_initial = s->parameters.cpu_throttle_initial;
    uint64_t pct_increment = s->parameters.cpu_throttle_increment;
    int pct_max = s->parameters.max_cpu_throttle;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_increment,
                             pct_max));
    }
}
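
/*
 * Worked example, assuming the usual parameter defaults (an assumption
 * for illustration, not values set by this file): with
 * cpu_throttle_initial=20, cpu_throttle_increment=10 and
 * max_cpu_throttle=99, successive calls throttle the guest at
 * 20%, 30%, 40%, ... and cap at 99%.
 */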
/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @rs: current RAM state
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
{
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
                 ram_counters.dirty_sync_count);
}
#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 */
static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr,
                         ram_counters.dirty_sync_count)) {
        xbzrle_counters.cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             ram_counters.dirty_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        xbzrle_counters.overflow++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(rs, rs->f, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(rs->f, encoded_len);
    qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    xbzrle_counters.pages++;
    xbzrle_counters.bytes += bytes_xbzrle;
    ram_counters.transferred += bytes_xbzrle;

    return 1;
}
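
/*
 * On the wire an XBZRLE page is: page header, one ENCODING_FLAG_XBZRLE
 * byte, a be16 encoded length, then the encoded buffer, hence the
 * "+ 1 + 2" accounting above. A minimal load-side sketch (illustrative;
 * the helper name is hypothetical):
 */
#if 0 /* illustrative only, not compiled */
static int load_xbzrle_sketch(QEMUFile *f, uint8_t *host)
{
    unsigned int xh_len;

    if (qemu_get_byte(f) != ENCODING_FLAG_XBZRLE) {
        return -1; /* did not find the expected encoding flag */
    }
    xh_len = qemu_get_be16(f);
    if (xh_len > TARGET_PAGE_SIZE) {
        return -1; /* encoded data can never exceed one page */
    }
    qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);
    return xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len,
                                host, TARGET_PAGE_SIZE) < 0 ? -1 : 0;
}
#endif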
/**
 * migration_bitmap_find_dirty: find the next dirty page from start
 *
 * Called with rcu_read_lock() to protect migration_bitmap
 *
 * Returns the byte offset within memory region of the start of a dirty page
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 */
static inline
unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                          unsigned long start)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long next;

    if (!qemu_ram_is_migratable(rb)) {
        return size;
    }

    if (rs->ram_bulk_stage && start > 0) {
        next = start + 1;
    } else {
        next = find_next_bit(bitmap, size, start);
    }

    return next;
}
static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                RAMBlock *rb,
                                                unsigned long page)
{
    bool ret;

    ret = test_and_clear_bit(page, rb->bmap);

    if (ret) {
        rs->migration_dirty_pages--;
    }
    return ret;
}
static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
                                        ram_addr_t start, ram_addr_t length)
{
    rs->migration_dirty_pages +=
        cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
                                              &rs->num_dirty_pages_period);
}
/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size. If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    RAMBlock *block;
    uint64_t summary = 0;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        summary |= block->page_size;
    }

    return summary;
}
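
/*
 * Example (an illustration, not derived from this file): a VM whose RAM
 * is all 4KiB pages except one 2MiB hugetlbfs-backed block would report
 * 0x1000 | 0x200000 == 0x201000 as its summary.
 */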
static void migration_update_rates(RAMState *rs, int64_t end_time)
{
    uint64_t iter_count = rs->iterations - rs->iterations_prev;

    /* calculate period counters */
    ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
                / (end_time - rs->time_last_bitmap_sync);

    if (!iter_count) {
        return;
    }

    if (migrate_use_xbzrle()) {
        xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
            rs->xbzrle_cache_miss_prev) / iter_count;
        rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
    }
}
static void migration_bitmap_sync(RAMState *rs)
{
    RAMBlock *block;
    int64_t end_time;
    uint64_t bytes_xfer_now;

    ram_counters.dirty_sync_count++;

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync();

    qemu_mutex_lock(&rs->bitmap_mutex);
    rcu_read_lock();
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        migration_bitmap_sync_range(rs, block, 0, block->used_length);
    }
    ram_counters.remaining = ram_bytes_remaining();
    rcu_read_unlock();
    qemu_mutex_unlock(&rs->bitmap_mutex);

    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        bytes_xfer_now = ram_counters.transferred;

        /* During block migration the auto-converge logic incorrectly detects
         * that ram migration makes no progress. Avoid this by disabling the
         * throttling logic during the bulk phase of block migration. */
        if (migrate_auto_converge() && !blk_mig_bulk_active()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes is 50% more than the approx.
               amount of bytes that just got transferred since the last time
               we were in this routine. If that happens twice, start or
               increase throttling. */

            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
                   (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
                (++rs->dirty_rate_high_cnt >= 2)) {
                    trace_migration_throttle();
                    rs->dirty_rate_high_cnt = 0;
                    mig_throttle_guest_down();
            }
        }

        migration_update_rates(rs, end_time);

        rs->iterations_prev = rs->iterations;

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
        rs->bytes_xfer_prev = bytes_xfer_now;
    }
    if (migrate_use_events()) {
        qapi_event_send_migration_pass(ram_counters.dirty_sync_count, NULL);
    }
}
/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
{
    uint8_t *p = block->host + offset;
    int pages = -1;

    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        ram_counters.duplicate++;
        ram_counters.transferred +=
            save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_ZERO);
        qemu_put_byte(rs->f, 0);
        ram_counters.transferred += 1;
        pages = 1;
    }

    return pages;
}
static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
{
    if (!migrate_release_ram() || !migration_in_postcopy()) {
        return;
    }

    ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
}
/*
 * @pages: the number of pages written by the control path,
 *         < 0 - error
 *         > 0 - number of pages written
 *
 * Return true if the pages has been saved, otherwise false is returned.
 */
static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
                              int *pages)
{
    uint64_t bytes_xmit = 0;
    int ret;

    *pages = -1;
    ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
                                &bytes_xmit);
    if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
        return false;
    }

    if (bytes_xmit) {
        ram_counters.transferred += bytes_xmit;
        *pages = 1;
    }

    if (ret == RAM_SAVE_CONTROL_DELAYED) {
        return true;
    }

    if (bytes_xmit > 0) {
        ram_counters.normal++;
    } else if (bytes_xmit == 0) {
        ram_counters.duplicate++;
    }

    return true;
}
/*
 * directly send the page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @buf: the page to be sent
 * @async: send the page asynchronously
 */
static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
                            uint8_t *buf, bool async)
{
    ram_counters.transferred += save_page_header(rs, rs->f, block,
                                                 offset | RAM_SAVE_FLAG_PAGE);
    if (async) {
        qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
                              migrate_release_ram() &&
                              migration_in_postcopy());
    } else {
        qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
    }
    ram_counters.transferred += TARGET_PAGE_SIZE;
    ram_counters.normal++;
    return 1;
}
/**
 * ram_save_page: send the given page to the stream
 *
 * Returns the number of pages written.
 *          < 0 - error
 *          >=0 - Number of pages written - this might legally be 0
 *                if xbzrle noticed the page was the same.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 */
static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
{
    int pages = -1;
    uint8_t *p;
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
    ram_addr_t current_addr = block->offset + offset;

    p = block->host + offset;
    trace_ram_save_page(block->idstr, (uint64_t)offset, p);

    XBZRLE_cache_lock();
    if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
        migrate_use_xbzrle()) {
        pages = save_xbzrle_page(rs, &p, current_addr, block,
                                 offset, last_stage);
        if (!last_stage) {
            /* Can't send this cached data async, since the cache page
             * might get updated before it gets to the wire
             */
            send_async = false;
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        pages = save_normal_page(rs, block, offset, p, send_async);
    }

    XBZRLE_cache_unlock();

    return pages;
}
static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
                                 ram_addr_t offset)
{
    multifd_queue_page(block, offset);
    ram_counters.normal++;

    return 1;
}
static int do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
                                ram_addr_t offset, uint8_t *source_buf)
{
    RAMState *rs = ram_state;
    int bytes_sent, blen;
    uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);

    bytes_sent = save_page_header(rs, f, block, offset |
                                  RAM_SAVE_FLAG_COMPRESS_PAGE);

    /*
     * copy it to an internal buffer to avoid it being modified by the VM
     * so that we can catch the error during compression and
     * decompression
     */
    memcpy(source_buf, p, TARGET_PAGE_SIZE);
    blen = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
    if (blen < 0) {
        bytes_sent = 0;
        qemu_file_set_error(migrate_get_current()->to_dst_file, blen);
        error_report("compressed data failed!");
    } else {
        bytes_sent += blen;
        ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
    }

    return bytes_sent;
}
static void flush_compressed_data(RAMState *rs)
{
    int idx, len, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_compress_threads();

    qemu_mutex_lock(&comp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!comp_param[idx].done) {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        if (!comp_param[idx].quit) {
            len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
            ram_counters.transferred += len;
        }
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}
static inline void set_compress_params(CompressParam *param, RAMBlock *block,
                                       ram_addr_t offset)
{
    param->block = block;
    param->offset = offset;
}
static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
                                           ram_addr_t offset)
{
    int idx, thread_count, bytes_xmit = -1, pages = -1;

    thread_count = migrate_compress_threads();
    qemu_mutex_lock(&comp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (comp_param[idx].done) {
                comp_param[idx].done = false;
                bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
                qemu_mutex_lock(&comp_param[idx].mutex);
                set_compress_params(&comp_param[idx], block, offset);
                qemu_cond_signal(&comp_param[idx].cond);
                qemu_mutex_unlock(&comp_param[idx].mutex);
                pages = 1;
                ram_counters.normal++;
                ram_counters.transferred += bytes_xmit;
                break;
            }
        }
        if (pages > 0) {
            break;
        } else {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    return pages;
}
/**
 * find_dirty_block: find the next dirty page and update any state
 * associated with the search process.
 *
 * Returns if a page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 * @again: set to false if the search has scanned the whole of RAM
 */
static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
{
    pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
    if (pss->complete_round && pss->block == rs->last_seen_block &&
        pss->page >= rs->last_page) {
        /*
         * We've been once around the RAM and haven't found anything.
         * Give up.
         */
        *again = false;
        return false;
    }
    if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
        /* Didn't find anything in this RAM Block */
        pss->page = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            rs->ram_bulk_stage = false;
            if (migrate_use_xbzrle()) {
                /* If xbzrle is on, stop using the data compression at this
                 * point. In theory, xbzrle can do better than compression.
                 */
                flush_compressed_data(rs);
            }
        }
        /* Didn't find anything this time, but try again on the new block */
        *again = true;
        return false;
    } else {
        /* Can go around again, but... */
        *again = true;
        /* We've found something so probably don't need to */
        return true;
    }
}
/**
 * unqueue_page: gets a page of the queue
 *
 * Helper for 'get_queued_page' - gets a page off the queue
 *
 * Returns the block of the page (or NULL if none available)
 *
 * @rs: current RAM state
 * @offset: used to return the offset within the RAMBlock
 */
static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
{
    RAMBlock *block = NULL;

    qemu_mutex_lock(&rs->src_page_req_mutex);
    if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
        struct RAMSrcPageRequest *entry =
                                QSIMPLEQ_FIRST(&rs->src_page_requests);
        block = entry->rb;
        *offset = entry->offset;

        if (entry->len > TARGET_PAGE_SIZE) {
            entry->len -= TARGET_PAGE_SIZE;
            entry->offset += TARGET_PAGE_SIZE;
        } else {
            memory_region_unref(block->mr);
            QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
            g_free(entry);
            migration_consume_urgent_request();
        }
    }
    qemu_mutex_unlock(&rs->src_page_req_mutex);

    return block;
}
/**
 * get_queued_page: unqueue a page from the postcopy requests
 *
 * Skips pages that are already sent (!dirty)
 *
 * Returns if a queued page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 */
static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock  *block;
    ram_addr_t offset;
    bool dirty;

    do {
        block = unqueue_page(rs, &offset);
        /*
         * We're sending this page, and since it's postcopy nothing else
         * will dirty it, and we must make sure it doesn't get sent again
         * even if this queue request was received after the background
         * search already sent it.
         */
        if (block) {
            unsigned long page;

            page = offset >> TARGET_PAGE_BITS;
            dirty = test_bit(page, block->bmap);
            if (!dirty) {
                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
                       page, test_bit(page, block->unsentmap));
            } else {
                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
            }
        }

    } while (block && !dirty);

    if (block) {
        /*
         * As soon as we start servicing pages out of order, then we have
         * to kill the bulk stage, since the bulk stage assumes
         * in (migration_bitmap_find_and_reset_dirty) that every page is
         * dirty, that's no longer true.
         */
        rs->ram_bulk_stage = false;

        /*
         * We want the background search to continue from the queued page
         * since the guest is likely to want other pages near to the page
         * it just requested.
         */
        pss->block = block;
        pss->page = offset >> TARGET_PAGE_BITS;
    }

    return !!block;
}
/**
 * migration_page_queue_free: drop any remaining pages in the ram
 * request queue
 *
 * It should be empty at the end anyway, but in error cases there may
 * be some left. In case any page is left, we drop it.
 */
static void migration_page_queue_free(RAMState *rs)
{
    struct RAMSrcPageRequest *mspr, *next_mspr;
    /* This queue generally should be empty - but in the case of a failed
     * migration might have some droppings in.
     */
    rcu_read_lock();
    QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
        memory_region_unref(mspr->rb->mr);
        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
        g_free(mspr);
    }
    rcu_read_unlock();
}
/**
 * ram_save_queue_pages: queue the page for transmission
 *
 * A request from postcopy destination for example.
 *
 * Returns zero on success or negative on error
 *
 * @rbname: Name of the RAMBLock of the request. NULL means the
 *          same that last one.
 * @start: starting address from the start of the RAMBlock
 * @len: length (in bytes) to send
 */
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
{
    RAMBlock *ramblock;
    RAMState *rs = ram_state;

    ram_counters.postcopy_requests++;
    rcu_read_lock();
    if (!rbname) {
        /* Reuse last RAMBlock */
        ramblock = rs->last_req_rb;

        if (!ramblock) {
            /*
             * Shouldn't happen, we can't reuse the last RAMBlock if
             * it's the 1st request.
             */
            error_report("ram_save_queue_pages no previous block");
            goto err;
        }
    } else {
        ramblock = qemu_ram_block_by_name(rbname);

        if (!ramblock) {
            /* We shouldn't be asked for a non-existent RAMBlock */
            error_report("ram_save_queue_pages no block '%s'", rbname);
            goto err;
        }
        rs->last_req_rb = ramblock;
    }
    trace_ram_save_queue_pages(ramblock->idstr, start, len);
    if (start + len > ramblock->used_length) {
        error_report("%s request overrun start=" RAM_ADDR_FMT " len="
                     RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
                     __func__, start, len, ramblock->used_length);
        goto err;
    }

    struct RAMSrcPageRequest *new_entry =
        g_malloc0(sizeof(struct RAMSrcPageRequest));
    new_entry->rb = ramblock;
    new_entry->offset = start;
    new_entry->len = len;

    memory_region_ref(ramblock->mr);
    qemu_mutex_lock(&rs->src_page_req_mutex);
    QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
    migration_make_urgent_request();
    qemu_mutex_unlock(&rs->src_page_req_mutex);
    rcu_read_unlock();

    return 0;

err:
    rcu_read_unlock();
    return -1;
}
static bool save_page_use_compression(RAMState *rs)
{
    if (!migrate_use_compression()) {
        return false;
    }

    /*
     * If xbzrle is on, stop using the data compression after first
     * round of migration even if compression is enabled. In theory,
     * xbzrle can do better than compression.
     */
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
        return true;
    }

    return false;
}
/**
 * ram_save_target_page: save one target page
 *
 * Returns the number of pages written
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
                                bool last_stage)
{
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
    int res;

    if (control_save_page(rs, block, offset, &res)) {
        return res;
    }

    /*
     * When starting the process of a new block, the first page of
     * the block should be sent out before other pages in the same
     * block, and all the pages in last block should have been sent
     * out, keeping this order is important, because the 'cont' flag
     * is used to avoid resending the block name.
     */
    if (block != rs->last_sent_block && save_page_use_compression(rs)) {
        flush_compressed_data(rs);
    }

    res = save_zero_page(rs, block, offset);
    if (res > 0) {
        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
         * page would be stale
         */
        if (!save_page_use_compression(rs)) {
            XBZRLE_cache_lock();
            xbzrle_cache_zero_page(rs, block->offset + offset);
            XBZRLE_cache_unlock();
        }
        ram_release_pages(block->idstr, offset, res);
        return res;
    }

    /*
     * Make sure the first page is sent out before other pages.
     *
     * we post it as normal page as compression will take much
     * CPU resource.
     */
    if (block == rs->last_sent_block && save_page_use_compression(rs)) {
        return compress_page_with_multi_thread(rs, block, offset);
    } else if (migrate_use_multifd()) {
        return ram_save_multifd_page(rs, block, offset);
    }

    return ram_save_page(rs, pss, last_stage);
}
/**
 * ram_save_host_page: save a whole host page
 *
 * Starting at *offset send pages up to the end of the current host
 * page. It's valid for the initial offset to point into the middle of
 * a host page in which case the remainder of the hostpage is sent.
 * Only dirty target pages are sent. Note that the host page size may
 * be a huge page for this block.
 * The saving stops at the boundary of the used_length of the block
 * if the RAMBlock isn't a multiple of the host page size.
 *
 * Returns the number of pages written or negative on error
 *
 * @rs: current RAM state
 * @ms: current migration state
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
                              bool last_stage)
{
    int tmppages, pages = 0;
    size_t pagesize_bits =
        qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;

    if (!qemu_ram_is_migratable(pss->block)) {
        error_report("block %s should not be migrated !", pss->block->idstr);
        return 0;
    }

    do {
        /* Check if the page is dirty and, if it is, send it */
        if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
            pss->page++;
            continue;
        }

        tmppages = ram_save_target_page(rs, pss, last_stage);
        if (tmppages < 0) {
            return tmppages;
        }

        pages += tmppages;
        if (pss->block->unsentmap) {
            clear_bit(pss->page, pss->block->unsentmap);
        }

        pss->page++;
    } while ((pss->page & (pagesize_bits - 1)) &&
             offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));

    /* The offset we leave with is the last one we looked at */
    pss->page--;
    return pages;
}
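/*
 * Worked example, purely illustrative: for a hugetlbfs-backed block
 * with a 2MiB host page and 4KiB target pages, pagesize_bits is 512,
 * so the loop above sends up to 512 consecutive target pages and the
 * "pss->page & (pagesize_bits - 1)" test stops it exactly at the
 * host-page boundary.
 */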
/**
 * ram_find_and_save_block: finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns the number of pages written where zero means no dirty pages
 *
 * @rs: current RAM state
 * @last_stage: if we are at the completion stage
 *
 * On systems where host-page-size > target-page-size it will send all the
 * pages in a host page that are dirty.
 */
static int ram_find_and_save_block(RAMState *rs, bool last_stage)
{
    PageSearchStatus pss;
    int pages = 0;
    bool again, found;

    /* No dirty page as there is zero RAM */
    if (!ram_bytes_total()) {
        return pages;
    }

    pss.block = rs->last_seen_block;
    pss.page = rs->last_page;
    pss.complete_round = false;

    if (!pss.block) {
        pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
    }

    do {
        again = true;
        found = get_queued_page(rs, &pss);

        if (!found) {
            /* priority queue empty, so just search for something dirty */
            found = find_dirty_block(rs, &pss, &again);
        }

        if (found) {
            pages = ram_save_host_page(rs, &pss, last_stage);
        }
    } while (!pages && again);

    rs->last_seen_block = pss.block;
    rs->last_page = pss.page;

    return pages;
}
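/*
 * Note (illustrative): work comes from two sources above -- the
 * postcopy request queue (get_queued_page) takes priority, and only
 * when it is empty does the linear scan (find_dirty_block) resume
 * from where the previous call stopped, via rs->last_seen_block and
 * rs->last_page.
 */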
void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;

    if (zero) {
        ram_counters.duplicate += pages;
    } else {
        ram_counters.normal += pages;
        ram_counters.transferred += size;
        qemu_update_position(f, size);
    }
}
uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        total += block->used_length;
    }
    rcu_read_unlock();
    return total;
}
static void xbzrle_load_setup(void)
{
    XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
}

static void xbzrle_load_cleanup(void)
{
    g_free(XBZRLE.decoded_buf);
    XBZRLE.decoded_buf = NULL;
}
static void ram_state_cleanup(RAMState **rsp)
{
    migration_page_queue_free(*rsp);
    qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
    qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
    g_free(*rsp);
    *rsp = NULL;
}
static void xbzrle_cleanup(void)
{
    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.zero_target_page);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
        XBZRLE.zero_target_page = NULL;
    }
    XBZRLE_cache_unlock();
}
static void ram_save_cleanup(void *opaque)
{
    RAMState **rsp = opaque;
    RAMBlock *block;

    /* The caller must hold the iothread lock or be in a bottom half, so
     * there is no writing race against this migration bitmap.
     */
    memory_global_dirty_log_stop();

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        g_free(block->bmap);
        block->bmap = NULL;
        g_free(block->unsentmap);
        block->unsentmap = NULL;
    }

    xbzrle_cleanup();
    compress_threads_save_cleanup();
    ram_state_cleanup(rsp);
}
static void ram_state_reset(RAMState *rs)
{
    rs->last_seen_block = NULL;
    rs->last_sent_block = NULL;
    rs->last_page = 0;
    rs->last_version = ram_list.version;
    rs->ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */
/*
 * 'expected' is the value you expect the bitmap mostly to be full
 * of; it won't bother printing lines that are all this value.
 * If 'todump' is null the migration bitmap is dumped.
 */
void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
                           unsigned long pages)
{
    int64_t cur;
    int64_t linelen = 128;
    char linebuf[129];

    for (cur = 0; cur < pages; cur += linelen) {
        int64_t curb;
        bool found = false;
        /*
         * Last line; catch the case where the line length
         * is longer than remaining ram
         */
        if (cur + linelen > pages) {
            linelen = pages - cur;
        }
        for (curb = 0; curb < linelen; curb++) {
            bool thisbit = test_bit(cur + curb, todump);
            linebuf[curb] = thisbit ? '1' : '.';
            found = found || (thisbit != expected);
        }
        if (found) {
            linebuf[curb] = '\0';
            fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
        }
    }
}
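/*
 * Example output (hypothetical bit values, for illustration only);
 * one row per 128 pages, '1' for set bits, '.' for clear ones, and
 * rows that are entirely 'expected' suppressed:
 *
 *     0x00000080 : 1111....11111111............1111
 */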
/* **** functions for postcopy ***** */

void ram_postcopy_migrated_memory_release(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        unsigned long *bitmap = block->bmap;
        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);

        while (run_start < range) {
            unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
            ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
                              (run_end - run_start) << TARGET_PAGE_BITS);
            run_start = find_next_zero_bit(bitmap, range, run_end + 1);
        }
    }
}
/**
 * postcopy_send_discard_bm_ram: discard a RAMBlock
 *
 * Returns zero on success
 *
 * Callback from postcopy_each_ram_send_discard for each RAMBlock
 * Note: At this point the 'unsentmap' is the processed bitmap combined
 * with the dirtymap; so a '1' means it's either dirty or unsent.
 *
 * @ms: current migration state
 * @pds: state for postcopy
 * @block: RAMBlock to discard
 */
static int postcopy_send_discard_bm_ram(MigrationState *ms,
                                        PostcopyDiscardState *pds,
                                        RAMBlock *block)
{
    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
    unsigned long current;
    unsigned long *unsentmap = block->unsentmap;

    for (current = 0; current < end; ) {
        unsigned long one = find_next_bit(unsentmap, end, current);

        if (one <= end) {
            unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
            unsigned long discard_length;

            if (zero >= end) {
                discard_length = end - one;
            } else {
                discard_length = zero - one;
            }
            if (discard_length) {
                postcopy_discard_send_range(ms, pds, one, discard_length);
            }
            current = one + discard_length;
        } else {
            current = one;
        }
    }

    return 0;
}
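/*
 * Worked example (illustrative): for an unsentmap of 0011100100...
 * (bit 0 first), the loop above emits the runs (start=2, len=3) and
 * (start=7, len=1), i.e. one discard request per contiguous run of
 * set bits.
 */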
/**
 * postcopy_each_ram_send_discard: discard all RAMBlocks
 *
 * Returns 0 for success or negative for error
 *
 * Utility for the outgoing postcopy code.
 * Calls postcopy_send_discard_bm_ram for each RAMBlock
 * passing it bitmap indexes and name.
 * (qemu_ram_foreach_block ends up passing unscaled lengths
 *  which would mean postcopy code would have to deal with target page)
 *
 * @ms: current migration state
 */
static int postcopy_each_ram_send_discard(MigrationState *ms)
{
    struct RAMBlock *block;
    int ret;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        PostcopyDiscardState *pds =
            postcopy_discard_send_init(ms, block->idstr);

        /*
         * Postcopy sends chunks of bitmap over the wire, but it
         * just needs indexes at this point, avoids it having
         * target page specific code.
         */
        ret = postcopy_send_discard_bm_ram(ms, pds, block);
        postcopy_discard_send_finish(ms, pds);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
/**
 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
 *
 * Helper for postcopy_chunk_hostpages; it's called twice to
 * canonicalize the two bitmaps, that are similar, but one is
 * inverted.
 *
 * Postcopy requires that all target pages in a hostpage are dirty or
 * clean, not a mix. This function canonicalizes the bitmaps.
 *
 * @ms: current migration state
 * @unsent_pass: if true we need to canonicalize partially unsent host pages
 *               otherwise we need to canonicalize partially dirty host pages
 * @block: block that contains the page we want to canonicalize
 * @pds: state for postcopy
 */
static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                          RAMBlock *block,
                                          PostcopyDiscardState *pds)
{
    RAMState *rs = ram_state;
    unsigned long *bitmap = block->bmap;
    unsigned long *unsentmap = block->unsentmap;
    unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
    unsigned long run_start;

    if (block->page_size == TARGET_PAGE_SIZE) {
        /* Easy case - TPS==HPS for a non-huge page RAMBlock */
        return;
    }

    if (unsent_pass) {
        /* Find a sent page */
        run_start = find_next_zero_bit(unsentmap, pages, 0);
    } else {
        /* Find a dirty page */
        run_start = find_next_bit(bitmap, pages, 0);
    }

    while (run_start < pages) {
        bool do_fixup = false;
        unsigned long fixup_start_addr;
        unsigned long host_offset;

        /*
         * If the start of this run of pages is in the middle of a host
         * page, then we need to fixup this host page.
         */
        host_offset = run_start % host_ratio;
        if (host_offset) {
            do_fixup = true;
            run_start -= host_offset;
            fixup_start_addr = run_start;
            /* For the next pass */
            run_start = run_start + host_ratio;
        } else {
            /* Find the end of this run */
            unsigned long run_end;
            if (unsent_pass) {
                run_end = find_next_bit(unsentmap, pages, run_start + 1);
            } else {
                run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
            }
            /*
             * If the end isn't at the start of a host page, then the
             * run doesn't finish at the end of a host page
             * and we need to discard.
             */
            host_offset = run_end % host_ratio;
            if (host_offset) {
                do_fixup = true;
                fixup_start_addr = run_end - host_offset;
                /*
                 * This host page has gone, the next loop iteration starts
                 * from after the fixup
                 */
                run_start = fixup_start_addr + host_ratio;
            } else {
                /*
                 * No discards on this iteration, next loop starts from
                 * next sent/dirty page
                 */
                run_start = run_end + 1;
            }
        }

        if (do_fixup) {
            unsigned long page;

            /* Tell the destination to discard this page */
            if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
                /* For the unsent_pass we:
                 *     discard partially sent pages
                 * For the !unsent_pass (dirty) we:
                 *     discard partially dirty pages that were sent
                 *     (any partially sent pages were already discarded
                 *     by the previous unsent_pass)
                 */
                postcopy_discard_send_range(ms, pds, fixup_start_addr,
                                            host_ratio);
            }

            /* Clean up the bitmap */
            for (page = fixup_start_addr;
                 page < fixup_start_addr + host_ratio; page++) {
                /* All pages in this host page are now not sent */
                set_bit(page, unsentmap);

                /*
                 * Remark them as dirty, updating the count for any pages
                 * that weren't previously dirty.
                 */
                rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
            }
        }

        if (unsent_pass) {
            /* Find the next sent page for the next iteration */
            run_start = find_next_zero_bit(unsentmap, pages, run_start);
        } else {
            /* Find the next dirty page for the next iteration */
            run_start = find_next_bit(bitmap, pages, run_start);
        }
    }
}
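/*
 * Worked example (illustrative): with host_ratio = 4, a dirty run
 * starting at target page 6 begins mid host page (6 % 4 == 2), so the
 * whole host page [4, 8) is discarded, every page in it is set in
 * 'unsentmap' and remarked dirty in 'bitmap', and the scan resumes at
 * page 8.
 */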
/**
 * postcopy_chunk_hostpages: discard any partially sent host page
 *
 * Utility for the outgoing postcopy code.
 *
 * Discard any partially sent host-page size chunks, mark any partially
 * dirty host-page size chunks as all dirty.  In this case the host-page
 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
 *
 * Returns zero on success
 *
 * @ms: current migration state
 * @block: block we want to work with
 */
static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
{
    PostcopyDiscardState *pds =
        postcopy_discard_send_init(ms, block->idstr);

    /* First pass: Discard all partially sent host pages */
    postcopy_chunk_hostpages_pass(ms, true, block, pds);
    /*
     * Second pass: Ensure that all partially dirty host pages are made
     * fully dirty.
     */
    postcopy_chunk_hostpages_pass(ms, false, block, pds);

    postcopy_discard_send_finish(ms, pds);
    return 0;
}
/**
 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
 *
 * Returns zero on success
 *
 * Transmit the set of pages to be discarded after precopy to the target.
 * These are pages that:
 *     a) Have been previously transmitted but are now dirty again
 *     b) Pages that have never been transmitted, this ensures that
 *        any pages on the destination that have been mapped by background
 *        tasks get discarded (transparent huge pages is the specific concern)
 * Hopefully this is pretty sparse
 *
 * @ms: current migration state
 */
int ram_postcopy_send_discard_bitmap(MigrationState *ms)
{
    RAMState *rs = ram_state;
    RAMBlock *block;
    int ret;

    rcu_read_lock();

    /* This should be our last sync, the src is now paused */
    migration_bitmap_sync(rs);

    /* Easiest way to make sure we don't resume in the middle of a host-page */
    rs->last_seen_block = NULL;
    rs->last_sent_block = NULL;
    rs->last_page = 0;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
        unsigned long *bitmap = block->bmap;
        unsigned long *unsentmap = block->unsentmap;

        if (!unsentmap) {
            /* We don't have a safe way to resize the sentmap, so
             * if the bitmap was resized it will be NULL at this
             * point.
             */
            error_report("migration ram resized during precopy phase");
            rcu_read_unlock();
            return -EINVAL;
        }
        /* Deal with TPS != HPS and huge pages */
        ret = postcopy_chunk_hostpages(ms, block);
        if (ret) {
            rcu_read_unlock();
            return ret;
        }

        /*
         * Update the unsentmap to be unsentmap = unsentmap | dirty
         */
        bitmap_or(unsentmap, unsentmap, bitmap, pages);
#ifdef DEBUG_POSTCOPY
        ram_debug_dump_bitmap(unsentmap, true, pages);
#endif
    }
    trace_ram_postcopy_send_discard_bitmap();

    ret = postcopy_each_ram_send_discard(ms);
    rcu_read_unlock();

    return ret;
}
/**
 * ram_discard_range: discard dirtied pages at the beginning of postcopy
 *
 * Returns zero on success
 *
 * @rbname: name of the RAMBlock of the request. NULL means the
 *          same that last one.
 * @start: RAMBlock starting page
 * @length: RAMBlock size
 */
int ram_discard_range(const char *rbname, uint64_t start, size_t length)
{
    int ret = -1;

    trace_ram_discard_range(rbname, start, length);

    rcu_read_lock();
    RAMBlock *rb = qemu_ram_block_by_name(rbname);

    if (!rb) {
        error_report("ram_discard_range: Failed to find block '%s'", rbname);
        goto err;
    }

    /*
     * On source VM, we don't need to update the received bitmap since
     * we don't even have one.
     */
    if (rb->receivedmap) {
        bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
                     length >> qemu_target_page_bits());
    }

    ret = ram_block_discard_range(rb, start, length);

err:
    rcu_read_unlock();

    return ret;
}
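/*
 * Usage sketch (hypothetical block name and offsets, for illustration
 * only): discard the 2MiB chunk starting at byte offset 2MiB of a
 * block and check for failure:
 *
 *     if (ram_discard_range("pc.ram", 0x200000, 0x200000) < 0) {
 *         ... the range could not be dropped ...
 *     }
 */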
/*
 * For every allocation, we will try not to crash the VM if the
 * allocation failed.
 */
static int xbzrle_init(void)
{
    Error *local_err = NULL;

    if (!migrate_use_xbzrle()) {
        return 0;
    }

    XBZRLE_cache_lock();

    XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
    if (!XBZRLE.zero_target_page) {
        error_report("%s: Error allocating zero page", __func__);
        goto err_out;
    }

    XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
                              TARGET_PAGE_SIZE, &local_err);
    if (!XBZRLE.cache) {
        error_report_err(local_err);
        goto free_zero_page;
    }

    XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
    if (!XBZRLE.encoded_buf) {
        error_report("%s: Error allocating encoded_buf", __func__);
        goto free_cache;
    }

    XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
    if (!XBZRLE.current_buf) {
        error_report("%s: Error allocating current_buf", __func__);
        goto free_encoded_buf;
    }

    /* We are all good */
    XBZRLE_cache_unlock();
    return 0;

free_encoded_buf:
    g_free(XBZRLE.encoded_buf);
    XBZRLE.encoded_buf = NULL;
free_cache:
    cache_fini(XBZRLE.cache);
    XBZRLE.cache = NULL;
free_zero_page:
    g_free(XBZRLE.zero_target_page);
    XBZRLE.zero_target_page = NULL;
err_out:
    XBZRLE_cache_unlock();
    return -ENOMEM;
}
static int ram_state_init(RAMState **rsp)
{
    *rsp = g_try_new0(RAMState, 1);

    if (!*rsp) {
        error_report("%s: Init ramstate fail", __func__);
        return -1;
    }

    qemu_mutex_init(&(*rsp)->bitmap_mutex);
    qemu_mutex_init(&(*rsp)->src_page_req_mutex);
    QSIMPLEQ_INIT(&(*rsp)->src_page_requests);

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;

    ram_state_reset(*rsp);

    return 0;
}
static void ram_list_init_bitmaps(void)
{
    RAMBlock *block;
    unsigned long pages;

    /* Skip setting bitmap if there is no RAM */
    if (ram_bytes_total()) {
        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            pages = block->max_length >> TARGET_PAGE_BITS;
            block->bmap = bitmap_new(pages);
            bitmap_set(block->bmap, 0, pages);
            if (migrate_postcopy_ram()) {
                block->unsentmap = bitmap_new(pages);
                bitmap_set(block->unsentmap, 0, pages);
            }
        }
    }
}
static void ram_init_bitmaps(RAMState *rs)
{
    /* For memory_global_dirty_log_start below. */
    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    rcu_read_lock();

    ram_list_init_bitmaps();
    memory_global_dirty_log_start();
    migration_bitmap_sync(rs);

    rcu_read_unlock();
    qemu_mutex_unlock_ramlist();
    qemu_mutex_unlock_iothread();
}
static int ram_init_all(RAMState **rsp)
{
    if (ram_state_init(rsp)) {
        return -1;
    }

    if (xbzrle_init()) {
        ram_state_cleanup(rsp);
        return -1;
    }

    ram_init_bitmaps(*rsp);

    return 0;
}
static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
{
    RAMBlock *block;
    uint64_t pages = 0;

    /*
     * Postcopy is not using xbzrle/compression, so no need for that.
     * Also, since the source is already halted, we don't need to care
     * about dirty page logging either.
     */
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        pages += bitmap_count_one(block->bmap,
                                  block->used_length >> TARGET_PAGE_BITS);
    }

    /* This may not be aligned with current bitmaps. Recalculate. */
    rs->migration_dirty_pages = pages;

    rs->last_seen_block = NULL;
    rs->last_sent_block = NULL;
    rs->last_page = 0;
    rs->last_version = ram_list.version;
    /*
     * Disable the bulk stage, otherwise we'll resend the whole RAM no
     * matter what we have sent.
     */
    rs->ram_bulk_stage = false;

    /* Update RAMState cache of output QEMUFile */
    rs->f = out;

    trace_ram_state_resume_prepare(pages);
}
/*
 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * long-running RCU critical section.  When rcu-reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */

/**
 * ram_save_setup: Setup RAM for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMState **rsp = opaque;
    RAMBlock *block;

    if (compress_threads_save_setup()) {
        return -1;
    }

    /* migration has already setup the bitmap, reuse it. */
    if (!migration_in_colo_state()) {
        if (ram_init_all(rsp) != 0) {
            compress_threads_save_cleanup();
            return -1;
        }
    }
    (*rsp)->f = f;

    rcu_read_lock();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->used_length);
        if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
            qemu_put_be64(f, block->page_size);
        }
    }

    rcu_read_unlock();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    multifd_send_sync_main();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    qemu_fflush(f);

    return 0;
}
/**
 * ram_save_iterate: iterative stage for migration
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    int ret;
    int i;
    int64_t t0;
    int done = 0;

    if (blk_mig_bulk_active()) {
        /* Avoid transferring ram during bulk phase of block migration as
         * the bulk phase will usually take a long time and transferring
         * ram updates during that time is pointless. */
        goto out;
    }

    rcu_read_lock();
    if (ram_list.version != rs->last_version) {
        ram_state_reset(rs);
    }

    /* Read version before ram_list.blocks */
    smp_rmb();

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0 ||
           !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
        int pages;

        if (qemu_file_get_error(f)) {
            break;
        }

        pages = ram_find_and_save_block(rs, false);
        /* no more pages to send */
        if (pages == 0) {
            done = 1;
            break;
        }

        if (pages < 0) {
            qemu_file_set_error(f, pages);
            break;
        }

        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check every
           few iterations.
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                trace_ram_save_iterate_big_wait(t1, i);
                break;
            }
        }
        i++;
    }
    flush_compressed_data(rs);
    rcu_read_unlock();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    multifd_send_sync_main();
out:
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    qemu_fflush(f);
    ram_counters.transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return done;
}
/**
 * ram_save_complete: function called to send the remaining amount of ram
 *
 * Returns zero to indicate success
 *
 * Called with iothread lock
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;

    rcu_read_lock();

    if (!migration_in_postcopy()) {
        migration_bitmap_sync(rs);
    }

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int pages;

        pages = ram_find_and_save_block(rs, !migration_in_colo_state());
        /* no more blocks to send */
        if (pages == 0) {
            break;
        }
    }

    flush_compressed_data(rs);
    ram_control_after_iterate(f, RAM_CONTROL_FINISH);

    rcu_read_unlock();

    multifd_send_sync_main();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    qemu_fflush(f);

    return 0;
}
static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                             uint64_t *res_precopy_only,
                             uint64_t *res_compatible,
                             uint64_t *res_postcopy_only)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    uint64_t remaining_size;

    remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

    if (!migration_in_postcopy() &&
        remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        rcu_read_lock();
        migration_bitmap_sync(rs);
        rcu_read_unlock();
        qemu_mutex_unlock_iothread();
        remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
    }

    if (migrate_postcopy_ram()) {
        /* We can do postcopy, and all the data is postcopiable */
        *res_compatible += remaining_size;
    } else {
        *res_precopy_only += remaining_size;
    }
}
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;
    uint8_t *loaded_data;

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    loaded_data = XBZRLE.decoded_buf;
    /* load data and decode */
    /* it can change loaded_data to point to an internal buffer */
    qemu_get_buffer_in_place(f, &loaded_data, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(loaded_data, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}
/**
 * ram_block_from_stream: read a RAMBlock id from the migration stream
 *
 * Must be called from within a rcu critical section.
 *
 * Returns a pointer from within the RCU-protected ram_list.
 *
 * @f: QEMUFile where to read the data from
 * @flags: Page flags (mostly to see if it's a continuation of previous block)
 */
static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }
        return block;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    block = qemu_ram_block_by_name(id);
    if (!block) {
        error_report("Can't find block %s", id);
        return NULL;
    }

    if (!qemu_ram_is_migratable(block)) {
        error_report("block %s should not be migrated !", id);
        return NULL;
    }

    return block;
}
static inline void *host_from_ram_block_offset(RAMBlock *block,
                                               ram_addr_t offset)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }

    return block->host + offset;
}
/**
 * ram_handle_compressed: handle the zero page case
 *
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 *
 * @host: host address for the zero page
 * @ch: what the page is filled from.  We only support zero
 * @size: size of the zero page
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}
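/*
 * Note (illustrative): the is_zero_range() test above makes the
 * common case cheap -- a page that is already zero on the destination
 * is left untouched, so no memset() runs and no copy-on-write page
 * needs to be allocated for it.
 */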
/* return the size after decompression, or negative value on error */
static int
qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
                     const uint8_t *source, size_t source_len)
{
    int err;

    err = inflateReset(stream);
    if (err != Z_OK) {
        return -1;
    }

    stream->avail_in = source_len;
    stream->next_in = (uint8_t *)source;
    stream->avail_out = dest_len;
    stream->next_out = dest;

    err = inflate(stream, Z_NO_FLUSH);
    if (err != Z_STREAM_END) {
        return -1;
    }

    return stream->total_out;
}
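/*
 * Usage sketch (hypothetical caller, for illustration only): the
 * z_stream is expected to be inflateInit()'d once and reused across
 * pages; this function only inflateReset()s it per call:
 *
 *     z_stream stream = {};
 *     if (inflateInit(&stream) == Z_OK) {
 *         int n = qemu_uncompress_data(&stream, page, TARGET_PAGE_SIZE,
 *                                      compbuf, comp_len);
 *         -- n is the decompressed size, or negative on error --
 *     }
 */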
static void *do_data_decompress(void *opaque)
{
    DecompressParam *param = opaque;
    unsigned long pagesize;
    uint8_t *des;
    int len, ret;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->des) {
            des = param->des;
            len = param->len;
            param->des = 0;
            qemu_mutex_unlock(&param->mutex);

            pagesize = TARGET_PAGE_SIZE;

            ret = qemu_uncompress_data(&param->stream, des, pagesize,
                                       param->compbuf, len);
            if (ret < 0 && migrate_get_current()->decompress_error_check) {
                error_report("decompress data failed");
                qemu_file_set_error(decomp_file, ret);
            }

            qemu_mutex_lock(&decomp_done_lock);
            param->done = true;
            qemu_cond_signal(&decomp_done_cond);
            qemu_mutex_unlock(&decomp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}
static int wait_for_decompress_done(void)
{
    int idx, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!decomp_param[idx].done) {
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
        }
    }
    qemu_mutex_unlock(&decomp_done_lock);
    return qemu_file_get_error(decomp_file);
}
static void compress_threads_load_cleanup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_decompress_threads();
    for (i = 0; i < thread_count; i++) {
        /*
         * we use it as an indicator which shows if the thread is
         * properly init'd or not
         */
        if (!decomp_param[i].compbuf) {
            break;
        }

        qemu_mutex_lock(&decomp_param[i].mutex);
        decomp_param[i].quit = true;
        qemu_cond_signal(&decomp_param[i].cond);
        qemu_mutex_unlock(&decomp_param[i].mutex);
    }
    for (i = 0; i < thread_count; i++) {
        if (!decomp_param[i].compbuf) {
            break;
        }

        qemu_thread_join(decompress_threads + i);
        qemu_mutex_destroy(&decomp_param[i].mutex);
        qemu_cond_destroy(&decomp_param[i].cond);
        inflateEnd(&decomp_param[i].stream);
        g_free(decomp_param[i].compbuf);
        decomp_param[i].compbuf = NULL;
    }
    g_free(decompress_threads);
    g_free(decomp_param);
    decompress_threads = NULL;
    decomp_param = NULL;
    decomp_file = NULL;
}
static int compress_threads_load_setup(QEMUFile *f)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }

    thread_count = migrate_decompress_threads();
    decompress_threads = g_new0(QemuThread, thread_count);
    decomp_param = g_new0(DecompressParam, thread_count);
    qemu_mutex_init(&decomp_done_lock);
    qemu_cond_init(&decomp_done_cond);
    decomp_file = f;
    for (i = 0; i < thread_count; i++) {
        if (inflateInit(&decomp_param[i].stream) != Z_OK) {
            goto exit;
        }

        decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
        qemu_mutex_init(&decomp_param[i].mutex);
        qemu_cond_init(&decomp_param[i].cond);
        decomp_param[i].done = true;
        decomp_param[i].quit = false;
        qemu_thread_create(decompress_threads + i, "decompress",
                           do_data_decompress, decomp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
    return 0;
exit:
    compress_threads_load_cleanup();
    return -1;
}
static void decompress_data_with_multi_threads(QEMUFile *f,
                                               void *host, int len)
{
    int idx, thread_count;

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (decomp_param[idx].done) {
                decomp_param[idx].done = false;
                qemu_mutex_lock(&decomp_param[idx].mutex);
                qemu_get_buffer(f, decomp_param[idx].compbuf, len);
                decomp_param[idx].des = host;
                decomp_param[idx].len = len;
                qemu_cond_signal(&decomp_param[idx].cond);
                qemu_mutex_unlock(&decomp_param[idx].mutex);
                break;
            }
        }
        if (idx < thread_count) {
            break;
        } else {
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
        }
    }
    qemu_mutex_unlock(&decomp_done_lock);
}
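/*
 * Note (illustrative): this is the producer half of the handshake in
 * do_data_decompress() -- 'done' is cleared under decomp_done_lock,
 * the request is published under the per-thread mutex, and the caller
 * sleeps on decomp_done_cond only when every worker is busy.
 */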
/**
 * ram_load_setup: Setup RAM for migration incoming side
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to receive the data
 * @opaque: RAMState pointer
 */
static int ram_load_setup(QEMUFile *f, void *opaque)
{
    if (compress_threads_load_setup(f)) {
        return -1;
    }

    xbzrle_load_setup();
    ramblock_recv_map_init();

    return 0;
}
static int ram_load_cleanup(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        if (ramblock_is_pmem(rb)) {
            pmem_persist(rb->host, rb->used_length);
        }
    }

    xbzrle_load_cleanup();
    compress_threads_load_cleanup();

    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        g_free(rb->receivedmap);
        rb->receivedmap = NULL;
    }

    return 0;
}
/**
 * ram_postcopy_incoming_init: allocate postcopy data structures
 *
 * Returns 0 for success and negative if there was one error
 *
 * @mis: current migration incoming state
 *
 * Allocate data structures etc needed by incoming migration with
 * postcopy-ram. postcopy-ram's similarly named
 * postcopy_ram_incoming_init does the work.
 */
int ram_postcopy_incoming_init(MigrationIncomingState *mis)
{
    return postcopy_ram_incoming_init(mis);
}
/**
 * ram_load_postcopy: load a page in postcopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in postcopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile where to send the data
 */
static int ram_load_postcopy(QEMUFile *f)
{
    int flags = 0, ret = 0;
    bool place_needed = false;
    bool matches_target_page_size = false;
    MigrationIncomingState *mis = migration_incoming_get_current();
    /* Temporary page that is later 'placed' */
    void *postcopy_host_page = postcopy_get_tmp_page(mis);
    void *last_host = NULL;
    bool all_zero = false;

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *host = NULL;
        void *page_buffer = NULL;
        void *place_source = NULL;
        RAMBlock *block = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);

        /*
         * If qemu file error, we should stop here, and then "addr"
         * may be invalid
         */
        ret = qemu_file_get_error(f);
        if (ret) {
            break;
        }

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        trace_ram_load_postcopy_loop((uint64_t)addr, flags);
        place_needed = false;
        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
            block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
            /*
             * Postcopy requires that we place whole host pages atomically;
             * these may be huge pages for RAMBlocks that are backed by
             * hugetlbfs.
             * To make it atomic, the data is read into a temporary page
             * that's moved into place later.
             * The migration protocol uses, possibly smaller, target-pages
             * however the source ensures it always sends all the components
             * of a host page in order.
             */
            page_buffer = postcopy_host_page +
                          ((uintptr_t)host & (block->page_size - 1));
            /* If all TP are zero then we can optimise the place */
            if (!((uintptr_t)host & (block->page_size - 1))) {
                all_zero = true;
            } else {
                /* not the 1st TP within the HP */
                if (host != (last_host + TARGET_PAGE_SIZE)) {
                    error_report("Non-sequential target page %p/%p",
                                 host, last_host);
                    ret = -EINVAL;
                    break;
                }
            }

            /*
             * If it's the last part of a host page then we place the host
             * page
             */
            place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
                            (block->page_size - 1)) == 0;
            place_source = postcopy_host_page;
        }
        last_host = host;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            memset(page_buffer, ch, TARGET_PAGE_SIZE);
            if (ch) {
                all_zero = false;
            }
            break;

        case RAM_SAVE_FLAG_PAGE:
            all_zero = false;
            if (!matches_target_page_size) {
                /* For huge pages, we always use temporary buffer */
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
            } else {
                /*
                 * For small pages that matches target page size, we
                 * avoid the qemu_file copy.  Instead we directly use
                 * the buffer of QEMUFile to place the page.  Note: we
                 * cannot do any QEMUFile operation before using that
                 * buffer to make sure the buffer is valid when
                 * placing the page.
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
                                         TARGET_PAGE_SIZE);
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            multifd_recv_sync_main();
            break;
        default:
            error_report("Unknown combination of migration flags: %#x"
                         " (postcopy mode)", flags);
            ret = -EINVAL;
            break;
        }

        /* Detect for any possible file errors */
        if (!ret && qemu_file_get_error(f)) {
            ret = qemu_file_get_error(f);
        }

        if (!ret && place_needed) {
            /* This gets called at the last target page in the host page */
            void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;

            if (all_zero) {
                ret = postcopy_place_page_zero(mis, place_dest,
                                               block);
            } else {
                ret = postcopy_place_page(mis, place_dest,
                                          place_source, block);
            }
        }
    }

    return ret;
}
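/*
 * Worked example (illustrative): with a 2MiB host page and 4KiB
 * target pages, the 512 target pages of one host page arrive in
 * order, accumulate in postcopy_host_page, and only the 512th one
 * sets place_needed, so the whole host page becomes visible to the
 * guest atomically.
 */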
static bool postcopy_is_advised(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
}

static bool postcopy_is_running(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
}
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0, invalid_flags = 0;
    static uint64_t seq_iter;
    int len = 0;
    /*
     * If system is running in postcopy mode, page inserts to host memory must
     * be atomic
     */
    bool postcopy_running = postcopy_is_running();
    /* ADVISE is earlier, it shows the source has the postcopy capability on */
    bool postcopy_advised = postcopy_is_advised();

    seq_iter++;

    if (version_id != 4) {
        ret = -EINVAL;
    }

    if (!migrate_use_compression()) {
        invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
    }
    /* This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    rcu_read_lock();

    if (postcopy_running) {
        ret = ram_load_postcopy(f);
    }

    while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & invalid_flags) {
            if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
                error_report("Received an unexpected compressed page");
            }

            ret = -EINVAL;
            break;
        }

        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            ramblock_recv_bitmap_set(block, host);
            trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                block = qemu_ram_block_by_name(id);
                if (block && !qemu_ram_is_migratable(block)) {
                    error_report("block %s should not be migrated !", id);
                    ret = -EINVAL;
                } else if (block) {
                    if (length != block->used_length) {
                        Error *local_err = NULL;

                        ret = qemu_ram_resize(block, length,
                                              &local_err);
                        if (local_err) {
                            error_report_err(local_err);
                        }
                    }
                    /* For postcopy we need to check hugepage sizes match */
                    if (postcopy_advised &&
                        block->page_size != qemu_host_page_size) {
                        uint64_t remote_page_size = qemu_get_be64(f);
                        if (remote_page_size != block->page_size) {
                            error_report("Mismatched RAM page size %s "
                                         "(local) %zd != %" PRId64,
                                         id, block->page_size,
                                         remote_page_size);
                            ret = -EINVAL;
                        }
                    }
                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                                          block->idstr);
                } else {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;

        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, host, len);
            break;

        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            multifd_recv_sync_main();
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    ret |= wait_for_decompress_done();
    rcu_read_unlock();
    trace_ram_load_complete(ret, seq_iter);
    return ret;
}
static bool ram_has_postcopy(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        if (ramblock_is_pmem(rb)) {
            info_report("Block: %s, host: %p is an nvdimm memory, postcopy "
                        "is not supported now!", rb->idstr, rb->host);
            return false;
        }
    }

    return migrate_postcopy_ram();
}
/* Sync all the dirty bitmap with destination VM.  */
static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
{
    RAMBlock *block;
    QEMUFile *file = s->to_dst_file;
    int ramblock_count = 0;

    trace_ram_dirty_bitmap_sync_start();

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        qemu_savevm_send_recv_bitmap(file, block->idstr);
        trace_ram_dirty_bitmap_request(block->idstr);
        ramblock_count++;
    }

    trace_ram_dirty_bitmap_sync_wait();

    /* Wait until all the ramblocks' dirty bitmaps are synced */
    while (ramblock_count--) {
        qemu_sem_wait(&s->rp_state.rp_sem);
    }

    trace_ram_dirty_bitmap_sync_complete();

    return 0;
}
static void ram_dirty_bitmap_reload_notify(MigrationState *s)
{
    qemu_sem_post(&s->rp_state.rp_sem);
}
/*
 * Read the received bitmap, revert it as the initial dirty bitmap.
 * This is only used when the postcopy migration is paused but wants
 * to resume from a middle point.
 */
int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
{
    int ret = -EINVAL;
    QEMUFile *file = s->rp_state.from_dst_file;
    unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
    uint64_t local_size = DIV_ROUND_UP(nbits, 8);
    uint64_t size, end_mark;

    trace_ram_dirty_bitmap_reload_begin(block->idstr);

    if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: incorrect state %s", __func__,
                     MigrationStatus_str(s->state));
        return -EINVAL;
    }

    /*
     * Note: see comments in ramblock_recv_bitmap_send() on why we
     * need the endianness conversion, and the paddings.
     */
    local_size = ROUND_UP(local_size, 8);

    /* Add paddings */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    size = qemu_get_be64(file);

    /* The size of the bitmap should match with our ramblock */
    if (size != local_size) {
        error_report("%s: ramblock '%s' bitmap size mismatch "
                     "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
                     block->idstr, size, local_size);
        ret = -EINVAL;
        goto out;
    }

    size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
    end_mark = qemu_get_be64(file);

    ret = qemu_file_get_error(file);
    if (ret || size != local_size) {
        error_report("%s: read bitmap failed for ramblock '%s': %d"
                     " (size 0x%"PRIx64", got: 0x%"PRIx64")",
                     __func__, block->idstr, ret, local_size, size);
        ret = -EIO;
        goto out;
    }

    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
        error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIx64,
                     __func__, block->idstr, end_mark);
        ret = -EINVAL;
        goto out;
    }

    /*
     * Endianness conversion. We are during postcopy (though paused).
     * The dirty bitmap won't change. We can directly modify it.
     */
    bitmap_from_le(block->bmap, le_bitmap, nbits);

    /*
     * What we received is "received bitmap". Revert it as the initial
     * dirty bitmap for this ramblock.
     */
    bitmap_complement(block->bmap, block->bmap, nbits);

    trace_ram_dirty_bitmap_reload_complete(block->idstr);

    /*
     * We succeeded in syncing the bitmap for the current ramblock. If
     * this is the last one to sync, we need to notify the main send
     * thread.
     */
    ram_dirty_bitmap_reload_notify(s);

    ret = 0;
out:
    g_free(le_bitmap);
    return ret;
}
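/*
 * Size check example (hypothetical values, for illustration only): a
 * 4GiB RAMBlock with 4KiB target pages has nbits = 1Mi, so local_size
 * is 128KiB, already a multiple of 8; both sides must arrive at this
 * same figure before the bitmap payload is accepted.
 */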
static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;
    int ret;

    ret = ram_dirty_bitmap_sync_all(s, rs);
    if (ret) {
        return ret;
    }

    ram_state_resume_prepare(rs, s->to_dst_file);

    return 0;
}
static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
};
void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
}