1 /*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 * Copyright (c) 2011-2015 Red Hat Inc
6 *
7 * Authors:
8 * Juan Quintela <quintela@redhat.com>
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 * THE SOFTWARE.
27 */
28
29 #include "qemu/osdep.h"
30 #include "cpu.h"
31 #include <zlib.h>
32 #include "qemu/cutils.h"
33 #include "qemu/bitops.h"
34 #include "qemu/bitmap.h"
35 #include "qemu/main-loop.h"
36 #include "qemu/pmem.h"
37 #include "xbzrle.h"
38 #include "ram.h"
39 #include "migration.h"
40 #include "socket.h"
41 #include "migration/register.h"
42 #include "migration/misc.h"
43 #include "qemu-file.h"
44 #include "postcopy-ram.h"
45 #include "page_cache.h"
46 #include "qemu/error-report.h"
47 #include "qapi/error.h"
48 #include "qapi/qapi-events-migration.h"
49 #include "qapi/qmp/qerror.h"
50 #include "trace.h"
51 #include "exec/ram_addr.h"
52 #include "exec/target_page.h"
53 #include "qemu/rcu_queue.h"
54 #include "migration/colo.h"
55 #include "block.h"
56 #include "sysemu/sysemu.h"
57 #include "qemu/uuid.h"
58 #include "savevm.h"
59 #include "qemu/iov.h"
60
61 /***********************************************************/
62 /* ram save/restore */
63
64 /* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
65 * worked for pages that were filled with the same char. We switched
66 * it to only search for the zero value, and renamed it to avoid
67 * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
68 */
69
70 #define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
71 #define RAM_SAVE_FLAG_ZERO 0x02
72 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
73 #define RAM_SAVE_FLAG_PAGE 0x08
74 #define RAM_SAVE_FLAG_EOS 0x10
75 #define RAM_SAVE_FLAG_CONTINUE 0x20
76 #define RAM_SAVE_FLAG_XBZRLE 0x40
77 /* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
78 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
79
80 static inline bool is_zero_range(uint8_t *p, uint64_t size)
81 {
82 return buffer_is_zero(p, size);
83 }
84
85 XBZRLECacheStats xbzrle_counters;
86
87 /* This struct contains the XBZRLE cache and a static page
88 used by the compression */
89 static struct {
90 /* buffer used for XBZRLE encoding */
91 uint8_t *encoded_buf;
92 /* buffer for storing page content */
93 uint8_t *current_buf;
94 /* Cache for XBZRLE, Protected by lock. */
95 PageCache *cache;
96 QemuMutex lock;
97 /* it will store a page full of zeros */
98 uint8_t *zero_target_page;
99 /* buffer used for XBZRLE decoding */
100 uint8_t *decoded_buf;
101 } XBZRLE;
102
103 static void XBZRLE_cache_lock(void)
104 {
105 if (migrate_use_xbzrle())
106 qemu_mutex_lock(&XBZRLE.lock);
107 }
108
109 static void XBZRLE_cache_unlock(void)
110 {
111 if (migrate_use_xbzrle())
112 qemu_mutex_unlock(&XBZRLE.lock);
113 }
114
115 /**
116 * xbzrle_cache_resize: resize the xbzrle cache
117 *
118 * This function is called from qmp_migrate_set_cache_size in the main
119 * thread, possibly while a migration is in progress. A running
120 * migration may be using the cache and might finish during this call,
121 * hence changes to the cache are protected by XBZRLE.lock.
122 *
123 * Returns 0 for success or -1 for error
124 *
125 * @new_size: new cache size
126 * @errp: set *errp if the check failed, with reason
127 */
128 int xbzrle_cache_resize(int64_t new_size, Error **errp)
129 {
130 PageCache *new_cache;
131 int64_t ret = 0;
132
133 /* Check for truncation */
134 if (new_size != (size_t)new_size) {
135 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
136 "exceeding address space");
137 return -1;
138 }
139
140 if (new_size == migrate_xbzrle_cache_size()) {
141 /* nothing to do */
142 return 0;
143 }
144
145 XBZRLE_cache_lock();
146
147 if (XBZRLE.cache != NULL) {
148 new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
149 if (!new_cache) {
150 ret = -1;
151 goto out;
152 }
153
154 cache_fini(XBZRLE.cache);
155 XBZRLE.cache = new_cache;
156 }
157 out:
158 XBZRLE_cache_unlock();
159 return ret;
160 }
161
162 /* Should be holding either ram_list.mutex, or the RCU lock. */
163 #define RAMBLOCK_FOREACH_MIGRATABLE(block) \
164 INTERNAL_RAMBLOCK_FOREACH(block) \
165 if (!qemu_ram_is_migratable(block)) {} else
166
167 #undef RAMBLOCK_FOREACH
168
169 static void ramblock_recv_map_init(void)
170 {
171 RAMBlock *rb;
172
173 RAMBLOCK_FOREACH_MIGRATABLE(rb) {
174 assert(!rb->receivedmap);
175 rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
176 }
177 }
178
179 int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
180 {
181 return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
182 rb->receivedmap);
183 }
184
185 bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
186 {
187 return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
188 }
189
190 void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
191 {
192 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
193 }
194
195 void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
196 size_t nr)
197 {
198 bitmap_set_atomic(rb->receivedmap,
199 ramblock_recv_bitmap_offset(host_addr, rb),
200 nr);
201 }
202
203 #define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)
204
205 /*
206 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
207 *
208 * Returns >0 if success with sent bytes, or <0 if error.
209 */
210 int64_t ramblock_recv_bitmap_send(QEMUFile *file,
211 const char *block_name)
212 {
213 RAMBlock *block = qemu_ram_block_by_name(block_name);
214 unsigned long *le_bitmap, nbits;
215 uint64_t size;
216
217 if (!block) {
218 error_report("%s: invalid block name: %s", __func__, block_name);
219 return -1;
220 }
221
222 nbits = block->used_length >> TARGET_PAGE_BITS;
223
224 /*
225 * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
226 * machines we may need 4 more bytes for padding (see below
227 * comment). So extend it a bit beforehand.
228 */
229 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
230
231 /*
232 * Always use little endian when sending the bitmap. This is
233 * required so that it works even when the source and destination
234 * VMs are not using the same endianness. (Note: big endian won't work.)
235 */
236 bitmap_to_le(le_bitmap, block->receivedmap, nbits);
237
238 /* Size of the bitmap, in bytes */
239 size = DIV_ROUND_UP(nbits, 8);
240
241 /*
242 * size is always aligned to 8 bytes for 64bit machines, but it
243 * may not be true for 32bit machines. We need this padding to
244 * make sure the migration can survive even between 32bit and
245 * 64bit machines.
246 */
247 size = ROUND_UP(size, 8);
248
249 qemu_put_be64(file, size);
250 qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
251 /*
252 * Mark as an end, in case the middle part is screwed up due to
253 * some "misterious" reason.
254 */
255 qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
256 qemu_fflush(file);
257
258 g_free(le_bitmap);
259
260 if (qemu_file_get_error(file)) {
261 return qemu_file_get_error(file);
262 }
263
264 return size + sizeof(size);
265 }
266
267 /*
268 * An outstanding page request, on the source, having been received
269 * and queued
270 */
271 struct RAMSrcPageRequest {
272 RAMBlock *rb;
273 hwaddr offset;
274 hwaddr len;
275
276 QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
277 };
278
279 /* State of RAM for migration */
280 struct RAMState {
281 /* QEMUFile used for this migration */
282 QEMUFile *f;
283 /* Last block that we have visited searching for dirty pages */
284 RAMBlock *last_seen_block;
285 /* Last block from where we have sent data */
286 RAMBlock *last_sent_block;
287 /* Last dirty target page we have sent */
288 ram_addr_t last_page;
289 /* last ram version we have seen */
290 uint32_t last_version;
291 /* We are in the first round */
292 bool ram_bulk_stage;
293 /* How many times we have dirty too many pages */
294 int dirty_rate_high_cnt;
295 /* these variables are used for bitmap sync */
296 /* last time we did a full bitmap_sync */
297 int64_t time_last_bitmap_sync;
298 /* bytes transferred at start_time */
299 uint64_t bytes_xfer_prev;
300 /* number of dirty pages since start_time */
301 uint64_t num_dirty_pages_period;
302 /* xbzrle misses since the beginning of the period */
303 uint64_t xbzrle_cache_miss_prev;
304
305 /* compression statistics since the beginning of the period */
306 /* number of times no free thread was available to compress data */
307 uint64_t compress_thread_busy_prev;
308 /* number of bytes after compression */
309 uint64_t compressed_size_prev;
310 /* number of compressed pages */
311 uint64_t compress_pages_prev;
312
313 /* total handled target pages at the beginning of period */
314 uint64_t target_page_count_prev;
315 /* total handled target pages since start */
316 uint64_t target_page_count;
317 /* number of dirty bits in the bitmap */
318 uint64_t migration_dirty_pages;
319 /* protects modification of the bitmap */
320 QemuMutex bitmap_mutex;
321 /* The RAMBlock used in the last src_page_requests */
322 RAMBlock *last_req_rb;
323 /* Queue of outstanding page requests from the destination */
324 QemuMutex src_page_req_mutex;
325 QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
326 };
327 typedef struct RAMState RAMState;
328
329 static RAMState *ram_state;
330
331 uint64_t ram_bytes_remaining(void)
332 {
333 return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
334 0;
335 }
336
337 MigrationStats ram_counters;
338
339 /* used by the search for pages to send */
340 struct PageSearchStatus {
341 /* Current block being searched */
342 RAMBlock *block;
343 /* Current page to search from */
344 unsigned long page;
345 /* Set once we wrap around */
346 bool complete_round;
347 };
348 typedef struct PageSearchStatus PageSearchStatus;
349
350 CompressionStats compression_counters;
351
352 struct CompressParam {
353 bool done;
354 bool quit;
355 bool zero_page;
356 QEMUFile *file;
357 QemuMutex mutex;
358 QemuCond cond;
359 RAMBlock *block;
360 ram_addr_t offset;
361
362 /* internally used fields */
363 z_stream stream;
364 uint8_t *originbuf;
365 };
366 typedef struct CompressParam CompressParam;
367
368 struct DecompressParam {
369 bool done;
370 bool quit;
371 QemuMutex mutex;
372 QemuCond cond;
373 void *des;
374 uint8_t *compbuf;
375 int len;
376 z_stream stream;
377 };
378 typedef struct DecompressParam DecompressParam;
379
380 static CompressParam *comp_param;
381 static QemuThread *compress_threads;
382 /* comp_done_cond is used to wake up the migration thread when
383 * one of the compression threads has finished the compression.
384 * comp_done_lock is used to co-work with comp_done_cond.
385 */
386 static QemuMutex comp_done_lock;
387 static QemuCond comp_done_cond;
388 /* The empty QEMUFileOps will be used by file in CompressParam */
389 static const QEMUFileOps empty_ops = { };
390
391 static QEMUFile *decomp_file;
392 static DecompressParam *decomp_param;
393 static QemuThread *decompress_threads;
394 static QemuMutex decomp_done_lock;
395 static QemuCond decomp_done_cond;
396
397 static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
398 ram_addr_t offset, uint8_t *source_buf);
399
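/**
 * do_data_compress: compression worker thread body
 *
 * Waits on param->cond for a block/offset pair to be queued, compresses
 * that page into param->file via do_compress_ram_page(), then marks
 * itself done and signals comp_done_cond so the migration thread can
 * collect the result.  The loop exits once param->quit is set.
 */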
400 static void *do_data_compress(void *opaque)
401 {
402 CompressParam *param = opaque;
403 RAMBlock *block;
404 ram_addr_t offset;
405 bool zero_page;
406
407 qemu_mutex_lock(&param->mutex);
408 while (!param->quit) {
409 if (param->block) {
410 block = param->block;
411 offset = param->offset;
412 param->block = NULL;
413 qemu_mutex_unlock(&param->mutex);
414
415 zero_page = do_compress_ram_page(param->file, &param->stream,
416 block, offset, param->originbuf);
417
418 qemu_mutex_lock(&comp_done_lock);
419 param->done = true;
420 param->zero_page = zero_page;
421 qemu_cond_signal(&comp_done_cond);
422 qemu_mutex_unlock(&comp_done_lock);
423
424 qemu_mutex_lock(&param->mutex);
425 } else {
426 qemu_cond_wait(&param->cond, &param->mutex);
427 }
428 }
429 qemu_mutex_unlock(&param->mutex);
430
431 return NULL;
432 }
433
434 static void compress_threads_save_cleanup(void)
435 {
436 int i, thread_count;
437
438 if (!migrate_use_compression() || !comp_param) {
439 return;
440 }
441
442 thread_count = migrate_compress_threads();
443 for (i = 0; i < thread_count; i++) {
444 /*
445 * we use it as an indicator of whether the thread is
446 * properly initialized or not
447 */
448 if (!comp_param[i].file) {
449 break;
450 }
451
452 qemu_mutex_lock(&comp_param[i].mutex);
453 comp_param[i].quit = true;
454 qemu_cond_signal(&comp_param[i].cond);
455 qemu_mutex_unlock(&comp_param[i].mutex);
456
457 qemu_thread_join(compress_threads + i);
458 qemu_mutex_destroy(&comp_param[i].mutex);
459 qemu_cond_destroy(&comp_param[i].cond);
460 deflateEnd(&comp_param[i].stream);
461 g_free(comp_param[i].originbuf);
462 qemu_fclose(comp_param[i].file);
463 comp_param[i].file = NULL;
464 }
465 qemu_mutex_destroy(&comp_done_lock);
466 qemu_cond_destroy(&comp_done_cond);
467 g_free(compress_threads);
468 g_free(comp_param);
469 compress_threads = NULL;
470 comp_param = NULL;
471 }
472
473 static int compress_threads_save_setup(void)
474 {
475 int i, thread_count;
476
477 if (!migrate_use_compression()) {
478 return 0;
479 }
480 thread_count = migrate_compress_threads();
481 compress_threads = g_new0(QemuThread, thread_count);
482 comp_param = g_new0(CompressParam, thread_count);
483 qemu_cond_init(&comp_done_cond);
484 qemu_mutex_init(&comp_done_lock);
485 for (i = 0; i < thread_count; i++) {
486 comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
487 if (!comp_param[i].originbuf) {
488 goto exit;
489 }
490
491 if (deflateInit(&comp_param[i].stream,
492 migrate_compress_level()) != Z_OK) {
493 g_free(comp_param[i].originbuf);
494 goto exit;
495 }
496
497 /* comp_param[i].file is just used as a dummy buffer to save data,
498 * set its ops to empty.
499 */
500 comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
501 comp_param[i].done = true;
502 comp_param[i].quit = false;
503 qemu_mutex_init(&comp_param[i].mutex);
504 qemu_cond_init(&comp_param[i].cond);
505 qemu_thread_create(compress_threads + i, "compress",
506 do_data_compress, comp_param + i,
507 QEMU_THREAD_JOINABLE);
508 }
509 return 0;
510
511 exit:
512 compress_threads_save_cleanup();
513 return -1;
514 }
515
516 /* Multiple fd's */
517
518 #define MULTIFD_MAGIC 0x11223344U
519 #define MULTIFD_VERSION 1
520
521 #define MULTIFD_FLAG_SYNC (1 << 0)
522
523 typedef struct {
524 uint32_t magic;
525 uint32_t version;
526 unsigned char uuid[16]; /* QemuUUID */
527 uint8_t id;
528 } __attribute__((packed)) MultiFDInit_t;
529
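/*
 * Per-packet header sent on each multifd channel, followed by the page
 * data itself.  All multi-byte fields are big endian on the wire (see
 * multifd_send_fill_packet() and multifd_recv_unfill_packet()).  "size"
 * is the number of pages the sender allocates per packet, "used" is how
 * many entries of offset[] are valid, and "ramblock" is the idstr of
 * the RAMBlock the offsets belong to.
 */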
530 typedef struct {
531 uint32_t magic;
532 uint32_t version;
533 uint32_t flags;
534 uint32_t size;
535 uint32_t used;
536 uint64_t packet_num;
537 char ramblock[256];
538 uint64_t offset[];
539 } __attribute__((packed)) MultiFDPacket_t;
540
541 typedef struct {
542 /* number of used pages */
543 uint32_t used;
544 /* number of allocated pages */
545 uint32_t allocated;
546 /* global number of generated multifd packets */
547 uint64_t packet_num;
548 /* offset of each page */
549 ram_addr_t *offset;
550 /* pointer to each page */
551 struct iovec *iov;
552 RAMBlock *block;
553 } MultiFDPages_t;
554
555 typedef struct {
556 /* these fields are not changed once the thread is created */
557 /* channel number */
558 uint8_t id;
559 /* channel thread name */
560 char *name;
561 /* channel thread id */
562 QemuThread thread;
563 /* communication channel */
564 QIOChannel *c;
565 /* sem where to wait for more work */
566 QemuSemaphore sem;
567 /* this mutex protects the following parameters */
568 QemuMutex mutex;
569 /* is this channel thread running */
570 bool running;
571 /* should this thread finish */
572 bool quit;
573 /* thread has work to do */
574 int pending_job;
575 /* array of pages to send */
576 MultiFDPages_t *pages;
577 /* packet allocated len */
578 uint32_t packet_len;
579 /* pointer to the packet */
580 MultiFDPacket_t *packet;
581 /* multifd flags for each packet */
582 uint32_t flags;
583 /* global number of generated multifd packets */
584 uint64_t packet_num;
585 /* thread local variables */
586 /* packets sent through this channel */
587 uint64_t num_packets;
588 /* pages sent through this channel */
589 uint64_t num_pages;
590 /* syncs main thread and channels */
591 QemuSemaphore sem_sync;
592 } MultiFDSendParams;
593
594 typedef struct {
595 /* these fields are not changed once the thread is created */
596 /* channel number */
597 uint8_t id;
598 /* channel thread name */
599 char *name;
600 /* channel thread id */
601 QemuThread thread;
602 /* communication channel */
603 QIOChannel *c;
604 /* this mutex protects the following parameters */
605 QemuMutex mutex;
606 /* is this channel thread running */
607 bool running;
608 /* array of pages to receive */
609 MultiFDPages_t *pages;
610 /* packet allocated len */
611 uint32_t packet_len;
612 /* pointer to the packet */
613 MultiFDPacket_t *packet;
614 /* multifd flags for each packet */
615 uint32_t flags;
616 /* global number of generated multifd packets */
617 uint64_t packet_num;
618 /* thread local variables */
619 /* packets received through this channel */
620 uint64_t num_packets;
621 /* pages received through this channel */
622 uint64_t num_pages;
623 /* syncs main thread and channels */
624 QemuSemaphore sem_sync;
625 } MultiFDRecvParams;
626
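/**
 * multifd_send_initial_packet: send the handshake packet for a channel
 *
 * Sends the magic, version, VM uuid and channel id so that the
 * destination can match this connection to the right receive channel.
 *
 * Returns 0 on success, -1 on error (with @errp set by the write).
 */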
627 static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
628 {
629 MultiFDInit_t msg;
630 int ret;
631
632 msg.magic = cpu_to_be32(MULTIFD_MAGIC);
633 msg.version = cpu_to_be32(MULTIFD_VERSION);
634 msg.id = p->id;
635 memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));
636
637 ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
638 if (ret != 0) {
639 return -1;
640 }
641 return 0;
642 }
643
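/**
 * multifd_recv_initial_packet: read and validate the handshake packet
 *
 * Checks the magic, version and uuid against our own, and that the
 * channel id is in range.
 *
 * Returns the channel id on success, -1 on error (with @errp set).
 */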
644 static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
645 {
646 MultiFDInit_t msg;
647 int ret;
648
649 ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
650 if (ret != 0) {
651 return -1;
652 }
653
654 msg.magic = be32_to_cpu(msg.magic);
655 msg.version = be32_to_cpu(msg.version);
656
657 if (msg.magic != MULTIFD_MAGIC) {
658 error_setg(errp, "multifd: received packet magic %x "
659 "expected %x", msg.magic, MULTIFD_MAGIC);
660 return -1;
661 }
662
663 if (msg.version != MULTIFD_VERSION) {
664 error_setg(errp, "multifd: received packet version %d "
665 "expected %d", msg.version, MULTIFD_VERSION);
666 return -1;
667 }
668
669 if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
670 char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
671 char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);
672
673 error_setg(errp, "multifd: received uuid '%s' and expected "
674 "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
675 g_free(uuid);
676 g_free(msg_uuid);
677 return -1;
678 }
679
680 if (msg.id > migrate_multifd_channels()) {
681 error_setg(errp, "multifd: received channel id %d, "
682 "expected at most %d", msg.id, migrate_multifd_channels());
683 return -1;
684 }
685
686 return msg.id;
687 }
688
689 static MultiFDPages_t *multifd_pages_init(size_t size)
690 {
691 MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
692
693 pages->allocated = size;
694 pages->iov = g_new0(struct iovec, size);
695 pages->offset = g_new0(ram_addr_t, size);
696
697 return pages;
698 }
699
700 static void multifd_pages_clear(MultiFDPages_t *pages)
701 {
702 pages->used = 0;
703 pages->allocated = 0;
704 pages->packet_num = 0;
705 pages->block = NULL;
706 g_free(pages->iov);
707 pages->iov = NULL;
708 g_free(pages->offset);
709 pages->offset = NULL;
710 g_free(pages);
711 }
712
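/**
 * multifd_send_fill_packet: build the packet header from p->pages
 *
 * Converts all header fields to big endian and copies the RAMBlock
 * idstr and the page offsets into p->packet, ready to be written to
 * the channel.
 */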
713 static void multifd_send_fill_packet(MultiFDSendParams *p)
714 {
715 MultiFDPacket_t *packet = p->packet;
716 int i;
717
718 packet->magic = cpu_to_be32(MULTIFD_MAGIC);
719 packet->version = cpu_to_be32(MULTIFD_VERSION);
720 packet->flags = cpu_to_be32(p->flags);
721 packet->size = cpu_to_be32(migrate_multifd_page_count());
722 packet->used = cpu_to_be32(p->pages->used);
723 packet->packet_num = cpu_to_be64(p->packet_num);
724
725 if (p->pages->block) {
726 strncpy(packet->ramblock, p->pages->block->idstr, 256);
727 }
728
729 for (i = 0; i < p->pages->used; i++) {
730 packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
731 }
732 }
733
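/**
 * multifd_recv_unfill_packet: parse and validate a received packet header
 *
 * Converts the header fields back from big endian, sanity checks them,
 * looks up the RAMBlock by name and fills p->pages->iov with the host
 * addresses the page data will be read into.
 *
 * Returns 0 on success, -1 on error (with @errp set).
 */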
734 static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
735 {
736 MultiFDPacket_t *packet = p->packet;
737 RAMBlock *block;
738 int i;
739
740 packet->magic = be32_to_cpu(packet->magic);
741 if (packet->magic != MULTIFD_MAGIC) {
742 error_setg(errp, "multifd: received packet "
743 "magic %x and expected magic %x",
744 packet->magic, MULTIFD_MAGIC);
745 return -1;
746 }
747
748 packet->version = be32_to_cpu(packet->version);
749 if (packet->version != MULTIFD_VERSION) {
750 error_setg(errp, "multifd: received packet "
751 "version %d and expected version %d",
752 packet->version, MULTIFD_VERSION);
753 return -1;
754 }
755
756 p->flags = be32_to_cpu(packet->flags);
757
758 packet->size = be32_to_cpu(packet->size);
759 if (packet->size > migrate_multifd_page_count()) {
760 error_setg(errp, "multifd: received packet "
761 "with size %d and expected maximum size %d",
762 packet->size, migrate_multifd_page_count());
763 return -1;
764 }
765
766 p->pages->used = be32_to_cpu(packet->used);
767 if (p->pages->used > packet->size) {
768 error_setg(errp, "multifd: received packet "
769 "with %d pages and expected maximum of %d pages",
770 p->pages->used, packet->size);
771 return -1;
772 }
773
774 p->packet_num = be64_to_cpu(packet->packet_num);
775
776 if (p->pages->used) {
777 /* make sure that the ramblock name is NUL terminated */
778 packet->ramblock[255] = 0;
779 block = qemu_ram_block_by_name(packet->ramblock);
780 if (!block) {
781 error_setg(errp, "multifd: unknown ram block %s",
782 packet->ramblock);
783 return -1;
784 }
785 }
786
787 for (i = 0; i < p->pages->used; i++) {
788 ram_addr_t offset = be64_to_cpu(packet->offset[i]);
789
790 if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
791 error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
792 " (max " RAM_ADDR_FMT ")",
793 offset, block->used_length);
794 return -1;
795 }
796 p->pages->iov[i].iov_base = block->host + offset;
797 p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
798 }
799
800 return 0;
801 }
802
803 struct {
804 MultiFDSendParams *params;
805 /* number of created threads */
806 int count;
807 /* array of pages to sent */
808 MultiFDPages_t *pages;
809 /* syncs main thread and channels */
810 QemuSemaphore sem_sync;
811 /* global number of generated multifd packets */
812 uint64_t packet_num;
813 /* send channels ready */
814 QemuSemaphore channels_ready;
815 } *multifd_send_state;
816
817 /*
818 * How do we use multifd_send_state->pages and channel->pages?
819 *
820 * We create a pages struct for each channel, and a main one. Each time
821 * that we need to send a batch of pages we interchange the ones between
822 * multifd_send_state and the channel that is sending it. There are
823 * two reasons for that:
824 * - to not have to do so many mallocs during migration
825 * - to make it easier to know what to free at the end of migration
826 *
827 * This way we always know who is the owner of each "pages" struct,
828 * and we don't need any locking. It belongs to the migration thread
829 * or to the channel thread. Switching is safe because the migration
830 * thread uses the channel mutex when changing it, and the channel
831 * thread must have finished with its own, otherwise pending_job can't
832 * be false.
833 */
834
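/**
 * multifd_send_pages: hand the current batch of pages to a channel
 *
 * Picks the next idle channel (round robin), swaps
 * multifd_send_state->pages with that channel's pages, updates the
 * transfer counters and kicks the channel thread via p->sem.
 */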
835 static void multifd_send_pages(void)
836 {
837 int i;
838 static int next_channel;
839 MultiFDSendParams *p = NULL; /* make happy gcc */
840 MultiFDPages_t *pages = multifd_send_state->pages;
841 uint64_t transferred;
842
843 qemu_sem_wait(&multifd_send_state->channels_ready);
844 for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
845 p = &multifd_send_state->params[i];
846
847 qemu_mutex_lock(&p->mutex);
848 if (!p->pending_job) {
849 p->pending_job++;
850 next_channel = (i + 1) % migrate_multifd_channels();
851 break;
852 }
853 qemu_mutex_unlock(&p->mutex);
854 }
855 p->pages->used = 0;
856
857 p->packet_num = multifd_send_state->packet_num++;
858 p->pages->block = NULL;
859 multifd_send_state->pages = p->pages;
860 p->pages = pages;
861 transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
862 ram_counters.multifd_bytes += transferred;
863 ram_counters.transferred += transferred;
864 qemu_mutex_unlock(&p->mutex);
865 qemu_sem_post(&p->sem);
866 }
867
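/**
 * multifd_queue_page: queue a single page for multifd transmission
 *
 * Appends the page to the current batch when it belongs to the same
 * RAMBlock; a full batch is flushed with multifd_send_pages().  A page
 * from a different RAMBlock flushes the batch first and is then queued
 * again.
 */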
868 static void multifd_queue_page(RAMBlock *block, ram_addr_t offset)
869 {
870 MultiFDPages_t *pages = multifd_send_state->pages;
871
872 if (!pages->block) {
873 pages->block = block;
874 }
875
876 if (pages->block == block) {
877 pages->offset[pages->used] = offset;
878 pages->iov[pages->used].iov_base = block->host + offset;
879 pages->iov[pages->used].iov_len = TARGET_PAGE_SIZE;
880 pages->used++;
881
882 if (pages->used < pages->allocated) {
883 return;
884 }
885 }
886
887 multifd_send_pages();
888
889 if (pages->block != block) {
890 multifd_queue_page(block, offset);
891 }
892 }
893
894 static void multifd_send_terminate_threads(Error *err)
895 {
896 int i;
897
898 if (err) {
899 MigrationState *s = migrate_get_current();
900 migrate_set_error(s, err);
901 if (s->state == MIGRATION_STATUS_SETUP ||
902 s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
903 s->state == MIGRATION_STATUS_DEVICE ||
904 s->state == MIGRATION_STATUS_ACTIVE) {
905 migrate_set_state(&s->state, s->state,
906 MIGRATION_STATUS_FAILED);
907 }
908 }
909
910 for (i = 0; i < migrate_multifd_channels(); i++) {
911 MultiFDSendParams *p = &multifd_send_state->params[i];
912
913 qemu_mutex_lock(&p->mutex);
914 p->quit = true;
915 qemu_sem_post(&p->sem);
916 qemu_mutex_unlock(&p->mutex);
917 }
918 }
919
920 int multifd_save_cleanup(Error **errp)
921 {
922 int i;
923 int ret = 0;
924
925 if (!migrate_use_multifd()) {
926 return 0;
927 }
928 multifd_send_terminate_threads(NULL);
929 for (i = 0; i < migrate_multifd_channels(); i++) {
930 MultiFDSendParams *p = &multifd_send_state->params[i];
931
932 if (p->running) {
933 qemu_thread_join(&p->thread);
934 }
935 socket_send_channel_destroy(p->c);
936 p->c = NULL;
937 qemu_mutex_destroy(&p->mutex);
938 qemu_sem_destroy(&p->sem);
939 qemu_sem_destroy(&p->sem_sync);
940 g_free(p->name);
941 p->name = NULL;
942 multifd_pages_clear(p->pages);
943 p->pages = NULL;
944 p->packet_len = 0;
945 g_free(p->packet);
946 p->packet = NULL;
947 }
948 qemu_sem_destroy(&multifd_send_state->channels_ready);
949 qemu_sem_destroy(&multifd_send_state->sem_sync);
950 g_free(multifd_send_state->params);
951 multifd_send_state->params = NULL;
952 multifd_pages_clear(multifd_send_state->pages);
953 multifd_send_state->pages = NULL;
954 g_free(multifd_send_state);
955 multifd_send_state = NULL;
956 return ret;
957 }
958
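/**
 * multifd_send_sync_main: synchronize the migration thread with all channels
 *
 * Flushes any pending batch, asks every channel to send a packet with
 * MULTIFD_FLAG_SYNC set, and waits on sem_sync until every channel has
 * done so.
 */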
959 static void multifd_send_sync_main(void)
960 {
961 int i;
962
963 if (!migrate_use_multifd()) {
964 return;
965 }
966 if (multifd_send_state->pages->used) {
967 multifd_send_pages();
968 }
969 for (i = 0; i < migrate_multifd_channels(); i++) {
970 MultiFDSendParams *p = &multifd_send_state->params[i];
971
972 trace_multifd_send_sync_main_signal(p->id);
973
974 qemu_mutex_lock(&p->mutex);
975
976 p->packet_num = multifd_send_state->packet_num++;
977 p->flags |= MULTIFD_FLAG_SYNC;
978 p->pending_job++;
979 qemu_mutex_unlock(&p->mutex);
980 qemu_sem_post(&p->sem);
981 }
982 for (i = 0; i < migrate_multifd_channels(); i++) {
983 MultiFDSendParams *p = &multifd_send_state->params[i];
984
985 trace_multifd_send_sync_main_wait(p->id);
986 qemu_sem_wait(&multifd_send_state->sem_sync);
987 }
988 trace_multifd_send_sync_main(multifd_send_state->packet_num);
989 }
990
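/**
 * multifd_send_thread: per-channel send thread body
 *
 * Sends the initial handshake packet, then loops waiting on p->sem for
 * work: it writes the packet header followed by the page data, posts
 * sem_sync when a SYNC packet was requested, and signals channels_ready
 * so the migration thread can pick this channel again.
 */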
991 static void *multifd_send_thread(void *opaque)
992 {
993 MultiFDSendParams *p = opaque;
994 Error *local_err = NULL;
995 int ret;
996
997 trace_multifd_send_thread_start(p->id);
998 rcu_register_thread();
999
1000 if (multifd_send_initial_packet(p, &local_err) < 0) {
1001 goto out;
1002 }
1003 /* initial packet */
1004 p->num_packets = 1;
1005
1006 while (true) {
1007 qemu_sem_wait(&p->sem);
1008 qemu_mutex_lock(&p->mutex);
1009
1010 if (p->pending_job) {
1011 uint32_t used = p->pages->used;
1012 uint64_t packet_num = p->packet_num;
1013 uint32_t flags = p->flags;
1014
1015 multifd_send_fill_packet(p);
1016 p->flags = 0;
1017 p->num_packets++;
1018 p->num_pages += used;
1019 p->pages->used = 0;
1020 qemu_mutex_unlock(&p->mutex);
1021
1022 trace_multifd_send(p->id, packet_num, used, flags);
1023
1024 ret = qio_channel_write_all(p->c, (void *)p->packet,
1025 p->packet_len, &local_err);
1026 if (ret != 0) {
1027 break;
1028 }
1029
1030 ret = qio_channel_writev_all(p->c, p->pages->iov, used, &local_err);
1031 if (ret != 0) {
1032 break;
1033 }
1034
1035 qemu_mutex_lock(&p->mutex);
1036 p->pending_job--;
1037 qemu_mutex_unlock(&p->mutex);
1038
1039 if (flags & MULTIFD_FLAG_SYNC) {
1040 qemu_sem_post(&multifd_send_state->sem_sync);
1041 }
1042 qemu_sem_post(&multifd_send_state->channels_ready);
1043 } else if (p->quit) {
1044 qemu_mutex_unlock(&p->mutex);
1045 break;
1046 } else {
1047 qemu_mutex_unlock(&p->mutex);
1048 /* sometimes there are spurious wakeups */
1049 }
1050 }
1051
1052 out:
1053 if (local_err) {
1054 multifd_send_terminate_threads(local_err);
1055 }
1056
1057 qemu_mutex_lock(&p->mutex);
1058 p->running = false;
1059 qemu_mutex_unlock(&p->mutex);
1060
1061 rcu_unregister_thread();
1062 trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);
1063
1064 return NULL;
1065 }
1066
1067 static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
1068 {
1069 MultiFDSendParams *p = opaque;
1070 QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
1071 Error *local_err = NULL;
1072
1073 if (qio_task_propagate_error(task, &local_err)) {
1074 if (multifd_save_cleanup(&local_err) != 0) {
1075 migrate_set_error(migrate_get_current(), local_err);
1076 }
1077 } else {
1078 p->c = QIO_CHANNEL(sioc);
1079 qio_channel_set_delay(p->c, false);
1080 p->running = true;
1081 qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
1082 QEMU_THREAD_JOINABLE);
1083
1084 atomic_inc(&multifd_send_state->count);
1085 }
1086 }
1087
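/**
 * multifd_save_setup: allocate the multifd send state and start the channels
 *
 * Allocates multifd_send_state and the per-channel parameters, then
 * asynchronously creates one socket channel per configured multifd
 * channel; the send threads are started from
 * multifd_new_send_channel_async() once each connection is ready.
 *
 * Returns 0; it is a no-op when multifd is not enabled.
 */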
1088 int multifd_save_setup(void)
1089 {
1090 int thread_count;
1091 uint32_t page_count = migrate_multifd_page_count();
1092 uint8_t i;
1093
1094 if (!migrate_use_multifd()) {
1095 return 0;
1096 }
1097 thread_count = migrate_multifd_channels();
1098 multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
1099 multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
1100 atomic_set(&multifd_send_state->count, 0);
1101 multifd_send_state->pages = multifd_pages_init(page_count);
1102 qemu_sem_init(&multifd_send_state->sem_sync, 0);
1103 qemu_sem_init(&multifd_send_state->channels_ready, 0);
1104
1105 for (i = 0; i < thread_count; i++) {
1106 MultiFDSendParams *p = &multifd_send_state->params[i];
1107
1108 qemu_mutex_init(&p->mutex);
1109 qemu_sem_init(&p->sem, 0);
1110 qemu_sem_init(&p->sem_sync, 0);
1111 p->quit = false;
1112 p->pending_job = 0;
1113 p->id = i;
1114 p->pages = multifd_pages_init(page_count);
1115 p->packet_len = sizeof(MultiFDPacket_t)
1116 + sizeof(ram_addr_t) * page_count;
1117 p->packet = g_malloc0(p->packet_len);
1118 p->name = g_strdup_printf("multifdsend_%d", i);
1119 socket_send_channel_create(multifd_new_send_channel_async, p);
1120 }
1121 return 0;
1122 }
1123
1124 struct {
1125 MultiFDRecvParams *params;
1126 /* number of created threads */
1127 int count;
1128 /* syncs main thread and channels */
1129 QemuSemaphore sem_sync;
1130 /* global number of generated multifd packets */
1131 uint64_t packet_num;
1132 } *multifd_recv_state;
1133
1134 static void multifd_recv_terminate_threads(Error *err)
1135 {
1136 int i;
1137
1138 if (err) {
1139 MigrationState *s = migrate_get_current();
1140 migrate_set_error(s, err);
1141 if (s->state == MIGRATION_STATUS_SETUP ||
1142 s->state == MIGRATION_STATUS_ACTIVE) {
1143 migrate_set_state(&s->state, s->state,
1144 MIGRATION_STATUS_FAILED);
1145 }
1146 }
1147
1148 for (i = 0; i < migrate_multifd_channels(); i++) {
1149 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1150
1151 qemu_mutex_lock(&p->mutex);
1152 /* We could arrive here for two reasons:
1153 - normal quit, i.e. everything went fine, just finished
1154 - error quit: We close the channels so the channel threads
1155 finish the qio_channel_read_all_eof() */
1156 qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
1157 qemu_mutex_unlock(&p->mutex);
1158 }
1159 }
1160
1161 int multifd_load_cleanup(Error **errp)
1162 {
1163 int i;
1164 int ret = 0;
1165
1166 if (!migrate_use_multifd()) {
1167 return 0;
1168 }
1169 multifd_recv_terminate_threads(NULL);
1170 for (i = 0; i < migrate_multifd_channels(); i++) {
1171 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1172
1173 if (p->running) {
1174 qemu_thread_join(&p->thread);
1175 }
1176 object_unref(OBJECT(p->c));
1177 p->c = NULL;
1178 qemu_mutex_destroy(&p->mutex);
1179 qemu_sem_destroy(&p->sem_sync);
1180 g_free(p->name);
1181 p->name = NULL;
1182 multifd_pages_clear(p->pages);
1183 p->pages = NULL;
1184 p->packet_len = 0;
1185 g_free(p->packet);
1186 p->packet = NULL;
1187 }
1188 qemu_sem_destroy(&multifd_recv_state->sem_sync);
1189 g_free(multifd_recv_state->params);
1190 multifd_recv_state->params = NULL;
1191 g_free(multifd_recv_state);
1192 multifd_recv_state = NULL;
1193
1194 return ret;
1195 }
1196
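/**
 * multifd_recv_sync_main: synchronize the main thread with all recv channels
 *
 * Waits until every channel has reached a SYNC packet, records the
 * highest packet number seen, and then releases all channel threads
 * via their sem_sync.
 */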
1197 static void multifd_recv_sync_main(void)
1198 {
1199 int i;
1200
1201 if (!migrate_use_multifd()) {
1202 return;
1203 }
1204 for (i = 0; i < migrate_multifd_channels(); i++) {
1205 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1206
1207 trace_multifd_recv_sync_main_wait(p->id);
1208 qemu_sem_wait(&multifd_recv_state->sem_sync);
1209 qemu_mutex_lock(&p->mutex);
1210 if (multifd_recv_state->packet_num < p->packet_num) {
1211 multifd_recv_state->packet_num = p->packet_num;
1212 }
1213 qemu_mutex_unlock(&p->mutex);
1214 }
1215 for (i = 0; i < migrate_multifd_channels(); i++) {
1216 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1217
1218 trace_multifd_recv_sync_main_signal(p->id);
1219 qemu_sem_post(&p->sem_sync);
1220 }
1221 trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
1222 }
1223
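/**
 * multifd_recv_thread: per-channel receive thread body
 *
 * Loops reading a packet header from the channel, validates it with
 * multifd_recv_unfill_packet(), reads the page data straight into guest
 * memory via the prepared iovec, and handles SYNC packets by
 * handshaking with multifd_recv_sync_main().
 */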
1224 static void *multifd_recv_thread(void *opaque)
1225 {
1226 MultiFDRecvParams *p = opaque;
1227 Error *local_err = NULL;
1228 int ret;
1229
1230 trace_multifd_recv_thread_start(p->id);
1231 rcu_register_thread();
1232
1233 while (true) {
1234 uint32_t used;
1235 uint32_t flags;
1236
1237 ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
1238 p->packet_len, &local_err);
1239 if (ret == 0) { /* EOF */
1240 break;
1241 }
1242 if (ret == -1) { /* Error */
1243 break;
1244 }
1245
1246 qemu_mutex_lock(&p->mutex);
1247 ret = multifd_recv_unfill_packet(p, &local_err);
1248 if (ret) {
1249 qemu_mutex_unlock(&p->mutex);
1250 break;
1251 }
1252
1253 used = p->pages->used;
1254 flags = p->flags;
1255 trace_multifd_recv(p->id, p->packet_num, used, flags);
1256 p->num_packets++;
1257 p->num_pages += used;
1258 qemu_mutex_unlock(&p->mutex);
1259
1260 ret = qio_channel_readv_all(p->c, p->pages->iov, used, &local_err);
1261 if (ret != 0) {
1262 break;
1263 }
1264
1265 if (flags & MULTIFD_FLAG_SYNC) {
1266 qemu_sem_post(&multifd_recv_state->sem_sync);
1267 qemu_sem_wait(&p->sem_sync);
1268 }
1269 }
1270
1271 if (local_err) {
1272 multifd_recv_terminate_threads(local_err);
1273 }
1274 qemu_mutex_lock(&p->mutex);
1275 p->running = false;
1276 qemu_mutex_unlock(&p->mutex);
1277
1278 rcu_unregister_thread();
1279 trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);
1280
1281 return NULL;
1282 }
1283
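/**
 * multifd_load_setup: allocate the multifd receive state
 *
 * Allocates multifd_recv_state and the per-channel parameters.  The
 * receive threads are created later, from multifd_recv_new_channel(),
 * as each incoming connection arrives.
 *
 * Returns 0; it is a no-op when multifd is not enabled.
 */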
1284 int multifd_load_setup(void)
1285 {
1286 int thread_count;
1287 uint32_t page_count = migrate_multifd_page_count();
1288 uint8_t i;
1289
1290 if (!migrate_use_multifd()) {
1291 return 0;
1292 }
1293 thread_count = migrate_multifd_channels();
1294 multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
1295 multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
1296 atomic_set(&multifd_recv_state->count, 0);
1297 qemu_sem_init(&multifd_recv_state->sem_sync, 0);
1298
1299 for (i = 0; i < thread_count; i++) {
1300 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1301
1302 qemu_mutex_init(&p->mutex);
1303 qemu_sem_init(&p->sem_sync, 0);
1304 p->id = i;
1305 p->pages = multifd_pages_init(page_count);
1306 p->packet_len = sizeof(MultiFDPacket_t)
1307 + sizeof(ram_addr_t) * page_count;
1308 p->packet = g_malloc0(p->packet_len);
1309 p->name = g_strdup_printf("multifdrecv_%d", i);
1310 }
1311 return 0;
1312 }
1313
1314 bool multifd_recv_all_channels_created(void)
1315 {
1316 int thread_count = migrate_multifd_channels();
1317
1318 if (!migrate_use_multifd()) {
1319 return true;
1320 }
1321
1322 return thread_count == atomic_read(&multifd_recv_state->count);
1323 }
1324
1325 /*
1326 * Try to receive all multifd channels to get ready for the migration.
1327 * - Return true and do not set @errp when correctly receiving all channels;
1328 * - Return false and do not set @errp when correctly receiving the current one;
1329 * - Return false and set @errp when failing to receive the current channel.
1330 */
1331 bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
1332 {
1333 MultiFDRecvParams *p;
1334 Error *local_err = NULL;
1335 int id;
1336
1337 id = multifd_recv_initial_packet(ioc, &local_err);
1338 if (id < 0) {
1339 multifd_recv_terminate_threads(local_err);
1340 error_propagate_prepend(errp, local_err,
1341 "failed to receive packet"
1342 " via multifd channel %d: ",
1343 atomic_read(&multifd_recv_state->count));
1344 return false;
1345 }
1346
1347 p = &multifd_recv_state->params[id];
1348 if (p->c != NULL) {
1349 error_setg(&local_err, "multifd: received id '%d' already setup",
1350 id);
1351 multifd_recv_terminate_threads(local_err);
1352 error_propagate(errp, local_err);
1353 return false;
1354 }
1355 p->c = ioc;
1356 object_ref(OBJECT(ioc));
1357 /* initial packet */
1358 p->num_packets = 1;
1359
1360 p->running = true;
1361 qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
1362 QEMU_THREAD_JOINABLE);
1363 atomic_inc(&multifd_recv_state->count);
1364 return atomic_read(&multifd_recv_state->count) ==
1365 migrate_multifd_channels();
1366 }
1367
1368 /**
1369 * save_page_header: write page header to wire
1370 *
1371 * If this is the 1st block, it also writes the block identification
1372 *
1373 * Returns the number of bytes written
1374 *
1375 * @f: QEMUFile where to send the data
1376 * @block: block that contains the page we want to send
1377 * @offset: offset inside the block for the page;
1378 * the lower bits contain flags
1379 */
1380 static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
1381 ram_addr_t offset)
1382 {
1383 size_t size, len;
1384
1385 if (block == rs->last_sent_block) {
1386 offset |= RAM_SAVE_FLAG_CONTINUE;
1387 }
1388 qemu_put_be64(f, offset);
1389 size = 8;
1390
1391 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
1392 len = strlen(block->idstr);
1393 qemu_put_byte(f, len);
1394 qemu_put_buffer(f, (uint8_t *)block->idstr, len);
1395 size += 1 + len;
1396 rs->last_sent_block = block;
1397 }
1398 return size;
1399 }
1400
1401 /**
1402 * mig_throttle_guest_down: throttle down the guest
1403 *
1404 * Reduce amount of guest cpu execution to hopefully slow down memory
1405 * writes. If guest dirty memory rate is reduced below the rate at
1406 * which we can transfer pages to the destination then we should be
1407 * able to complete migration. Some workloads dirty memory way too
1408 * fast and will not effectively converge, even with auto-converge.
1409 */
1410 static void mig_throttle_guest_down(void)
1411 {
1412 MigrationState *s = migrate_get_current();
1413 uint64_t pct_initial = s->parameters.cpu_throttle_initial;
1414 uint64_t pct_increment = s->parameters.cpu_throttle_increment;
1415 int pct_max = s->parameters.max_cpu_throttle;
1416
1417 /* We have not started throttling yet. Let's start it. */
1418 if (!cpu_throttle_active()) {
1419 cpu_throttle_set(pct_initial);
1420 } else {
1421 /* Throttling already on, just increase the rate */
1422 cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_increment,
1423 pct_max));
1424 }
1425 }
1426
1427 /**
1428 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
1429 *
1430 * @rs: current RAM state
1431 * @current_addr: address for the zero page
1432 *
1433 * Update the xbzrle cache to reflect a page that's been sent as all 0.
1434 * The important thing is that a stale (not-yet-0'd) page be replaced
1435 * by the new data.
1436 * As a bonus, if the page wasn't in the cache it gets added so that
1437 * when a small write is made into the 0'd page it gets XBZRLE sent.
1438 */
1439 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
1440 {
1441 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
1442 return;
1443 }
1444
1445 /* We don't care if this fails to allocate a new cache page
1446 * as long as it updated an old one */
1447 cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
1448 ram_counters.dirty_sync_count);
1449 }
1450
1451 #define ENCODING_FLAG_XBZRLE 0x1
1452
1453 /**
1454 * save_xbzrle_page: compress and send current page
1455 *
1456 * Returns: 1 means that we wrote the page
1457 * 0 means that page is identical to the one already sent
1458 * -1 means that xbzrle would be longer than normal
1459 *
1460 * @rs: current RAM state
1461 * @current_data: pointer to the address of the page contents
1462 * @current_addr: addr of the page
1463 * @block: block that contains the page we want to send
1464 * @offset: offset inside the block for the page
1465 * @last_stage: if we are at the completion stage
1466 */
1467 static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
1468 ram_addr_t current_addr, RAMBlock *block,
1469 ram_addr_t offset, bool last_stage)
1470 {
1471 int encoded_len = 0, bytes_xbzrle;
1472 uint8_t *prev_cached_page;
1473
1474 if (!cache_is_cached(XBZRLE.cache, current_addr,
1475 ram_counters.dirty_sync_count)) {
1476 xbzrle_counters.cache_miss++;
1477 if (!last_stage) {
1478 if (cache_insert(XBZRLE.cache, current_addr, *current_data,
1479 ram_counters.dirty_sync_count) == -1) {
1480 return -1;
1481 } else {
1482 /* update *current_data when the page has been
1483 inserted into cache */
1484 *current_data = get_cached_data(XBZRLE.cache, current_addr);
1485 }
1486 }
1487 return -1;
1488 }
1489
1490 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
1491
1492 /* save current buffer into memory */
1493 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
1494
1495 /* XBZRLE encoding (if there is no overflow) */
1496 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
1497 TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
1498 TARGET_PAGE_SIZE);
1499 if (encoded_len == 0) {
1500 trace_save_xbzrle_page_skipping();
1501 return 0;
1502 } else if (encoded_len == -1) {
1503 trace_save_xbzrle_page_overflow();
1504 xbzrle_counters.overflow++;
1505 /* update data in the cache */
1506 if (!last_stage) {
1507 memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
1508 *current_data = prev_cached_page;
1509 }
1510 return -1;
1511 }
1512
1513 /* we need to update the data in the cache, in order to get the same data */
1514 if (!last_stage) {
1515 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
1516 }
1517
1518 /* Send XBZRLE based compressed page */
1519 bytes_xbzrle = save_page_header(rs, rs->f, block,
1520 offset | RAM_SAVE_FLAG_XBZRLE);
1521 qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
1522 qemu_put_be16(rs->f, encoded_len);
1523 qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
1524 bytes_xbzrle += encoded_len + 1 + 2;
1525 xbzrle_counters.pages++;
1526 xbzrle_counters.bytes += bytes_xbzrle;
1527 ram_counters.transferred += bytes_xbzrle;
1528
1529 return 1;
1530 }
1531
1532 /**
1533 * migration_bitmap_find_dirty: find the next dirty page from start
1534 *
1535 * Called with rcu_read_lock() to protect migration_bitmap
1536 *
1537 * Returns the byte offset within memory region of the start of a dirty page
1538 *
1539 * @rs: current RAM state
1540 * @rb: RAMBlock where to search for dirty pages
1541 * @start: page where we start the search
1542 */
1543 static inline
1544 unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
1545 unsigned long start)
1546 {
1547 unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
1548 unsigned long *bitmap = rb->bmap;
1549 unsigned long next;
1550
1551 if (!qemu_ram_is_migratable(rb)) {
1552 return size;
1553 }
1554
1555 if (rs->ram_bulk_stage && start > 0) {
1556 next = start + 1;
1557 } else {
1558 next = find_next_bit(bitmap, size, start);
1559 }
1560
1561 return next;
1562 }
1563
1564 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
1565 RAMBlock *rb,
1566 unsigned long page)
1567 {
1568 bool ret;
1569
1570 ret = test_and_clear_bit(page, rb->bmap);
1571
1572 if (ret) {
1573 rs->migration_dirty_pages--;
1574 }
1575 return ret;
1576 }
1577
1578 static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
1579 ram_addr_t start, ram_addr_t length)
1580 {
1581 rs->migration_dirty_pages +=
1582 cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
1583 &rs->num_dirty_pages_period);
1584 }
1585
1586 /**
1587 * ram_pagesize_summary: calculate all the pagesizes of a VM
1588 *
1589 * Returns a summary bitmap of the page sizes of all RAMBlocks
1590 *
1591 * For VMs with just normal pages this is equivalent to the host page
1592 * size. If it's got some huge pages then it's the OR of all the
1593 * different page sizes.
1594 */
1595 uint64_t ram_pagesize_summary(void)
1596 {
1597 RAMBlock *block;
1598 uint64_t summary = 0;
1599
1600 RAMBLOCK_FOREACH_MIGRATABLE(block) {
1601 summary |= block->page_size;
1602 }
1603
1604 return summary;
1605 }
1606
1607 static void migration_update_rates(RAMState *rs, int64_t end_time)
1608 {
1609 uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
1610 double compressed_size;
1611
1612 /* calculate period counters */
1613 ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
1614 / (end_time - rs->time_last_bitmap_sync);
1615
1616 if (!page_count) {
1617 return;
1618 }
1619
1620 if (migrate_use_xbzrle()) {
1621 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
1622 rs->xbzrle_cache_miss_prev) / page_count;
1623 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
1624 }
1625
1626 if (migrate_use_compression()) {
1627 compression_counters.busy_rate = (double)(compression_counters.busy -
1628 rs->compress_thread_busy_prev) / page_count;
1629 rs->compress_thread_busy_prev = compression_counters.busy;
1630
1631 compressed_size = compression_counters.compressed_size -
1632 rs->compressed_size_prev;
1633 if (compressed_size) {
1634 double uncompressed_size = (compression_counters.pages -
1635 rs->compress_pages_prev) * TARGET_PAGE_SIZE;
1636
1637 /* Compression-Ratio = Uncompressed-size / Compressed-size */
1638 compression_counters.compression_rate =
1639 uncompressed_size / compressed_size;
1640
1641 rs->compress_pages_prev = compression_counters.pages;
1642 rs->compressed_size_prev = compression_counters.compressed_size;
1643 }
1644 }
1645 }
1646
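/**
 * migration_bitmap_sync: refresh the migration dirty bitmap
 *
 * Pulls the global dirty log for every migratable RAMBlock into the
 * migration bitmap, updates the period counters, and once per second
 * decides whether auto-converge throttling needs to start or increase.
 */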
1647 static void migration_bitmap_sync(RAMState *rs)
1648 {
1649 RAMBlock *block;
1650 int64_t end_time;
1651 uint64_t bytes_xfer_now;
1652
1653 ram_counters.dirty_sync_count++;
1654
1655 if (!rs->time_last_bitmap_sync) {
1656 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1657 }
1658
1659 trace_migration_bitmap_sync_start();
1660 memory_global_dirty_log_sync();
1661
1662 qemu_mutex_lock(&rs->bitmap_mutex);
1663 rcu_read_lock();
1664 RAMBLOCK_FOREACH_MIGRATABLE(block) {
1665 migration_bitmap_sync_range(rs, block, 0, block->used_length);
1666 }
1667 ram_counters.remaining = ram_bytes_remaining();
1668 rcu_read_unlock();
1669 qemu_mutex_unlock(&rs->bitmap_mutex);
1670
1671 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1672
1673 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1674
1675 /* more than 1 second = 1000 milliseconds */
1676 if (end_time > rs->time_last_bitmap_sync + 1000) {
1677 bytes_xfer_now = ram_counters.transferred;
1678
1679 /* During block migration the auto-converge logic incorrectly detects
1680 * that ram migration makes no progress. Avoid this by disabling the
1681 * throttling logic during the bulk phase of block migration. */
1682 if (migrate_auto_converge() && !blk_mig_bulk_active()) {
1683 /* The following detection logic can be refined later. For now:
1684 Check to see if the dirtied bytes is 50% more than the approx.
1685 amount of bytes that just got transferred since the last time we
1686 were in this routine. If that happens twice, start or increase
1687 throttling */
1688
1689 if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
1690 (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
1691 (++rs->dirty_rate_high_cnt >= 2)) {
1692 trace_migration_throttle();
1693 rs->dirty_rate_high_cnt = 0;
1694 mig_throttle_guest_down();
1695 }
1696 }
1697
1698 migration_update_rates(rs, end_time);
1699
1700 rs->target_page_count_prev = rs->target_page_count;
1701
1702 /* reset period counters */
1703 rs->time_last_bitmap_sync = end_time;
1704 rs->num_dirty_pages_period = 0;
1705 rs->bytes_xfer_prev = bytes_xfer_now;
1706 }
1707 if (migrate_use_events()) {
1708 qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
1709 }
1710 }
1711
1712 /**
1713 * save_zero_page_to_file: send the zero page to the file
1714 *
1715 * Returns the size of data written to the file, 0 means the page is not
1716 * a zero page
1717 *
1718 * @rs: current RAM state
1719 * @file: the file where the data is saved
1720 * @block: block that contains the page we want to send
1721 * @offset: offset inside the block for the page
1722 */
1723 static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
1724 RAMBlock *block, ram_addr_t offset)
1725 {
1726 uint8_t *p = block->host + offset;
1727 int len = 0;
1728
1729 if (is_zero_range(p, TARGET_PAGE_SIZE)) {
1730 len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
1731 qemu_put_byte(file, 0);
1732 len += 1;
1733 }
1734 return len;
1735 }
1736
1737 /**
1738 * save_zero_page: send the zero page to the stream
1739 *
1740 * Returns the number of pages written.
1741 *
1742 * @rs: current RAM state
1743 * @block: block that contains the page we want to send
1744 * @offset: offset inside the block for the page
1745 */
1746 static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
1747 {
1748 int len = save_zero_page_to_file(rs, rs->f, block, offset);
1749
1750 if (len) {
1751 ram_counters.duplicate++;
1752 ram_counters.transferred += len;
1753 return 1;
1754 }
1755 return -1;
1756 }
1757
1758 static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
1759 {
1760 if (!migrate_release_ram() || !migration_in_postcopy()) {
1761 return;
1762 }
1763
1764 ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
1765 }
1766
1767 /*
1768 * @pages: the number of pages written by the control path,
1769 * < 0 - error
1770 * > 0 - number of pages written
1771 *
1772 * Return true if the page has been saved, otherwise false is returned.
1773 */
1774 static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1775 int *pages)
1776 {
1777 uint64_t bytes_xmit = 0;
1778 int ret;
1779
1780 *pages = -1;
1781 ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
1782 &bytes_xmit);
1783 if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1784 return false;
1785 }
1786
1787 if (bytes_xmit) {
1788 ram_counters.transferred += bytes_xmit;
1789 *pages = 1;
1790 }
1791
1792 if (ret == RAM_SAVE_CONTROL_DELAYED) {
1793 return true;
1794 }
1795
1796 if (bytes_xmit > 0) {
1797 ram_counters.normal++;
1798 } else if (bytes_xmit == 0) {
1799 ram_counters.duplicate++;
1800 }
1801
1802 return true;
1803 }
1804
1805 /*
1806 * directly send the page to the stream
1807 *
1808 * Returns the number of pages written.
1809 *
1810 * @rs: current RAM state
1811 * @block: block that contains the page we want to send
1812 * @offset: offset inside the block for the page
1813 * @buf: the page to be sent
1814 * @async: send the page asynchronously
1815 */
1816 static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1817 uint8_t *buf, bool async)
1818 {
1819 ram_counters.transferred += save_page_header(rs, rs->f, block,
1820 offset | RAM_SAVE_FLAG_PAGE);
1821 if (async) {
1822 qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
1823 migrate_release_ram() &
1824 migration_in_postcopy());
1825 } else {
1826 qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
1827 }
1828 ram_counters.transferred += TARGET_PAGE_SIZE;
1829 ram_counters.normal++;
1830 return 1;
1831 }
1832
1833 /**
1834 * ram_save_page: send the given page to the stream
1835 *
1836 * Returns the number of pages written.
1837 * < 0 - error
1838 * >=0 - Number of pages written - this might legally be 0
1839 * if xbzrle noticed the page was the same.
1840 *
1841 * @rs: current RAM state
1842 * @block: block that contains the page we want to send
1843 * @offset: offset inside the block for the page
1844 * @last_stage: if we are at the completion stage
1845 */
1846 static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
1847 {
1848 int pages = -1;
1849 uint8_t *p;
1850 bool send_async = true;
1851 RAMBlock *block = pss->block;
1852 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
1853 ram_addr_t current_addr = block->offset + offset;
1854
1855 p = block->host + offset;
1856 trace_ram_save_page(block->idstr, (uint64_t)offset, p);
1857
1858 XBZRLE_cache_lock();
1859 if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
1860 migrate_use_xbzrle()) {
1861 pages = save_xbzrle_page(rs, &p, current_addr, block,
1862 offset, last_stage);
1863 if (!last_stage) {
1864 /* Can't send this cached data async, since the cache page
1865 * might get updated before it gets to the wire
1866 */
1867 send_async = false;
1868 }
1869 }
1870
1871 /* XBZRLE overflow or normal page */
1872 if (pages == -1) {
1873 pages = save_normal_page(rs, block, offset, p, send_async);
1874 }
1875
1876 XBZRLE_cache_unlock();
1877
1878 return pages;
1879 }
1880
1881 static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
1882 ram_addr_t offset)
1883 {
1884 multifd_queue_page(block, offset);
1885 ram_counters.normal++;
1886
1887 return 1;
1888 }
1889
1890 static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
1891 ram_addr_t offset, uint8_t *source_buf)
1892 {
1893 RAMState *rs = ram_state;
1894 uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
1895 bool zero_page = false;
1896 int ret;
1897
1898 if (save_zero_page_to_file(rs, f, block, offset)) {
1899 zero_page = true;
1900 goto exit;
1901 }
1902
1903 save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
1904
1905 /*
1906 * copy it to an internal buffer to avoid it being modified by the VM,
1907 * so that we can catch errors during compression and
1908 * decompression
1909 */
1910 memcpy(source_buf, p, TARGET_PAGE_SIZE);
1911 ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
1912 if (ret < 0) {
1913 qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
1914 error_report("compressed data failed!");
1915 return false;
1916 }
1917
1918 exit:
1919 ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
1920 return zero_page;
1921 }
1922
1923 static void
1924 update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
1925 {
1926 ram_counters.transferred += bytes_xmit;
1927
1928 if (param->zero_page) {
1929 ram_counters.duplicate++;
1930 return;
1931 }
1932
1933 /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
1934 compression_counters.compressed_size += bytes_xmit - 8;
1935 compression_counters.pages++;
1936 }
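/*
 * For example, if a compression thread produced bytes_xmit == 680 for a
 * page, 8 of those bytes are the page header (the be64 offset|flags word
 * written with RAM_SAVE_FLAG_CONTINUE, so no block name follows), so
 * compression_counters.compressed_size grows by 672 and .pages by 1.
 */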
1937
1938 static bool save_page_use_compression(RAMState *rs);
1939
1940 static void flush_compressed_data(RAMState *rs)
1941 {
1942 int idx, len, thread_count;
1943
1944 if (!save_page_use_compression(rs)) {
1945 return;
1946 }
1947 thread_count = migrate_compress_threads();
1948
1949 qemu_mutex_lock(&comp_done_lock);
1950 for (idx = 0; idx < thread_count; idx++) {
1951 while (!comp_param[idx].done) {
1952 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
1953 }
1954 }
1955 qemu_mutex_unlock(&comp_done_lock);
1956
1957 for (idx = 0; idx < thread_count; idx++) {
1958 qemu_mutex_lock(&comp_param[idx].mutex);
1959 if (!comp_param[idx].quit) {
1960 len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
1961 /*
1962 * it's safe to fetch zero_page without holding comp_done_lock
1963 * as there is no further request submitted to the thread,
1964 * i.e., the thread should be waiting for a request at this point.
1965 */
1966 update_compress_thread_counts(&comp_param[idx], len);
1967 }
1968 qemu_mutex_unlock(&comp_param[idx].mutex);
1969 }
1970 }
1971
1972 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
1973 ram_addr_t offset)
1974 {
1975 param->block = block;
1976 param->offset = offset;
1977 }
1978
1979 static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
1980 ram_addr_t offset)
1981 {
1982 int idx, thread_count, bytes_xmit = -1, pages = -1;
1983 bool wait = migrate_compress_wait_thread();
1984
1985 thread_count = migrate_compress_threads();
1986 qemu_mutex_lock(&comp_done_lock);
1987 retry:
1988 for (idx = 0; idx < thread_count; idx++) {
1989 if (comp_param[idx].done) {
1990 comp_param[idx].done = false;
1991 bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
1992 qemu_mutex_lock(&comp_param[idx].mutex);
1993 set_compress_params(&comp_param[idx], block, offset);
1994 qemu_cond_signal(&comp_param[idx].cond);
1995 qemu_mutex_unlock(&comp_param[idx].mutex);
1996 pages = 1;
1997 update_compress_thread_counts(&comp_param[idx], bytes_xmit);
1998 break;
1999 }
2000 }
2001
2002 /*
2003 * wait for the free thread if the user specifies 'compress-wait-thread',
2004 * otherwise we will post the page out in the main thread as normal page.
2005 */
2006 if (pages < 0 && wait) {
2007 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
2008 goto retry;
2009 }
2010 qemu_mutex_unlock(&comp_done_lock);
2011
2012 return pages;
2013 }
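/*
 * E.g. with compress-threads=4: the loop above scans the four workers for
 * one with .done set, drains that worker's buffered QEMUFile into rs->f,
 * hands it the new (block, offset) and signals its condition variable.
 * If all four are busy and compress-wait-thread is off, it returns -1 and
 * save_compress_page() falls back to the uncompressed path, bumping
 * compression_counters.busy.
 */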
2014
2015 /**
2016 * find_dirty_block: find the next dirty page and update any state
2017 * associated with the search process.
2018 *
2019 * Returns true if a page is found
2020 *
2021 * @rs: current RAM state
2022 * @pss: data about the state of the current dirty page scan
2023 * @again: set to false if the search has scanned the whole of RAM
2024 */
2025 static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
2026 {
2027 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
2028 if (pss->complete_round && pss->block == rs->last_seen_block &&
2029 pss->page >= rs->last_page) {
2030 /*
2031 * We've been once around the RAM and haven't found anything.
2032 * Give up.
2033 */
2034 *again = false;
2035 return false;
2036 }
2037 if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
2038 /* Didn't find anything in this RAM Block */
2039 pss->page = 0;
2040 pss->block = QLIST_NEXT_RCU(pss->block, next);
2041 if (!pss->block) {
2042 /*
2043 * If memory migration starts over, we will meet a dirtied page
2044 * which may still exist in the compression threads' ring, so we
2045 * should flush the compressed data to make sure the new page
2046 * is not overwritten by the old one in the destination.
2047 *
2048 * Also, if xbzrle is on, stop using the data compression at this
2049 * point. In theory, xbzrle can do better than compression.
2050 */
2051 flush_compressed_data(rs);
2052
2053 /* Hit the end of the list */
2054 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
2055 /* Flag that we've looped */
2056 pss->complete_round = true;
2057 rs->ram_bulk_stage = false;
2058 }
2059 /* Didn't find anything this time, but try again on the new block */
2060 *again = true;
2061 return false;
2062 } else {
2063 /* Can go around again, but... */
2064 *again = true;
2065 /* We've found something so probably don't need to */
2066 return true;
2067 }
2068 }
2069
2070 /**
2071 * unqueue_page: gets a page off the queue
2072 *
2073 * Helper for 'get_queued_page' - gets a page off the queue
2074 *
2075 * Returns the block of the page (or NULL if none available)
2076 *
2077 * @rs: current RAM state
2078 * @offset: used to return the offset within the RAMBlock
2079 */
2080 static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
2081 {
2082 RAMBlock *block = NULL;
2083
2084 if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
2085 return NULL;
2086 }
2087
2088 qemu_mutex_lock(&rs->src_page_req_mutex);
2089 if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2090 struct RAMSrcPageRequest *entry =
2091 QSIMPLEQ_FIRST(&rs->src_page_requests);
2092 block = entry->rb;
2093 *offset = entry->offset;
2094
2095 if (entry->len > TARGET_PAGE_SIZE) {
2096 entry->len -= TARGET_PAGE_SIZE;
2097 entry->offset += TARGET_PAGE_SIZE;
2098 } else {
2099 memory_region_unref(block->mr);
2100 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
2101 g_free(entry);
2102 migration_consume_urgent_request();
2103 }
2104 }
2105 qemu_mutex_unlock(&rs->src_page_req_mutex);
2106
2107 return block;
2108 }
2109
2110 /**
2111 * get_queued_page: unqueue a page from the postcopy requests
2112 *
2113 * Skips pages that are already sent (!dirty)
2114 *
2115 * Returns true if a queued page was found
2116 *
2117 * @rs: current RAM state
2118 * @pss: data about the state of the current dirty page scan
2119 */
2120 static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
2121 {
2122 RAMBlock *block;
2123 ram_addr_t offset;
2124 bool dirty;
2125
2126 do {
2127 block = unqueue_page(rs, &offset);
2128 /*
2129 * We're sending this page, and since it's postcopy nothing else
2130 * will dirty it, and we must make sure it doesn't get sent again
2131 * even if this queue request was received after the background
2132 * search already sent it.
2133 */
2134 if (block) {
2135 unsigned long page;
2136
2137 page = offset >> TARGET_PAGE_BITS;
2138 dirty = test_bit(page, block->bmap);
2139 if (!dirty) {
2140 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
2141 page, test_bit(page, block->unsentmap));
2142 } else {
2143 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
2144 }
2145 }
2146
2147 } while (block && !dirty);
2148
2149 if (block) {
2150 /*
2151 * As soon as we start servicing pages out of order, we have
2152 * to kill the bulk stage, since the bulk stage assumes
2153 * (in migration_bitmap_find_and_reset_dirty) that every page is
2154 * dirty, and that is no longer true.
2155 */
2156 rs->ram_bulk_stage = false;
2157
2158 /*
2159 * We want the background search to continue from the queued page
2160 * since the guest is likely to want other pages near to the page
2161 * it just requested.
2162 */
2163 pss->block = block;
2164 pss->page = offset >> TARGET_PAGE_BITS;
2165 }
2166
2167 return !!block;
2168 }
2169
2170 /**
2171 * migration_page_queue_free: drop any remaining pages in the ram
2172 * request queue
2173 *
2174 * It should be empty at the end anyway, but in error cases there may
2175 * be some left. If any pages are left, we drop them.
2176 *
2177 */
2178 static void migration_page_queue_free(RAMState *rs)
2179 {
2180 struct RAMSrcPageRequest *mspr, *next_mspr;
2181 /* This queue should generally be empty - but in the case of a failed
2182 * migration it might have some leftover requests in it.
2183 */
2184 rcu_read_lock();
2185 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
2186 memory_region_unref(mspr->rb->mr);
2187 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
2188 g_free(mspr);
2189 }
2190 rcu_read_unlock();
2191 }
2192
2193 /**
2194 * ram_save_queue_pages: queue the page for transmission
2195 *
2196 * A request from postcopy destination for example.
2197 *
2198 * Returns zero on success or negative on error
2199 *
2200 * @rbname: Name of the RAMBlock of the request. NULL means the
2201 * same as the last one.
2202 * @start: starting address from the start of the RAMBlock
2203 * @len: length (in bytes) to send
2204 */
2205 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
2206 {
2207 RAMBlock *ramblock;
2208 RAMState *rs = ram_state;
2209
2210 ram_counters.postcopy_requests++;
2211 rcu_read_lock();
2212 if (!rbname) {
2213 /* Reuse last RAMBlock */
2214 ramblock = rs->last_req_rb;
2215
2216 if (!ramblock) {
2217 /*
2218 * Shouldn't happen, we can't reuse the last RAMBlock if
2219 * it's the 1st request.
2220 */
2221 error_report("ram_save_queue_pages no previous block");
2222 goto err;
2223 }
2224 } else {
2225 ramblock = qemu_ram_block_by_name(rbname);
2226
2227 if (!ramblock) {
2228 /* We shouldn't be asked for a non-existent RAMBlock */
2229 error_report("ram_save_queue_pages no block '%s'", rbname);
2230 goto err;
2231 }
2232 rs->last_req_rb = ramblock;
2233 }
2234 trace_ram_save_queue_pages(ramblock->idstr, start, len);
2235 if (start+len > ramblock->used_length) {
2236 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
2237 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
2238 __func__, start, len, ramblock->used_length);
2239 goto err;
2240 }
2241
2242 struct RAMSrcPageRequest *new_entry =
2243 g_malloc0(sizeof(struct RAMSrcPageRequest));
2244 new_entry->rb = ramblock;
2245 new_entry->offset = start;
2246 new_entry->len = len;
2247
2248 memory_region_ref(ramblock->mr);
2249 qemu_mutex_lock(&rs->src_page_req_mutex);
2250 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
2251 migration_make_urgent_request();
2252 qemu_mutex_unlock(&rs->src_page_req_mutex);
2253 rcu_read_unlock();
2254
2255 return 0;
2256
2257 err:
2258 rcu_read_unlock();
2259 return -1;
2260 }
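/*
 * Illustrative call (hypothetical values, assuming 4KiB target pages): a
 * postcopy fault on a 2MiB hugepage-backed region might be queued as
 *
 *     ram_save_queue_pages("pc.ram", 0x200000, 0x200000);
 *
 * unqueue_page() then hands the request back one target page at a time,
 * shrinking entry->len by TARGET_PAGE_SIZE on each call (512 pages in
 * total) before freeing the entry and dropping the memory region ref.
 */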
2261
2262 static bool save_page_use_compression(RAMState *rs)
2263 {
2264 if (!migrate_use_compression()) {
2265 return false;
2266 }
2267
2268 /*
2269 * If xbzrle is on, stop using the data compression after the first
2270 * round of migration even if compression is enabled. In theory,
2271 * xbzrle can do better than compression.
2272 */
2273 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
2274 return true;
2275 }
2276
2277 return false;
2278 }
2279
2280 /*
2281 * try to compress the page before posting it out, return true if the page
2282 * has been properly handled by compression, otherwise needs other
2283 * paths to handle it
2284 */
2285 static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
2286 {
2287 if (!save_page_use_compression(rs)) {
2288 return false;
2289 }
2290
2291 /*
2292 * When starting the process of a new block, the first page of
2293 * the block should be sent out before other pages in the same
2294 * block, and all the pages in the last block should have been sent
2295 * out. Keeping this order is important, because the 'cont' flag
2296 * is used to avoid resending the block name.
2297 *
2298 * We post the first page as a normal page because compression will
2299 * take much CPU resource.
2300 */
2301 if (block != rs->last_sent_block) {
2302 flush_compressed_data(rs);
2303 return false;
2304 }
2305
2306 if (compress_page_with_multi_thread(rs, block, offset) > 0) {
2307 return true;
2308 }
2309
2310 compression_counters.busy++;
2311 return false;
2312 }
2313
2314 /**
2315 * ram_save_target_page: save one target page
2316 *
2317 * Returns the number of pages written
2318 *
2319 * @rs: current RAM state
2320 * @pss: data about the page we want to send
2321 * @last_stage: if we are at the completion stage
2322 */
2323 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
2324 bool last_stage)
2325 {
2326 RAMBlock *block = pss->block;
2327 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2328 int res;
2329
2330 if (control_save_page(rs, block, offset, &res)) {
2331 return res;
2332 }
2333
2334 if (save_compress_page(rs, block, offset)) {
2335 return 1;
2336 }
2337
2338 res = save_zero_page(rs, block, offset);
2339 if (res > 0) {
2340 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2341 * page would be stale
2342 */
2343 if (!save_page_use_compression(rs)) {
2344 XBZRLE_cache_lock();
2345 xbzrle_cache_zero_page(rs, block->offset + offset);
2346 XBZRLE_cache_unlock();
2347 }
2348 ram_release_pages(block->idstr, offset, res);
2349 return res;
2350 }
2351
2352 /*
2353 * do not use multifd for compression as the first page in the new
2354 * block should be posted out before sending the compressed page
2355 */
2356 if (!save_page_use_compression(rs) && migrate_use_multifd()) {
2357 return ram_save_multifd_page(rs, block, offset);
2358 }
2359
2360 return ram_save_page(rs, pss, last_stage);
2361 }
2362
2363 /**
2364 * ram_save_host_page: save a whole host page
2365 *
2366 * Starting at pss->page, send pages up to the end of the current host
2367 * page. It's valid for the initial offset to point into the middle of
2368 * a host page, in which case the remainder of the host page is sent.
2369 * Only dirty target pages are sent. Note that the host page size may
2370 * be a huge page for this block.
2371 * The saving stops at the boundary of the used_length of the block
2372 * if the RAMBlock isn't a multiple of the host page size.
2373 *
2374 * Returns the number of pages written or negative on error
2375 *
2376 * @rs: current RAM state
2377 * @pss: data about the page we want to send (identifies the block
2378 * and the starting target page within it)
2379 * @last_stage: if we are at the completion stage
2380 */
2381 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
2382 bool last_stage)
2383 {
2384 int tmppages, pages = 0;
2385 size_t pagesize_bits =
2386 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
2387
2388 if (!qemu_ram_is_migratable(pss->block)) {
2389 error_report("block %s should not be migrated !", pss->block->idstr);
2390 return 0;
2391 }
2392
2393 do {
2394 /* Check if the page is dirty and, if it is, send it */
2395 if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2396 pss->page++;
2397 continue;
2398 }
2399
2400 tmppages = ram_save_target_page(rs, pss, last_stage);
2401 if (tmppages < 0) {
2402 return tmppages;
2403 }
2404
2405 pages += tmppages;
2406 if (pss->block->unsentmap) {
2407 clear_bit(pss->page, pss->block->unsentmap);
2408 }
2409
2410 pss->page++;
2411 } while ((pss->page & (pagesize_bits - 1)) &&
2412 offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
2413
2414 /* The offset we leave with is the last one we looked at */
2415 pss->page--;
2416 return pages;
2417 }
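/*
 * For example, for a hugetlbfs-backed block with 2MiB host pages and 4KiB
 * target pages, pagesize_bits == 512.  If pss->page enters pointing at
 * target page 700, the loop above examines pages 700..1023 (sending the
 * dirty ones), stops when pss->page reaches 1024 (a host-page boundary),
 * and leaves pss->page at 1023 after the final decrement.
 */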
2418
2419 /**
2420 * ram_find_and_save_block: finds a dirty page and sends it to f
2421 *
2422 * Called within an RCU critical section.
2423 *
2424 * Returns the number of pages written where zero means no dirty pages,
2425 * or negative on error
2426 *
2427 * @rs: current RAM state
2428 * @last_stage: if we are at the completion stage
2429 *
2430 * On systems where host-page-size > target-page-size it will send all the
2431 * pages in a host page that are dirty.
2432 */
2433
2434 static int ram_find_and_save_block(RAMState *rs, bool last_stage)
2435 {
2436 PageSearchStatus pss;
2437 int pages = 0;
2438 bool again, found;
2439
2440 /* No dirty page as there is zero RAM */
2441 if (!ram_bytes_total()) {
2442 return pages;
2443 }
2444
2445 pss.block = rs->last_seen_block;
2446 pss.page = rs->last_page;
2447 pss.complete_round = false;
2448
2449 if (!pss.block) {
2450 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
2451 }
2452
2453 do {
2454 again = true;
2455 found = get_queued_page(rs, &pss);
2456
2457 if (!found) {
2458 /* priority queue empty, so just search for something dirty */
2459 found = find_dirty_block(rs, &pss, &again);
2460 }
2461
2462 if (found) {
2463 pages = ram_save_host_page(rs, &pss, last_stage);
2464 }
2465 } while (!pages && again);
2466
2467 rs->last_seen_block = pss.block;
2468 rs->last_page = pss.page;
2469
2470 return pages;
2471 }
2472
2473 void acct_update_position(QEMUFile *f, size_t size, bool zero)
2474 {
2475 uint64_t pages = size / TARGET_PAGE_SIZE;
2476
2477 if (zero) {
2478 ram_counters.duplicate += pages;
2479 } else {
2480 ram_counters.normal += pages;
2481 ram_counters.transferred += size;
2482 qemu_update_position(f, size);
2483 }
2484 }
2485
2486 uint64_t ram_bytes_total(void)
2487 {
2488 RAMBlock *block;
2489 uint64_t total = 0;
2490
2491 rcu_read_lock();
2492 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2493 total += block->used_length;
2494 }
2495 rcu_read_unlock();
2496 return total;
2497 }
2498
2499 static void xbzrle_load_setup(void)
2500 {
2501 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2502 }
2503
2504 static void xbzrle_load_cleanup(void)
2505 {
2506 g_free(XBZRLE.decoded_buf);
2507 XBZRLE.decoded_buf = NULL;
2508 }
2509
2510 static void ram_state_cleanup(RAMState **rsp)
2511 {
2512 if (*rsp) {
2513 migration_page_queue_free(*rsp);
2514 qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2515 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2516 g_free(*rsp);
2517 *rsp = NULL;
2518 }
2519 }
2520
2521 static void xbzrle_cleanup(void)
2522 {
2523 XBZRLE_cache_lock();
2524 if (XBZRLE.cache) {
2525 cache_fini(XBZRLE.cache);
2526 g_free(XBZRLE.encoded_buf);
2527 g_free(XBZRLE.current_buf);
2528 g_free(XBZRLE.zero_target_page);
2529 XBZRLE.cache = NULL;
2530 XBZRLE.encoded_buf = NULL;
2531 XBZRLE.current_buf = NULL;
2532 XBZRLE.zero_target_page = NULL;
2533 }
2534 XBZRLE_cache_unlock();
2535 }
2536
2537 static void ram_save_cleanup(void *opaque)
2538 {
2539 RAMState **rsp = opaque;
2540 RAMBlock *block;
2541
2542 /* The caller must hold the iothread lock or be in a bottom half, so
2543 * there is no write race against this migration_bitmap
2544 */
2545 memory_global_dirty_log_stop();
2546
2547 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2548 g_free(block->bmap);
2549 block->bmap = NULL;
2550 g_free(block->unsentmap);
2551 block->unsentmap = NULL;
2552 }
2553
2554 xbzrle_cleanup();
2555 compress_threads_save_cleanup();
2556 ram_state_cleanup(rsp);
2557 }
2558
2559 static void ram_state_reset(RAMState *rs)
2560 {
2561 rs->last_seen_block = NULL;
2562 rs->last_sent_block = NULL;
2563 rs->last_page = 0;
2564 rs->last_version = ram_list.version;
2565 rs->ram_bulk_stage = true;
2566 }
2567
2568 #define MAX_WAIT 50 /* ms, half buffered_file limit */
2569
2570 /*
2571 * 'expected' is the value you expect the bitmap mostly to be full
2572 * of; it won't bother printing lines that are all this value.
2573 * If 'todump' is null the migration bitmap is dumped.
2574 */
2575 void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
2576 unsigned long pages)
2577 {
2578 int64_t cur;
2579 int64_t linelen = 128;
2580 char linebuf[129];
2581
2582 for (cur = 0; cur < pages; cur += linelen) {
2583 int64_t curb;
2584 bool found = false;
2585 /*
2586 * Last line; catch the case where the line length
2587 * is longer than remaining ram
2588 */
2589 if (cur + linelen > pages) {
2590 linelen = pages - cur;
2591 }
2592 for (curb = 0; curb < linelen; curb++) {
2593 bool thisbit = test_bit(cur + curb, todump);
2594 linebuf[curb] = thisbit ? '1' : '.';
2595 found = found || (thisbit != expected);
2596 }
2597 if (found) {
2598 linebuf[curb] = '\0';
2599 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
2600 }
2601 }
2602 }
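/*
 * Each printed line covers 128 pages, '1' for a set bit and '.' for a
 * clear one; with expected == true only lines containing at least one
 * clear bit are printed, e.g.
 *
 *     0x00000080 : 1111.....1111111  (line shortened here for brevity)
 */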
2603
2604 /* **** functions for postcopy ***** */
2605
2606 void ram_postcopy_migrated_memory_release(MigrationState *ms)
2607 {
2608 struct RAMBlock *block;
2609
2610 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2611 unsigned long *bitmap = block->bmap;
2612 unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2613 unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
2614
2615 while (run_start < range) {
2616 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
2617 ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
2618 (run_end - run_start) << TARGET_PAGE_BITS);
2619 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2620 }
2621 }
2622 }
2623
2624 /**
2625 * postcopy_send_discard_bm_ram: discard a RAMBlock
2626 *
2627 * Returns zero on success
2628 *
2629 * Callback from postcopy_each_ram_send_discard for each RAMBlock
2630 * Note: At this point the 'unsentmap' is the processed bitmap combined
2631 * with the dirtymap; so a '1' means it's either dirty or unsent.
2632 *
2633 * @ms: current migration state
2634 * @pds: state for postcopy
2635 * @block: RAMBlock whose unsent pages (per its unsentmap) should
2636 * be announced for discard
2637 */
2638 static int postcopy_send_discard_bm_ram(MigrationState *ms,
2639 PostcopyDiscardState *pds,
2640 RAMBlock *block)
2641 {
2642 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
2643 unsigned long current;
2644 unsigned long *unsentmap = block->unsentmap;
2645
2646 for (current = 0; current < end; ) {
2647 unsigned long one = find_next_bit(unsentmap, end, current);
2648
2649 if (one <= end) {
2650 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
2651 unsigned long discard_length;
2652
2653 if (zero >= end) {
2654 discard_length = end - one;
2655 } else {
2656 discard_length = zero - one;
2657 }
2658 if (discard_length) {
2659 postcopy_discard_send_range(ms, pds, one, discard_length);
2660 }
2661 current = one + discard_length;
2662 } else {
2663 current = one;
2664 }
2665 }
2666
2667 return 0;
2668 }
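/*
 * For example, if the unsentmap has bits 10..25 set and bit 26 clear, the
 * loop above finds one == 10 and zero == 26, sends a single
 * postcopy_discard_send_range(ms, pds, 10, 16) covering those 16 pages,
 * and continues the scan from page 26.
 */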
2669
2670 /**
2671 * postcopy_each_ram_send_discard: discard all RAMBlocks
2672 *
2673 * Returns 0 for success or negative for error
2674 *
2675 * Utility for the outgoing postcopy code.
2676 * Calls postcopy_send_discard_bm_ram for each RAMBlock, which
2677 * works in terms of bitmap indexes within the block.
2678 * (qemu_ram_foreach_block ends up passing unscaled lengths, which
2679 * would mean the postcopy code would have to deal with target pages.)
2680 *
2681 * @ms: current migration state
2682 */
2683 static int postcopy_each_ram_send_discard(MigrationState *ms)
2684 {
2685 struct RAMBlock *block;
2686 int ret;
2687
2688 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2689 PostcopyDiscardState *pds =
2690 postcopy_discard_send_init(ms, block->idstr);
2691
2692 /*
2693 * Postcopy sends chunks of bitmap over the wire, but it
2694 * just needs indexes at this point, avoids it having
2695 * target page specific code.
2696 */
2697 ret = postcopy_send_discard_bm_ram(ms, pds, block);
2698 postcopy_discard_send_finish(ms, pds);
2699 if (ret) {
2700 return ret;
2701 }
2702 }
2703
2704 return 0;
2705 }
2706
2707 /**
2708 * postcopy_chunk_hostpages_pass: canonicalize bitmap in host pages
2709 *
2710 * Helper for postcopy_chunk_hostpages; it's called twice to
2711 * canonicalize the two bitmaps, that are similar, but one is
2712 * inverted.
2713 *
2714 * Postcopy requires that all target pages in a hostpage are dirty or
2715 * clean, not a mix. This function canonicalizes the bitmaps.
2716 *
2717 * @ms: current migration state
2718 * @unsent_pass: if true we need to canonicalize partially unsent host pages
2719 * otherwise we need to canonicalize partially dirty host pages
2720 * @block: block that contains the page we want to canonicalize
2721 * @pds: state for postcopy
2722 */
2723 static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
2724 RAMBlock *block,
2725 PostcopyDiscardState *pds)
2726 {
2727 RAMState *rs = ram_state;
2728 unsigned long *bitmap = block->bmap;
2729 unsigned long *unsentmap = block->unsentmap;
2730 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
2731 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2732 unsigned long run_start;
2733
2734 if (block->page_size == TARGET_PAGE_SIZE) {
2735 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2736 return;
2737 }
2738
2739 if (unsent_pass) {
2740 /* Find a sent page */
2741 run_start = find_next_zero_bit(unsentmap, pages, 0);
2742 } else {
2743 /* Find a dirty page */
2744 run_start = find_next_bit(bitmap, pages, 0);
2745 }
2746
2747 while (run_start < pages) {
2748 bool do_fixup = false;
2749 unsigned long fixup_start_addr;
2750 unsigned long host_offset;
2751
2752 /*
2753 * If the start of this run of pages is in the middle of a host
2754 * page, then we need to fixup this host page.
2755 */
2756 host_offset = run_start % host_ratio;
2757 if (host_offset) {
2758 do_fixup = true;
2759 run_start -= host_offset;
2760 fixup_start_addr = run_start;
2761 /* For the next pass */
2762 run_start = run_start + host_ratio;
2763 } else {
2764 /* Find the end of this run */
2765 unsigned long run_end;
2766 if (unsent_pass) {
2767 run_end = find_next_bit(unsentmap, pages, run_start + 1);
2768 } else {
2769 run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
2770 }
2771 /*
2772 * If the end isn't at the start of a host page, then the
2773 * run doesn't finish at the end of a host page
2774 * and we need to discard.
2775 */
2776 host_offset = run_end % host_ratio;
2777 if (host_offset) {
2778 do_fixup = true;
2779 fixup_start_addr = run_end - host_offset;
2780 /*
2781 * This host page has gone, the next loop iteration starts
2782 * from after the fixup
2783 */
2784 run_start = fixup_start_addr + host_ratio;
2785 } else {
2786 /*
2787 * No discards on this iteration, next loop starts from
2788 * next sent/dirty page
2789 */
2790 run_start = run_end + 1;
2791 }
2792 }
2793
2794 if (do_fixup) {
2795 unsigned long page;
2796
2797 /* Tell the destination to discard this page */
2798 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
2799 /* For the unsent_pass we:
2800 * discard partially sent pages
2801 * For the !unsent_pass (dirty) we:
2802 * discard partially dirty pages that were sent
2803 * (any partially sent pages were already discarded
2804 * by the previous unsent_pass)
2805 */
2806 postcopy_discard_send_range(ms, pds, fixup_start_addr,
2807 host_ratio);
2808 }
2809
2810 /* Clean up the bitmap */
2811 for (page = fixup_start_addr;
2812 page < fixup_start_addr + host_ratio; page++) {
2813 /* All pages in this host page are now not sent */
2814 set_bit(page, unsentmap);
2815
2816 /*
2817 * Remark them as dirty, updating the count for any pages
2818 * that weren't previously dirty.
2819 */
2820 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
2821 }
2822 }
2823
2824 if (unsent_pass) {
2825 /* Find the next sent page for the next iteration */
2826 run_start = find_next_zero_bit(unsentmap, pages, run_start);
2827 } else {
2828 /* Find the next dirty page for the next iteration */
2829 run_start = find_next_bit(bitmap, pages, run_start);
2830 }
2831 }
2832 }
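/*
 * Worked example (assuming 2MiB host pages and 4KiB target pages, so
 * host_ratio == 512): on the dirty pass, a run of dirty bits starting at
 * target page 1000 has host_offset == 1000 % 512 == 488, so the run is
 * rounded down to page 512; the whole host page [512, 1024) is
 * re-discarded (unless the unsent pass already covered it), every page in
 * it is marked unsent and dirty, and the scan resumes at page 1024.
 */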
2833
2834 /**
2835 * postcopy_chunk_hostpages: discard any partially sent host page
2836 *
2837 * Utility for the outgoing postcopy code.
2838 *
2839 * Discard any partially sent host-page sized chunks, and mark any
2840 * partially dirty host-page sized chunks as all dirty.  Here the
2841 * host page is the host page for the particular RAMBlock, i.e. it might be a huge page
2842 *
2843 * Returns zero on success
2844 *
2845 * @ms: current migration state
2846 * @block: block we want to work with
2847 */
2848 static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
2849 {
2850 PostcopyDiscardState *pds =
2851 postcopy_discard_send_init(ms, block->idstr);
2852
2853 /* First pass: Discard all partially sent host pages */
2854 postcopy_chunk_hostpages_pass(ms, true, block, pds);
2855 /*
2856 * Second pass: Ensure that all partially dirty host pages are made
2857 * fully dirty.
2858 */
2859 postcopy_chunk_hostpages_pass(ms, false, block, pds);
2860
2861 postcopy_discard_send_finish(ms, pds);
2862 return 0;
2863 }
2864
2865 /**
2866 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2867 *
2868 * Returns zero on success
2869 *
2870 * Transmit the set of pages to be discarded after precopy to the target;
2871 * these are pages that:
2872 * a) have been previously transmitted but are now dirty again
2873 * b) have never been transmitted; this ensures that
2874 * any pages on the destination that have been mapped by background
2875 * tasks get discarded (transparent huge pages are the specific concern)
2876 * Hopefully this is pretty sparse.
2877 *
2878 * @ms: current migration state
2879 */
2880 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
2881 {
2882 RAMState *rs = ram_state;
2883 RAMBlock *block;
2884 int ret;
2885
2886 rcu_read_lock();
2887
2888 /* This should be our last sync, the src is now paused */
2889 migration_bitmap_sync(rs);
2890
2891 /* Easiest way to make sure we don't resume in the middle of a host-page */
2892 rs->last_seen_block = NULL;
2893 rs->last_sent_block = NULL;
2894 rs->last_page = 0;
2895
2896 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2897 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2898 unsigned long *bitmap = block->bmap;
2899 unsigned long *unsentmap = block->unsentmap;
2900
2901 if (!unsentmap) {
2902 /* We don't have a safe way to resize the unsentmap, so
2903 * if the bitmap was resized it will be NULL at this
2904 * point.
2905 */
2906 error_report("migration ram resized during precopy phase");
2907 rcu_read_unlock();
2908 return -EINVAL;
2909 }
2910 /* Deal with TPS != HPS and huge pages */
2911 ret = postcopy_chunk_hostpages(ms, block);
2912 if (ret) {
2913 rcu_read_unlock();
2914 return ret;
2915 }
2916
2917 /*
2918 * Update the unsentmap to be unsentmap = unsentmap | dirty
2919 */
2920 bitmap_or(unsentmap, unsentmap, bitmap, pages);
2921 #ifdef DEBUG_POSTCOPY
2922 ram_debug_dump_bitmap(unsentmap, true, pages);
2923 #endif
2924 }
2925 trace_ram_postcopy_send_discard_bitmap();
2926
2927 ret = postcopy_each_ram_send_discard(ms);
2928 rcu_read_unlock();
2929
2930 return ret;
2931 }
2932
2933 /**
2934 * ram_discard_range: discard dirtied pages at the beginning of postcopy
2935 *
2936 * Returns zero on success
2937 *
2938 * @rbname: name of the RAMBlock of the request. NULL means the
2939 * same as the last one.
2940 * @start: starting address (in bytes) within the RAMBlock
2941 * @length: length (in bytes) to discard
2942 */
2943 int ram_discard_range(const char *rbname, uint64_t start, size_t length)
2944 {
2945 int ret = -1;
2946
2947 trace_ram_discard_range(rbname, start, length);
2948
2949 rcu_read_lock();
2950 RAMBlock *rb = qemu_ram_block_by_name(rbname);
2951
2952 if (!rb) {
2953 error_report("ram_discard_range: Failed to find block '%s'", rbname);
2954 goto err;
2955 }
2956
2957 /*
2958 * On the source VM, we don't need to update the received bitmap since
2959 * we don't even have one.
2960 */
2961 if (rb->receivedmap) {
2962 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
2963 length >> qemu_target_page_bits());
2964 }
2965
2966 ret = ram_block_discard_range(rb, start, length);
2967
2968 err:
2969 rcu_read_unlock();
2970
2971 return ret;
2972 }
2973
2974 /*
2975 * For every allocation, we will try not to crash the VM if the
2976 * allocation fails.
2977 */
2978 static int xbzrle_init(void)
2979 {
2980 Error *local_err = NULL;
2981
2982 if (!migrate_use_xbzrle()) {
2983 return 0;
2984 }
2985
2986 XBZRLE_cache_lock();
2987
2988 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
2989 if (!XBZRLE.zero_target_page) {
2990 error_report("%s: Error allocating zero page", __func__);
2991 goto err_out;
2992 }
2993
2994 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
2995 TARGET_PAGE_SIZE, &local_err);
2996 if (!XBZRLE.cache) {
2997 error_report_err(local_err);
2998 goto free_zero_page;
2999 }
3000
3001 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
3002 if (!XBZRLE.encoded_buf) {
3003 error_report("%s: Error allocating encoded_buf", __func__);
3004 goto free_cache;
3005 }
3006
3007 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
3008 if (!XBZRLE.current_buf) {
3009 error_report("%s: Error allocating current_buf", __func__);
3010 goto free_encoded_buf;
3011 }
3012
3013 /* We are all good */
3014 XBZRLE_cache_unlock();
3015 return 0;
3016
3017 free_encoded_buf:
3018 g_free(XBZRLE.encoded_buf);
3019 XBZRLE.encoded_buf = NULL;
3020 free_cache:
3021 cache_fini(XBZRLE.cache);
3022 XBZRLE.cache = NULL;
3023 free_zero_page:
3024 g_free(XBZRLE.zero_target_page);
3025 XBZRLE.zero_target_page = NULL;
3026 err_out:
3027 XBZRLE_cache_unlock();
3028 return -ENOMEM;
3029 }
3030
3031 static int ram_state_init(RAMState **rsp)
3032 {
3033 *rsp = g_try_new0(RAMState, 1);
3034
3035 if (!*rsp) {
3036 error_report("%s: Init ramstate fail", __func__);
3037 return -1;
3038 }
3039
3040 qemu_mutex_init(&(*rsp)->bitmap_mutex);
3041 qemu_mutex_init(&(*rsp)->src_page_req_mutex);
3042 QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
3043
3044 /*
3045 * Count the total number of pages used by ram blocks not including any
3046 * gaps due to alignment or unplugs.
3047 */
3048 (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
3049
3050 ram_state_reset(*rsp);
3051
3052 return 0;
3053 }
3054
3055 static void ram_list_init_bitmaps(void)
3056 {
3057 RAMBlock *block;
3058 unsigned long pages;
3059
3060 /* Skip setting bitmap if there is no RAM */
3061 if (ram_bytes_total()) {
3062 RAMBLOCK_FOREACH_MIGRATABLE(block) {
3063 pages = block->max_length >> TARGET_PAGE_BITS;
3064 block->bmap = bitmap_new(pages);
3065 bitmap_set(block->bmap, 0, pages);
3066 if (migrate_postcopy_ram()) {
3067 block->unsentmap = bitmap_new(pages);
3068 bitmap_set(block->unsentmap, 0, pages);
3069 }
3070 }
3071 }
3072 }
3073
3074 static void ram_init_bitmaps(RAMState *rs)
3075 {
3076 /* For memory_global_dirty_log_start below. */
3077 qemu_mutex_lock_iothread();
3078 qemu_mutex_lock_ramlist();
3079 rcu_read_lock();
3080
3081 ram_list_init_bitmaps();
3082 memory_global_dirty_log_start();
3083 migration_bitmap_sync(rs);
3084
3085 rcu_read_unlock();
3086 qemu_mutex_unlock_ramlist();
3087 qemu_mutex_unlock_iothread();
3088 }
3089
3090 static int ram_init_all(RAMState **rsp)
3091 {
3092 if (ram_state_init(rsp)) {
3093 return -1;
3094 }
3095
3096 if (xbzrle_init()) {
3097 ram_state_cleanup(rsp);
3098 return -1;
3099 }
3100
3101 ram_init_bitmaps(*rsp);
3102
3103 return 0;
3104 }
3105
3106 static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
3107 {
3108 RAMBlock *block;
3109 uint64_t pages = 0;
3110
3111 /*
3112 * Postcopy is not using xbzrle/compression, so no need for that.
3113 * Also, since the source is already halted, we don't need to care
3114 * about dirty page logging either.
3115 */
3116
3117 RAMBLOCK_FOREACH_MIGRATABLE(block) {
3118 pages += bitmap_count_one(block->bmap,
3119 block->used_length >> TARGET_PAGE_BITS);
3120 }
3121
3122 /* This may not be aligned with current bitmaps. Recalculate. */
3123 rs->migration_dirty_pages = pages;
3124
3125 rs->last_seen_block = NULL;
3126 rs->last_sent_block = NULL;
3127 rs->last_page = 0;
3128 rs->last_version = ram_list.version;
3129 /*
3130 * Disable the bulk stage, otherwise we'll resend the whole RAM no
3131 * matter what we have sent.
3132 */
3133 rs->ram_bulk_stage = false;
3134
3135 /* Update RAMState cache of output QEMUFile */
3136 rs->f = out;
3137
3138 trace_ram_state_resume_prepare(pages);
3139 }
3140
3141 /*
3142 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has a
3143 * long-running RCU critical section. When RCU reclaims in the code
3144 * start to become numerous, it will be necessary to reduce the
3145 * granularity of these critical sections.
3146 */
3147
3148 /**
3149 * ram_save_setup: Setup RAM for migration
3150 *
3151 * Returns zero to indicate success and negative for error
3152 *
3153 * @f: QEMUFile where to send the data
3154 * @opaque: RAMState pointer
3155 */
3156 static int ram_save_setup(QEMUFile *f, void *opaque)
3157 {
3158 RAMState **rsp = opaque;
3159 RAMBlock *block;
3160
3161 if (compress_threads_save_setup()) {
3162 return -1;
3163 }
3164
3165 /* migration has already setup the bitmap, reuse it. */
3166 if (!migration_in_colo_state()) {
3167 if (ram_init_all(rsp) != 0) {
3168 compress_threads_save_cleanup();
3169 return -1;
3170 }
3171 }
3172 (*rsp)->f = f;
3173
3174 rcu_read_lock();
3175
3176 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
3177
3178 RAMBLOCK_FOREACH_MIGRATABLE(block) {
3179 qemu_put_byte(f, strlen(block->idstr));
3180 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
3181 qemu_put_be64(f, block->used_length);
3182 if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
3183 qemu_put_be64(f, block->page_size);
3184 }
3185 }
3186
3187 rcu_read_unlock();
3188
3189 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
3190 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
3191
3192 multifd_send_sync_main();
3193 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3194 qemu_fflush(f);
3195
3196 return 0;
3197 }
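/*
 * The setup section emitted above is, in order: a be64 of
 * ram_bytes_total() with RAM_SAVE_FLAG_MEM_SIZE set; then, for each
 * migratable block, its idstr length byte, the idstr bytes, a be64
 * used_length and (for postcopy with a non-host-sized page size) a be64
 * page_size; terminated by a be64 RAM_SAVE_FLAG_EOS.
 */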
3198
3199 /**
3200 * ram_save_iterate: iterative stage for migration
3201 *
3202 * Returns zero to indicate success and negative for error
3203 *
3204 * @f: QEMUFile where to send the data
3205 * @opaque: RAMState pointer
3206 */
3207 static int ram_save_iterate(QEMUFile *f, void *opaque)
3208 {
3209 RAMState **temp = opaque;
3210 RAMState *rs = *temp;
3211 int ret;
3212 int i;
3213 int64_t t0;
3214 int done = 0;
3215
3216 if (blk_mig_bulk_active()) {
3217 /* Avoid transferring RAM during the bulk phase of block migration, as
3218 * the bulk phase will usually take a long time and transferring
3219 * RAM updates during that time is pointless. */
3220 goto out;
3221 }
3222
3223 rcu_read_lock();
3224 if (ram_list.version != rs->last_version) {
3225 ram_state_reset(rs);
3226 }
3227
3228 /* Read version before ram_list.blocks */
3229 smp_rmb();
3230
3231 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
3232
3233 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
3234 i = 0;
3235 while ((ret = qemu_file_rate_limit(f)) == 0 ||
3236 !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
3237 int pages;
3238
3239 if (qemu_file_get_error(f)) {
3240 break;
3241 }
3242
3243 pages = ram_find_and_save_block(rs, false);
3244 /* no more pages to send */
3245 if (pages == 0) {
3246 done = 1;
3247 break;
3248 }
3249
3250 if (pages < 0) {
3251 qemu_file_set_error(f, pages);
3252 break;
3253 }
3254
3255 rs->target_page_count += pages;
3256
3257 /* we want to check in the 1st loop, just in case it was the 1st time
3258 and we had to sync the dirty bitmap.
3259 qemu_clock_get_ns() is a bit expensive, so we only check once
3260 every few iterations
3261 */
3262 if ((i & 63) == 0) {
3263 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
3264 if (t1 > MAX_WAIT) {
3265 trace_ram_save_iterate_big_wait(t1, i);
3266 break;
3267 }
3268 }
3269 i++;
3270 }
3271 rcu_read_unlock();
3272
3273 /*
3274 * Must occur before EOS (or any QEMUFile operation)
3275 * because of RDMA protocol.
3276 */
3277 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
3278
3279 multifd_send_sync_main();
3280 out:
3281 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3282 qemu_fflush(f);
3283 ram_counters.transferred += 8;
3284
3285 ret = qemu_file_get_error(f);
3286 if (ret < 0) {
3287 return ret;
3288 }
3289
3290 return done;
3291 }
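/*
 * Note the clock is consulted only once every 64 iterations of the send
 * loop above ((i & 63) == 0); once more than MAX_WAIT (50ms) has elapsed
 * since t0, the loop breaks out and lets the caller run again.
 */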
3292
3293 /**
3294 * ram_save_complete: function called to send the remaining amount of ram
3295 *
3296 * Returns zero to indicate success or negative on error
3297 *
3298 * Called with iothread lock
3299 *
3300 * @f: QEMUFile where to send the data
3301 * @opaque: RAMState pointer
3302 */
3303 static int ram_save_complete(QEMUFile *f, void *opaque)
3304 {
3305 RAMState **temp = opaque;
3306 RAMState *rs = *temp;
3307 int ret = 0;
3308
3309 rcu_read_lock();
3310
3311 if (!migration_in_postcopy()) {
3312 migration_bitmap_sync(rs);
3313 }
3314
3315 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
3316
3317 /* try transferring iterative blocks of memory */
3318
3319 /* flush all remaining blocks regardless of rate limiting */
3320 while (true) {
3321 int pages;
3322
3323 pages = ram_find_and_save_block(rs, !migration_in_colo_state());
3324 /* no more blocks to send */
3325 if (pages == 0) {
3326 break;
3327 }
3328 if (pages < 0) {
3329 ret = pages;
3330 break;
3331 }
3332 }
3333
3334 flush_compressed_data(rs);
3335 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
3336
3337 rcu_read_unlock();
3338
3339 multifd_send_sync_main();
3340 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3341 qemu_fflush(f);
3342
3343 return ret;
3344 }
3345
3346 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
3347 uint64_t *res_precopy_only,
3348 uint64_t *res_compatible,
3349 uint64_t *res_postcopy_only)
3350 {
3351 RAMState **temp = opaque;
3352 RAMState *rs = *temp;
3353 uint64_t remaining_size;
3354
3355 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3356
3357 if (!migration_in_postcopy() &&
3358 remaining_size < max_size) {
3359 qemu_mutex_lock_iothread();
3360 rcu_read_lock();
3361 migration_bitmap_sync(rs);
3362 rcu_read_unlock();
3363 qemu_mutex_unlock_iothread();
3364 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3365 }
3366
3367 if (migrate_postcopy_ram()) {
3368 /* We can do postcopy, and all the data is postcopiable */
3369 *res_compatible += remaining_size;
3370 } else {
3371 *res_precopy_only += remaining_size;
3372 }
3373 }
3374
3375 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3376 {
3377 unsigned int xh_len;
3378 int xh_flags;
3379 uint8_t *loaded_data;
3380
3381 /* extract RLE header */
3382 xh_flags = qemu_get_byte(f);
3383 xh_len = qemu_get_be16(f);
3384
3385 if (xh_flags != ENCODING_FLAG_XBZRLE) {
3386 error_report("Failed to load XBZRLE page - wrong compression!");
3387 return -1;
3388 }
3389
3390 if (xh_len > TARGET_PAGE_SIZE) {
3391 error_report("Failed to load XBZRLE page - len overflow!");
3392 return -1;
3393 }
3394 loaded_data = XBZRLE.decoded_buf;
3395 /* load data and decode */
3396 /* it can change loaded_data to point to an internal buffer */
3397 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
3398
3399 /* decode RLE */
3400 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
3401 TARGET_PAGE_SIZE) == -1) {
3402 error_report("Failed to load XBZRLE page - decode error!");
3403 return -1;
3404 }
3405
3406 return 0;
3407 }
3408
3409 /**
3410 * ram_block_from_stream: read a RAMBlock id from the migration stream
3411 *
3412 * Must be called from within a rcu critical section.
3413 *
3414 * Returns a pointer from within the RCU-protected ram_list.
3415 *
3416 * @f: QEMUFile where to read the data from
3417 * @flags: Page flags (mostly to see if it's a continuation of previous block)
3418 */
3419 static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
3420 {
3421 static RAMBlock *block = NULL;
3422 char id[256];
3423 uint8_t len;
3424
3425 if (flags & RAM_SAVE_FLAG_CONTINUE) {
3426 if (!block) {
3427 error_report("Ack, bad migration stream!");
3428 return NULL;
3429 }
3430 return block;
3431 }
3432
3433 len = qemu_get_byte(f);
3434 qemu_get_buffer(f, (uint8_t *)id, len);
3435 id[len] = 0;
3436
3437 block = qemu_ram_block_by_name(id);
3438 if (!block) {
3439 error_report("Can't find block %s", id);
3440 return NULL;
3441 }
3442
3443 if (!qemu_ram_is_migratable(block)) {
3444 error_report("block %s should not be migrated !", id);
3445 return NULL;
3446 }
3447
3448 return block;
3449 }
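/*
 * E.g. the first page sent from a block carries the block's idstr on the
 * wire; subsequent pages from the same block set RAM_SAVE_FLAG_CONTINUE
 * and rely on the cached 'block' pointer above, so the name is only
 * transmitted again when the sender switches blocks.
 */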
3450
3451 static inline void *host_from_ram_block_offset(RAMBlock *block,
3452 ram_addr_t offset)
3453 {
3454 if (!offset_in_ramblock(block, offset)) {
3455 return NULL;
3456 }
3457
3458 return block->host + offset;
3459 }
3460
3461 static inline void *colo_cache_from_block_offset(RAMBlock *block,
3462 ram_addr_t offset)
3463 {
3464 if (!offset_in_ramblock(block, offset)) {
3465 return NULL;
3466 }
3467 if (!block->colo_cache) {
3468 error_report("%s: colo_cache is NULL in block :%s",
3469 __func__, block->idstr);
3470 return NULL;
3471 }
3472
3473 /*
3474 * During a colo checkpoint, we need a bitmap of these migrated pages.
3475 * It helps us decide which pages in the ram cache should be flushed
3476 * into the VM's RAM later.
3477 */
3478 if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
3479 ram_state->migration_dirty_pages++;
3480 }
3481 return block->colo_cache + offset;
3482 }
3483
3484 /**
3485 * ram_handle_compressed: handle the zero page case
3486 *
3487 * If a page (or a whole RDMA chunk) has been
3488 * determined to be zero, then zap it.
3489 *
3490 * @host: host address for the zero page
3491 * @ch: what the page is filled from. We only support zero
3492 * @size: size of the zero page
3493 */
3494 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
3495 {
3496 if (ch != 0 || !is_zero_range(host, size)) {
3497 memset(host, ch, size);
3498 }
3499 }
3500
3501 /* return the size after decompression, or negative value on error */
3502 static int
3503 qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
3504 const uint8_t *source, size_t source_len)
3505 {
3506 int err;
3507
3508 err = inflateReset(stream);
3509 if (err != Z_OK) {
3510 return -1;
3511 }
3512
3513 stream->avail_in = source_len;
3514 stream->next_in = (uint8_t *)source;
3515 stream->avail_out = dest_len;
3516 stream->next_out = dest;
3517
3518 err = inflate(stream, Z_NO_FLUSH);
3519 if (err != Z_STREAM_END) {
3520 return -1;
3521 }
3522
3523 return stream->total_out;
3524 }
3525
3526 static void *do_data_decompress(void *opaque)
3527 {
3528 DecompressParam *param = opaque;
3529 unsigned long pagesize;
3530 uint8_t *des;
3531 int len, ret;
3532
3533 qemu_mutex_lock(&param->mutex);
3534 while (!param->quit) {
3535 if (param->des) {
3536 des = param->des;
3537 len = param->len;
3538 param->des = 0;
3539 qemu_mutex_unlock(&param->mutex);
3540
3541 pagesize = TARGET_PAGE_SIZE;
3542
3543 ret = qemu_uncompress_data(&param->stream, des, pagesize,
3544 param->compbuf, len);
3545 if (ret < 0 && migrate_get_current()->decompress_error_check) {
3546 error_report("decompress data failed");
3547 qemu_file_set_error(decomp_file, ret);
3548 }
3549
3550 qemu_mutex_lock(&decomp_done_lock);
3551 param->done = true;
3552 qemu_cond_signal(&decomp_done_cond);
3553 qemu_mutex_unlock(&decomp_done_lock);
3554
3555 qemu_mutex_lock(&param->mutex);
3556 } else {
3557 qemu_cond_wait(&param->cond, &param->mutex);
3558 }
3559 }
3560 qemu_mutex_unlock(&param->mutex);
3561
3562 return NULL;
3563 }
3564
3565 static int wait_for_decompress_done(void)
3566 {
3567 int idx, thread_count;
3568
3569 if (!migrate_use_compression()) {
3570 return 0;
3571 }
3572
3573 thread_count = migrate_decompress_threads();
3574 qemu_mutex_lock(&decomp_done_lock);
3575 for (idx = 0; idx < thread_count; idx++) {
3576 while (!decomp_param[idx].done) {
3577 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3578 }
3579 }
3580 qemu_mutex_unlock(&decomp_done_lock);
3581 return qemu_file_get_error(decomp_file);
3582 }
3583
3584 static void compress_threads_load_cleanup(void)
3585 {
3586 int i, thread_count;
3587
3588 if (!migrate_use_compression()) {
3589 return;
3590 }
3591 thread_count = migrate_decompress_threads();
3592 for (i = 0; i < thread_count; i++) {
3593 /*
3594 * we use it as an indicator of whether the thread was
3595 * properly initialized or not
3596 */
3597 if (!decomp_param[i].compbuf) {
3598 break;
3599 }
3600
3601 qemu_mutex_lock(&decomp_param[i].mutex);
3602 decomp_param[i].quit = true;
3603 qemu_cond_signal(&decomp_param[i].cond);
3604 qemu_mutex_unlock(&decomp_param[i].mutex);
3605 }
3606 for (i = 0; i < thread_count; i++) {
3607 if (!decomp_param[i].compbuf) {
3608 break;
3609 }
3610
3611 qemu_thread_join(decompress_threads + i);
3612 qemu_mutex_destroy(&decomp_param[i].mutex);
3613 qemu_cond_destroy(&decomp_param[i].cond);
3614 inflateEnd(&decomp_param[i].stream);
3615 g_free(decomp_param[i].compbuf);
3616 decomp_param[i].compbuf = NULL;
3617 }
3618 g_free(decompress_threads);
3619 g_free(decomp_param);
3620 decompress_threads = NULL;
3621 decomp_param = NULL;
3622 decomp_file = NULL;
3623 }
3624
3625 static int compress_threads_load_setup(QEMUFile *f)
3626 {
3627 int i, thread_count;
3628
3629 if (!migrate_use_compression()) {
3630 return 0;
3631 }
3632
3633 thread_count = migrate_decompress_threads();
3634 decompress_threads = g_new0(QemuThread, thread_count);
3635 decomp_param = g_new0(DecompressParam, thread_count);
3636 qemu_mutex_init(&decomp_done_lock);
3637 qemu_cond_init(&decomp_done_cond);
3638 decomp_file = f;
3639 for (i = 0; i < thread_count; i++) {
3640 if (inflateInit(&decomp_param[i].stream) != Z_OK) {
3641 goto exit;
3642 }
3643
3644 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
3645 qemu_mutex_init(&decomp_param[i].mutex);
3646 qemu_cond_init(&decomp_param[i].cond);
3647 decomp_param[i].done = true;
3648 decomp_param[i].quit = false;
3649 qemu_thread_create(decompress_threads + i, "decompress",
3650 do_data_decompress, decomp_param + i,
3651 QEMU_THREAD_JOINABLE);
3652 }
3653 return 0;
3654 exit:
3655 compress_threads_load_cleanup();
3656 return -1;
3657 }
3658
3659 static void decompress_data_with_multi_threads(QEMUFile *f,
3660 void *host, int len)
3661 {
3662 int idx, thread_count;
3663
3664 thread_count = migrate_decompress_threads();
3665 qemu_mutex_lock(&decomp_done_lock);
3666 while (true) {
3667 for (idx = 0; idx < thread_count; idx++) {
3668 if (decomp_param[idx].done) {
3669 decomp_param[idx].done = false;
3670 qemu_mutex_lock(&decomp_param[idx].mutex);
3671 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
3672 decomp_param[idx].des = host;
3673 decomp_param[idx].len = len;
3674 qemu_cond_signal(&decomp_param[idx].cond);
3675 qemu_mutex_unlock(&decomp_param[idx].mutex);
3676 break;
3677 }
3678 }
3679 if (idx < thread_count) {
3680 break;
3681 } else {
3682 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3683 }
3684 }
3685 qemu_mutex_unlock(&decomp_done_lock);
3686 }
3687
3688 /*
3689 * colo cache: this is for the secondary VM, we cache the whole
3690 * memory of the secondary VM, and the global lock must be held
3691 * when calling this helper.
3692 */
3693 int colo_init_ram_cache(void)
3694 {
3695 RAMBlock *block;
3696
3697 rcu_read_lock();
3698 RAMBLOCK_FOREACH_MIGRATABLE(block) {
3699 block->colo_cache = qemu_anon_ram_alloc(block->used_length,
3700 NULL,
3701 false);
3702 if (!block->colo_cache) {
3703 error_report("%s: Can't alloc memory for COLO cache of block %s,"
3704 "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
3705 block->used_length);
3706 goto out_locked;
3707 }
3708 memcpy(block->colo_cache, block->host, block->used_length);
3709 }
3710 rcu_read_unlock();
3711 /*
3712 * Record the dirty pages that were sent by the PVM; we use this dirty bitmap
3713 * to decide which pages in the cache should be flushed into the SVM's RAM. Here
3714 * we use the same name 'ram_bitmap' as for migration.
3715 */
3716 if (ram_bytes_total()) {
3717 RAMBlock *block;
3718
3719 RAMBLOCK_FOREACH_MIGRATABLE(block) {
3720 unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
3721
3722 block->bmap = bitmap_new(pages);
3723 bitmap_set(block->bmap, 0, pages);
3724 }
3725 }
3726 ram_state = g_new0(RAMState, 1);
3727 ram_state->migration_dirty_pages = 0;
3728 memory_global_dirty_log_start();
3729
3730 return 0;
3731
3732 out_locked:
3733
3734 RAMBLOCK_FOREACH_MIGRATABLE(block) {
3735 if (block->colo_cache) {
3736 qemu_anon_ram_free(block->colo_cache, block->used_length);
3737 block->colo_cache = NULL;
3738 }
3739 }
3740
3741 rcu_read_unlock();
3742 return -errno;
3743 }
3744
3745 /* The global lock must be held when calling this helper */
3746 void colo_release_ram_cache(void)
3747 {
3748 RAMBlock *block;
3749
3750 memory_global_dirty_log_stop();
3751 RAMBLOCK_FOREACH_MIGRATABLE(block) {
3752 g_free(block->bmap);
3753 block->bmap = NULL;
3754 }
3755
3756 rcu_read_lock();
3757
3758 RAMBLOCK_FOREACH_MIGRATABLE(block) {
3759 if (block->colo_cache) {
3760 qemu_anon_ram_free(block->colo_cache, block->used_length);
3761 block->colo_cache = NULL;
3762 }
3763 }
3764
3765 rcu_read_unlock();
3766 g_free(ram_state);
3767 ram_state = NULL;
3768 }
3769
3770 /**
3771 * ram_load_setup: Setup RAM for migration incoming side
3772 *
3773 * Returns zero to indicate success and negative for error
3774 *
3775 * @f: QEMUFile where to receive the data
3776 * @opaque: RAMState pointer
3777 */
3778 static int ram_load_setup(QEMUFile *f, void *opaque)
3779 {
3780 if (compress_threads_load_setup(f)) {
3781 return -1;
3782 }
3783
3784 xbzrle_load_setup();
3785 ramblock_recv_map_init();
3786
3787 return 0;
3788 }
3789
3790 static int ram_load_cleanup(void *opaque)
3791 {
3792 RAMBlock *rb;
3793
3794 RAMBLOCK_FOREACH_MIGRATABLE(rb) {
3795 if (ramblock_is_pmem(rb)) {
3796 pmem_persist(rb->host, rb->used_length);
3797 }
3798 }
3799
3800 xbzrle_load_cleanup();
3801 compress_threads_load_cleanup();
3802
3803 RAMBLOCK_FOREACH_MIGRATABLE(rb) {
3804 g_free(rb->receivedmap);
3805 rb->receivedmap = NULL;
3806 }
3807
3808 return 0;
3809 }
3810
3811 /**
3812 * ram_postcopy_incoming_init: allocate postcopy data structures
3813 *
3814 * Returns 0 for success and negative if there was one error
3815 *
3816 * @mis: current migration incoming state
3817 *
3818 * Allocate data structures etc needed by incoming migration with
3819 * postcopy-ram. postcopy-ram's similarly named
3820 * postcopy_ram_incoming_init does the work.
3821 */
3822 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
3823 {
3824 return postcopy_ram_incoming_init(mis);
3825 }
3826
3827 /**
3828 * ram_load_postcopy: load a page in postcopy case
3829 *
3830 * Returns 0 for success or -errno in case of error
3831 *
3832 * Called in postcopy mode by ram_load().
3833 * rcu_read_lock is taken prior to this being called.
3834 *
3835 * @f: QEMUFile where to send the data
3836 */
3837 static int ram_load_postcopy(QEMUFile *f)
3838 {
3839 int flags = 0, ret = 0;
3840 bool place_needed = false;
3841 bool matches_target_page_size = false;
3842 MigrationIncomingState *mis = migration_incoming_get_current();
3843 /* Temporary page that is later 'placed' */
3844 void *postcopy_host_page = postcopy_get_tmp_page(mis);
3845 void *last_host = NULL;
3846 bool all_zero = false;
3847
3848 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
3849 ram_addr_t addr;
3850 void *host = NULL;
3851 void *page_buffer = NULL;
3852 void *place_source = NULL;
3853 RAMBlock *block = NULL;
3854 uint8_t ch;
3855
3856 addr = qemu_get_be64(f);
3857
3858 /*
3859 * If there is a qemu file error, we should stop here; "addr"
3860 * may then be invalid
3861 */
3862 ret = qemu_file_get_error(f);
3863 if (ret) {
3864 break;
3865 }
3866
3867 flags = addr & ~TARGET_PAGE_MASK;
3868 addr &= TARGET_PAGE_MASK;
3869
3870 trace_ram_load_postcopy_loop((uint64_t)addr, flags);
3871 place_needed = false;
3872 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
3873 block = ram_block_from_stream(f, flags);
3874
3875 host = host_from_ram_block_offset(block, addr);
3876 if (!host) {
3877 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3878 ret = -EINVAL;
3879 break;
3880 }
3881 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
3882 /*
3883 * Postcopy requires that we place whole host pages atomically;
3884 * these may be huge pages for RAMBlocks that are backed by
3885 * hugetlbfs.
3886 * To make it atomic, the data is read into a temporary page
3887 * that's moved into place later.
3888 * The migration protocol uses, possibly smaller, target-pages
3889 * however the source ensures it always sends all the components
3890 * of a host page in order.
3891 */
3892 page_buffer = postcopy_host_page +
3893 ((uintptr_t)host & (block->page_size - 1));
3894 /* If all TP are zero then we can optimise the place */
3895 if (!((uintptr_t)host & (block->page_size - 1))) {
3896 all_zero = true;
3897 } else {
3898 /* not the 1st TP within the HP */
3899 if (host != (last_host + TARGET_PAGE_SIZE)) {
3900 error_report("Non-sequential target page %p/%p",
3901 host, last_host);
3902 ret = -EINVAL;
3903 break;
3904 }
3905 }
3906
3907
3908 /*
3909 * If it's the last part of a host page then we place the host
3910 * page
3911 */
3912 place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
3913 (block->page_size - 1)) == 0;
3914 place_source = postcopy_host_page;
3915 }
3916 last_host = host;
3917
3918 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
3919 case RAM_SAVE_FLAG_ZERO:
3920 ch = qemu_get_byte(f);
3921 memset(page_buffer, ch, TARGET_PAGE_SIZE);
3922 if (ch) {
3923 all_zero = false;
3924 }
3925 break;
3926
3927 case RAM_SAVE_FLAG_PAGE:
3928 all_zero = false;
3929 if (!matches_target_page_size) {
3930 /* For huge pages, we always use temporary buffer */
3931 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
3932 } else {
3933 /*
3934 * For small pages that match the target page size, we
3935 * avoid the qemu_file copy. Instead we directly use
3936 * the buffer of QEMUFile to place the page. Note: we
3937 * must not do any other QEMUFile operation before using
3938 * that buffer, to make sure it is still valid when
3939 * placing the page.
3940 */
3941 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
3942 TARGET_PAGE_SIZE);
3943 }
3944 break;
3945 case RAM_SAVE_FLAG_EOS:
3946 /* normal exit */
3947 multifd_recv_sync_main();
3948 break;
3949 default:
3950 error_report("Unknown combination of migration flags: %#x"
3951 " (postcopy mode)", flags);
3952 ret = -EINVAL;
3953 break;
3954 }
3955
3956 /* Check for any possible file errors */
3957 if (!ret && qemu_file_get_error(f)) {
3958 ret = qemu_file_get_error(f);
3959 }
3960
3961 if (!ret && place_needed) {
3962 /* This gets called at the last target page in the host page */
3963 void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;
3964
3965 if (all_zero) {
3966 ret = postcopy_place_page_zero(mis, place_dest,
3967 block);
3968 } else {
3969 ret = postcopy_place_page(mis, place_dest,
3970 place_source, block);
3971 }
3972 }
3973 }
3974
3975 return ret;
3976 }
3977
3978 static bool postcopy_is_advised(void)
3979 {
3980 PostcopyState ps = postcopy_state_get();
3981 return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
3982 }
3983
3984 static bool postcopy_is_running(void)
3985 {
3986 PostcopyState ps = postcopy_state_get();
3987 return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
3988 }
3989
3990 /*
3991 * Flush the contents of the RAM cache into the SVM's memory.
3992 * Only flush the pages that have been dirtied by the PVM, the SVM, or both.
3993 */
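/*
 * Outline of the implementation below: first fold the global dirty log
 * into the migration bitmap of every migratable block, then walk that
 * bitmap and copy each dirty page from block->colo_cache into the
 * guest's memory at block->host.
 */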
3994 static void colo_flush_ram_cache(void)
3995 {
3996 RAMBlock *block = NULL;
3997 void *dst_host;
3998 void *src_host;
3999 unsigned long offset = 0;
4000
4001 memory_global_dirty_log_sync();
4002 rcu_read_lock();
4003 RAMBLOCK_FOREACH_MIGRATABLE(block) {
4004 migration_bitmap_sync_range(ram_state, block, 0, block->used_length);
4005 }
4006 rcu_read_unlock();
4007
4008 trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
4009 rcu_read_lock();
4010 block = QLIST_FIRST_RCU(&ram_list.blocks);
4011
4012 while (block) {
4013 offset = migration_bitmap_find_dirty(ram_state, block, offset);
4014
4015 if (offset << TARGET_PAGE_BITS >= block->used_length) {
4016 offset = 0;
4017 block = QLIST_NEXT_RCU(block, next);
4018 } else {
4019 migration_bitmap_clear_dirty(ram_state, block, offset);
4020 dst_host = block->host + (offset << TARGET_PAGE_BITS);
4021 src_host = block->colo_cache + (offset << TARGET_PAGE_BITS);
4022 memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
4023 }
4024 }
4025
4026 rcu_read_unlock();
4027 trace_colo_flush_ram_cache_end();
4028 }
4029
4030 static int ram_load(QEMUFile *f, void *opaque, int version_id)
4031 {
4032 int flags = 0, ret = 0, invalid_flags = 0;
4033 static uint64_t seq_iter;
4034 int len = 0;
4035 /*
4036 * If the system is running in postcopy mode, page inserts into host memory
4037 * must be atomic.
4038 */
4039 bool postcopy_running = postcopy_is_running();
4040 /* ADVISE comes earlier; it shows that the source has the postcopy capability enabled */
4041 bool postcopy_advised = postcopy_is_advised();
4042
4043 seq_iter++;
4044
4045 if (version_id != 4) {
4046 ret = -EINVAL;
4047 }
4048
4049 if (!migrate_use_compression()) {
4050 invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
4051 }
4052 /* This RCU critical section can be very long running.
4053 * When RCU reclamation in this code path becomes frequent,
4054 * it will be necessary to reduce the granularity of this
4055 * critical section.
4056 */
4057 rcu_read_lock();
4058
4059 if (postcopy_running) {
4060 ret = ram_load_postcopy(f);
4061 }
4062
4063 while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
4064 ram_addr_t addr, total_ram_bytes;
4065 void *host = NULL;
4066 uint8_t ch;
4067
4068 addr = qemu_get_be64(f);
4069 flags = addr & ~TARGET_PAGE_MASK;
4070 addr &= TARGET_PAGE_MASK;
4071
4072 if (flags & invalid_flags) {
4073 if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
4074 error_report("Received an unexpected compressed page");
4075 }
4076
4077 ret = -EINVAL;
4078 break;
4079 }
4080
4081 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
4082 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
4083 RAMBlock *block = ram_block_from_stream(f, flags);
4084
4085 /*
4086 * After going into COLO, we should load the page into the colo_cache.
4087 */
4088 if (migration_incoming_in_colo_state()) {
4089 host = colo_cache_from_block_offset(block, addr);
4090 } else {
4091 host = host_from_ram_block_offset(block, addr);
4092 }
4093 if (!host) {
4094 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4095 ret = -EINVAL;
4096 break;
4097 }
4098
4099 if (!migration_incoming_in_colo_state()) {
4100 ramblock_recv_bitmap_set(block, host);
4101 }
4102
4103 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
4104 }
4105
4106 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4107 case RAM_SAVE_FLAG_MEM_SIZE:
4108 /* Synchronize RAM block list */
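/*
 * Stream layout consumed by the loop below (derived from this parsing
 * code, not a normative description); repeated until total_ram_bytes
 * is exhausted:
 *   u8      len         - length of the RAMBlock id string
 *   u8[len] id          - RAMBlock id
 *   be64    length      - the block's used_length in bytes
 *   be64    page_size   - only if postcopy was advised and the block's
 *                         page size differs from qemu_host_page_size
 */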
4109 total_ram_bytes = addr;
4110 while (!ret && total_ram_bytes) {
4111 RAMBlock *block;
4112 char id[256];
4113 ram_addr_t length;
4114
4115 len = qemu_get_byte(f);
4116 qemu_get_buffer(f, (uint8_t *)id, len);
4117 id[len] = 0;
4118 length = qemu_get_be64(f);
4119
4120 block = qemu_ram_block_by_name(id);
4121 if (block && !qemu_ram_is_migratable(block)) {
4122 error_report("block %s should not be migrated !", id);
4123 ret = -EINVAL;
4124 } else if (block) {
4125 if (length != block->used_length) {
4126 Error *local_err = NULL;
4127
4128 ret = qemu_ram_resize(block, length,
4129 &local_err);
4130 if (local_err) {
4131 error_report_err(local_err);
4132 }
4133 }
4134 /* For postcopy we need to check that hugepage sizes match */
4135 if (postcopy_advised &&
4136 block->page_size != qemu_host_page_size) {
4137 uint64_t remote_page_size = qemu_get_be64(f);
4138 if (remote_page_size != block->page_size) {
4139 error_report("Mismatched RAM page size %s "
4140 "(local) %zd != %" PRId64,
4141 id, block->page_size,
4142 remote_page_size);
4143 ret = -EINVAL;
4144 }
4145 }
4146 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
4147 block->idstr);
4148 } else {
4149 error_report("Unknown ramblock \"%s\", cannot "
4150 "accept migration", id);
4151 ret = -EINVAL;
4152 }
4153
4154 total_ram_bytes -= length;
4155 }
4156 break;
4157
4158 case RAM_SAVE_FLAG_ZERO:
4159 ch = qemu_get_byte(f);
4160 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
4161 break;
4162
4163 case RAM_SAVE_FLAG_PAGE:
4164 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
4165 break;
4166
4167 case RAM_SAVE_FLAG_COMPRESS_PAGE:
4168 len = qemu_get_be32(f);
4169 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
4170 error_report("Invalid compressed data length: %d", len);
4171 ret = -EINVAL;
4172 break;
4173 }
4174 decompress_data_with_multi_threads(f, host, len);
4175 break;
4176
4177 case RAM_SAVE_FLAG_XBZRLE:
4178 if (load_xbzrle(f, addr, host) < 0) {
4179 error_report("Failed to decompress XBZRLE page at "
4180 RAM_ADDR_FMT, addr);
4181 ret = -EINVAL;
4182 break;
4183 }
4184 break;
4185 case RAM_SAVE_FLAG_EOS:
4186 /* normal exit */
4187 multifd_recv_sync_main();
4188 break;
4189 default:
4190 if (flags & RAM_SAVE_FLAG_HOOK) {
4191 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
4192 } else {
4193 error_report("Unknown combination of migration flags: %#x",
4194 flags);
4195 ret = -EINVAL;
4196 }
4197 }
4198 if (!ret) {
4199 ret = qemu_file_get_error(f);
4200 }
4201 }
4202
4203 ret |= wait_for_decompress_done();
4204 rcu_read_unlock();
4205 trace_ram_load_complete(ret, seq_iter);
4206
4207 if (!ret && migration_incoming_in_colo_state()) {
4208 colo_flush_ram_cache();
4209 }
4210 return ret;
4211 }
4212
4213 static bool ram_has_postcopy(void *opaque)
4214 {
4215 RAMBlock *rb;
4216 RAMBLOCK_FOREACH_MIGRATABLE(rb) {
4217 if (ramblock_is_pmem(rb)) {
4218 info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
4219 "is not supported now!", rb->idstr, rb->host);
4220 return false;
4221 }
4222 }
4223
4224 return migrate_postcopy_ram();
4225 }
4226
4227 /* Sync all the dirty bitmaps with the destination VM. */
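/*
 * Handshake used below: one recv-bitmap request is sent per migratable
 * ramblock, and rp_sem is then waited on once per block; the return-path
 * code posts it from ram_dirty_bitmap_reload_notify() each time a bitmap
 * has been reloaded.
 */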
4228 static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
4229 {
4230 RAMBlock *block;
4231 QEMUFile *file = s->to_dst_file;
4232 int ramblock_count = 0;
4233
4234 trace_ram_dirty_bitmap_sync_start();
4235
4236 RAMBLOCK_FOREACH_MIGRATABLE(block) {
4237 qemu_savevm_send_recv_bitmap(file, block->idstr);
4238 trace_ram_dirty_bitmap_request(block->idstr);
4239 ramblock_count++;
4240 }
4241
4242 trace_ram_dirty_bitmap_sync_wait();
4243
4244 /* Wait until all the ramblocks' dirty bitmaps are synced */
4245 while (ramblock_count--) {
4246 qemu_sem_wait(&s->rp_state.rp_sem);
4247 }
4248
4249 trace_ram_dirty_bitmap_sync_complete();
4250
4251 return 0;
4252 }
4253
4254 static void ram_dirty_bitmap_reload_notify(MigrationState *s)
4255 {
4256 qemu_sem_post(&s->rp_state.rp_sem);
4257 }
4258
4259 /*
4260 * Read the received bitmap and invert it to form the initial dirty bitmap.
4261 * This is only used when a paused postcopy migration wants
4262 * to resume from a middle point.
4263 */
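/*
 * What is read from the return path below (as implemented here, not a
 * normative description):
 *   be64   size      - bitmap size in bytes, padded to a multiple of 8
 *   bytes  bitmap    - "size" bytes of little-endian bitmap data
 *   be64   end_mark  - must equal RAMBLOCK_RECV_BITMAP_ENDING
 */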
4264 int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
4265 {
4266 int ret = -EINVAL;
4267 QEMUFile *file = s->rp_state.from_dst_file;
4268 unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
4269 uint64_t local_size = DIV_ROUND_UP(nbits, 8);
4270 uint64_t size, end_mark;
4271
4272 trace_ram_dirty_bitmap_reload_begin(block->idstr);
4273
4274 if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
4275 error_report("%s: incorrect state %s", __func__,
4276 MigrationStatus_str(s->state));
4277 return -EINVAL;
4278 }
4279
4280 /*
4281 * Note: see comments in ramblock_recv_bitmap_send() on why we
4282 * need the endianness conversion and the padding.
4283 */
4284 local_size = ROUND_UP(local_size, 8);
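/*
 * Example (illustrative): a 1 GiB ramblock with 4 KiB target pages has
 * nbits = 262144, so local_size = ROUND_UP(DIV_ROUND_UP(262144, 8), 8)
 * = 32768 bytes, and le_bitmap below gets one extra long of padding.
 */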
4285
4286 /* Add padding */
4287 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
4288
4289 size = qemu_get_be64(file);
4290
4291 /* The size of the bitmap should match that of our ramblock */
4292 if (size != local_size) {
4293 error_report("%s: ramblock '%s' bitmap size mismatch "
4294 "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
4295 block->idstr, size, local_size);
4296 ret = -EINVAL;
4297 goto out;
4298 }
4299
4300 size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
4301 end_mark = qemu_get_be64(file);
4302
4303 ret = qemu_file_get_error(file);
4304 if (ret || size != local_size) {
4305 error_report("%s: read bitmap failed for ramblock '%s': %d"
4306 " (size 0x%"PRIx64", got: 0x%"PRIx64")",
4307 __func__, block->idstr, ret, local_size, size);
4308 ret = -EIO;
4309 goto out;
4310 }
4311
4312 if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
4313 error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64,
4314 __func__, block->idstr, end_mark);
4315 ret = -EINVAL;
4316 goto out;
4317 }
4318
4319 /*
4320 * Endianness conversion. We are in postcopy (though paused).
4321 * The dirty bitmap won't change, so we can modify it directly.
4322 */
4323 bitmap_from_le(block->bmap, le_bitmap, nbits);
4324
4325 /*
4326 * What we received is the "received bitmap". Invert it to form the
4327 * initial dirty bitmap for this ramblock.
4328 */
4329 bitmap_complement(block->bmap, block->bmap, nbits);
4330
4331 trace_ram_dirty_bitmap_reload_complete(block->idstr);
4332
4333 /*
4334 * We succeeded in syncing the bitmap for the current ramblock. If this
4335 * is the last one to sync, we need to notify the main send thread.
4336 */
4337 ram_dirty_bitmap_reload_notify(s);
4338
4339 ret = 0;
4340 out:
4341 g_free(le_bitmap);
4342 return ret;
4343 }
4344
4345 static int ram_resume_prepare(MigrationState *s, void *opaque)
4346 {
4347 RAMState *rs = *(RAMState **)opaque;
4348 int ret;
4349
4350 ret = ram_dirty_bitmap_sync_all(s, rs);
4351 if (ret) {
4352 return ret;
4353 }
4354
4355 ram_state_resume_prepare(rs, s->to_dst_file);
4356
4357 return 0;
4358 }
4359
4360 static SaveVMHandlers savevm_ram_handlers = {
4361 .save_setup = ram_save_setup,
4362 .save_live_iterate = ram_save_iterate,
4363 .save_live_complete_postcopy = ram_save_complete,
4364 .save_live_complete_precopy = ram_save_complete,
4365 .has_postcopy = ram_has_postcopy,
4366 .save_live_pending = ram_save_pending,
4367 .load_state = ram_load,
4368 .save_cleanup = ram_save_cleanup,
4369 .load_setup = ram_load_setup,
4370 .load_cleanup = ram_load_cleanup,
4371 .resume_prepare = ram_resume_prepare,
4372 };
4373
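/*
 * The "ram" section is registered with version 4, which is the only
 * version_id that ram_load() above accepts.
 */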
4374 void ram_mig_init(void)
4375 {
4376 qemu_mutex_init(&XBZRLE.lock);
4377 register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
4378 }