]> git.proxmox.com Git - mirror_qemu.git/blame - migration/ram.c
multifd: Create new next_packet_size field
[mirror_qemu.git] / migration / ram.c
CommitLineData
56e93d26
JQ
1/*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
76cc7b58
JQ
5 * Copyright (c) 2011-2015 Red Hat Inc
6 *
7 * Authors:
8 * Juan Quintela <quintela@redhat.com>
56e93d26
JQ
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 * THE SOFTWARE.
27 */
e688df6b 28
1393a485 29#include "qemu/osdep.h"
33c11879 30#include "cpu.h"
56e93d26 31#include <zlib.h>
f348b6d1 32#include "qemu/cutils.h"
56e93d26
JQ
33#include "qemu/bitops.h"
34#include "qemu/bitmap.h"
7205c9ec 35#include "qemu/main-loop.h"
56eb90af 36#include "qemu/pmem.h"
709e3fe8 37#include "xbzrle.h"
7b1e1a22 38#include "ram.h"
6666c96a 39#include "migration.h"
71bb07db 40#include "socket.h"
f2a8f0a6 41#include "migration/register.h"
7b1e1a22 42#include "migration/misc.h"
08a0aee1 43#include "qemu-file.h"
be07b0ac 44#include "postcopy-ram.h"
53d37d36 45#include "page_cache.h"
56e93d26 46#include "qemu/error-report.h"
e688df6b 47#include "qapi/error.h"
9af23989 48#include "qapi/qapi-events-migration.h"
8acabf69 49#include "qapi/qmp/qerror.h"
56e93d26 50#include "trace.h"
56e93d26 51#include "exec/ram_addr.h"
f9494614 52#include "exec/target_page.h"
56e93d26 53#include "qemu/rcu_queue.h"
a91246c9 54#include "migration/colo.h"
53d37d36 55#include "block.h"
af8b7d2b
JQ
56#include "sysemu/sysemu.h"
57#include "qemu/uuid.h"
edd090c7 58#include "savevm.h"
b9ee2f7d 59#include "qemu/iov.h"
56e93d26 60
56e93d26
JQ
61/***********************************************************/
62/* ram save/restore */
63
bb890ed5
JQ
64/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
65 * worked for pages that where filled with the same char. We switched
66 * it to only search for the zero value. And to avoid confusion with
67 * RAM_SSAVE_FLAG_COMPRESS_PAGE just rename it.
68 */
69
56e93d26 70#define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
bb890ed5 71#define RAM_SAVE_FLAG_ZERO 0x02
56e93d26
JQ
72#define RAM_SAVE_FLAG_MEM_SIZE 0x04
73#define RAM_SAVE_FLAG_PAGE 0x08
74#define RAM_SAVE_FLAG_EOS 0x10
75#define RAM_SAVE_FLAG_CONTINUE 0x20
76#define RAM_SAVE_FLAG_XBZRLE 0x40
77/* 0x80 is reserved in migration.h start with 0x100 next */
78#define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
79
56e93d26
JQ
80static inline bool is_zero_range(uint8_t *p, uint64_t size)
81{
a1febc49 82 return buffer_is_zero(p, size);
56e93d26
JQ
83}
84
9360447d
JQ
85XBZRLECacheStats xbzrle_counters;
86
56e93d26
JQ
87/* struct contains XBZRLE cache and a static page
88 used by the compression */
89static struct {
90 /* buffer used for XBZRLE encoding */
91 uint8_t *encoded_buf;
92 /* buffer for storing page content */
93 uint8_t *current_buf;
94 /* Cache for XBZRLE, Protected by lock. */
95 PageCache *cache;
96 QemuMutex lock;
c00e0928
JQ
97 /* it will store a page full of zeros */
98 uint8_t *zero_target_page;
f265e0e4
JQ
99 /* buffer used for XBZRLE decoding */
100 uint8_t *decoded_buf;
56e93d26
JQ
101} XBZRLE;
102
56e93d26
JQ
103static void XBZRLE_cache_lock(void)
104{
105 if (migrate_use_xbzrle())
106 qemu_mutex_lock(&XBZRLE.lock);
107}
108
109static void XBZRLE_cache_unlock(void)
110{
111 if (migrate_use_xbzrle())
112 qemu_mutex_unlock(&XBZRLE.lock);
113}
114
3d0684b2
JQ
115/**
116 * xbzrle_cache_resize: resize the xbzrle cache
117 *
118 * This function is called from qmp_migrate_set_cache_size in main
119 * thread, possibly while a migration is in progress. A running
120 * migration may be using the cache and might finish during this call,
121 * hence changes to the cache are protected by XBZRLE.lock().
122 *
c9dede2d 123 * Returns 0 for success or -1 for error
3d0684b2
JQ
124 *
125 * @new_size: new cache size
8acabf69 126 * @errp: set *errp if the check failed, with reason
56e93d26 127 */
c9dede2d 128int xbzrle_cache_resize(int64_t new_size, Error **errp)
56e93d26
JQ
129{
130 PageCache *new_cache;
c9dede2d 131 int64_t ret = 0;
56e93d26 132
8acabf69
JQ
133 /* Check for truncation */
134 if (new_size != (size_t)new_size) {
135 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
136 "exceeding address space");
137 return -1;
138 }
139
2a313e5c
JQ
140 if (new_size == migrate_xbzrle_cache_size()) {
141 /* nothing to do */
c9dede2d 142 return 0;
2a313e5c
JQ
143 }
144
56e93d26
JQ
145 XBZRLE_cache_lock();
146
147 if (XBZRLE.cache != NULL) {
80f8dfde 148 new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
56e93d26 149 if (!new_cache) {
56e93d26
JQ
150 ret = -1;
151 goto out;
152 }
153
154 cache_fini(XBZRLE.cache);
155 XBZRLE.cache = new_cache;
156 }
56e93d26
JQ
157out:
158 XBZRLE_cache_unlock();
159 return ret;
160}
161
fbd162e6
YK
162static bool ramblock_is_ignored(RAMBlock *block)
163{
164 return !qemu_ram_is_migratable(block) ||
165 (migrate_ignore_shared() && qemu_ram_is_shared(block));
166}
167
b895de50 168/* Should be holding either ram_list.mutex, or the RCU lock. */
fbd162e6
YK
169#define RAMBLOCK_FOREACH_NOT_IGNORED(block) \
170 INTERNAL_RAMBLOCK_FOREACH(block) \
171 if (ramblock_is_ignored(block)) {} else
172
b895de50 173#define RAMBLOCK_FOREACH_MIGRATABLE(block) \
343f632c 174 INTERNAL_RAMBLOCK_FOREACH(block) \
b895de50
CLG
175 if (!qemu_ram_is_migratable(block)) {} else
176
343f632c
DDAG
177#undef RAMBLOCK_FOREACH
178
fbd162e6
YK
179int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
180{
181 RAMBlock *block;
182 int ret = 0;
183
184 rcu_read_lock();
185 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
186 ret = func(block, opaque);
187 if (ret) {
188 break;
189 }
190 }
191 rcu_read_unlock();
192 return ret;
193}
194
f9494614
AP
195static void ramblock_recv_map_init(void)
196{
197 RAMBlock *rb;
198
fbd162e6 199 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
f9494614
AP
200 assert(!rb->receivedmap);
201 rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
202 }
203}
204
205int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
206{
207 return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
208 rb->receivedmap);
209}
210
1cba9f6e
DDAG
211bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
212{
213 return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
214}
215
f9494614
AP
216void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
217{
218 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
219}
220
221void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
222 size_t nr)
223{
224 bitmap_set_atomic(rb->receivedmap,
225 ramblock_recv_bitmap_offset(host_addr, rb),
226 nr);
227}
228
a335debb
PX
229#define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)
230
231/*
232 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
233 *
234 * Returns >0 if success with sent bytes, or <0 if error.
235 */
236int64_t ramblock_recv_bitmap_send(QEMUFile *file,
237 const char *block_name)
238{
239 RAMBlock *block = qemu_ram_block_by_name(block_name);
240 unsigned long *le_bitmap, nbits;
241 uint64_t size;
242
243 if (!block) {
244 error_report("%s: invalid block name: %s", __func__, block_name);
245 return -1;
246 }
247
248 nbits = block->used_length >> TARGET_PAGE_BITS;
249
250 /*
251 * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
252 * machines we may need 4 more bytes for padding (see below
253 * comment). So extend it a bit before hand.
254 */
255 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
256
257 /*
258 * Always use little endian when sending the bitmap. This is
259 * required that when source and destination VMs are not using the
260 * same endianess. (Note: big endian won't work.)
261 */
262 bitmap_to_le(le_bitmap, block->receivedmap, nbits);
263
264 /* Size of the bitmap, in bytes */
a725ef9f 265 size = DIV_ROUND_UP(nbits, 8);
a335debb
PX
266
267 /*
268 * size is always aligned to 8 bytes for 64bit machines, but it
269 * may not be true for 32bit machines. We need this padding to
270 * make sure the migration can survive even between 32bit and
271 * 64bit machines.
272 */
273 size = ROUND_UP(size, 8);
274
275 qemu_put_be64(file, size);
276 qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
277 /*
278 * Mark as an end, in case the middle part is screwed up due to
279 * some "misterious" reason.
280 */
281 qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
282 qemu_fflush(file);
283
bf269906 284 g_free(le_bitmap);
a335debb
PX
285
286 if (qemu_file_get_error(file)) {
287 return qemu_file_get_error(file);
288 }
289
290 return size + sizeof(size);
291}
292
ec481c6c
JQ
293/*
294 * An outstanding page request, on the source, having been received
295 * and queued
296 */
297struct RAMSrcPageRequest {
298 RAMBlock *rb;
299 hwaddr offset;
300 hwaddr len;
301
302 QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
303};
304
6f37bb8b
JQ
305/* State of RAM for migration */
306struct RAMState {
204b88b8
JQ
307 /* QEMUFile used for this migration */
308 QEMUFile *f;
6f37bb8b
JQ
309 /* Last block that we have visited searching for dirty pages */
310 RAMBlock *last_seen_block;
311 /* Last block from where we have sent data */
312 RAMBlock *last_sent_block;
269ace29
JQ
313 /* Last dirty target page we have sent */
314 ram_addr_t last_page;
6f37bb8b
JQ
315 /* last ram version we have seen */
316 uint32_t last_version;
317 /* We are in the first round */
318 bool ram_bulk_stage;
6eeb63f7
WW
319 /* The free page optimization is enabled */
320 bool fpo_enabled;
8d820d6f
JQ
321 /* How many times we have dirty too many pages */
322 int dirty_rate_high_cnt;
f664da80
JQ
323 /* these variables are used for bitmap sync */
324 /* last time we did a full bitmap_sync */
325 int64_t time_last_bitmap_sync;
eac74159 326 /* bytes transferred at start_time */
c4bdf0cf 327 uint64_t bytes_xfer_prev;
a66cd90c 328 /* number of dirty pages since start_time */
68908ed6 329 uint64_t num_dirty_pages_period;
b5833fde
JQ
330 /* xbzrle misses since the beginning of the period */
331 uint64_t xbzrle_cache_miss_prev;
76e03000
XG
332
333 /* compression statistics since the beginning of the period */
334 /* amount of count that no free thread to compress data */
335 uint64_t compress_thread_busy_prev;
336 /* amount bytes after compression */
337 uint64_t compressed_size_prev;
338 /* amount of compressed pages */
339 uint64_t compress_pages_prev;
340
be8b02ed
XG
341 /* total handled target pages at the beginning of period */
342 uint64_t target_page_count_prev;
343 /* total handled target pages since start */
344 uint64_t target_page_count;
9360447d 345 /* number of dirty bits in the bitmap */
2dfaf12e 346 uint64_t migration_dirty_pages;
386a907b 347 /* Protects modification of the bitmap and migration dirty pages */
108cfae0 348 QemuMutex bitmap_mutex;
68a098f3
JQ
349 /* The RAMBlock used in the last src_page_requests */
350 RAMBlock *last_req_rb;
ec481c6c
JQ
351 /* Queue of outstanding page requests from the destination */
352 QemuMutex src_page_req_mutex;
b58deb34 353 QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
6f37bb8b
JQ
354};
355typedef struct RAMState RAMState;
356
53518d94 357static RAMState *ram_state;
6f37bb8b 358
bd227060
WW
359static NotifierWithReturnList precopy_notifier_list;
360
361void precopy_infrastructure_init(void)
362{
363 notifier_with_return_list_init(&precopy_notifier_list);
364}
365
366void precopy_add_notifier(NotifierWithReturn *n)
367{
368 notifier_with_return_list_add(&precopy_notifier_list, n);
369}
370
371void precopy_remove_notifier(NotifierWithReturn *n)
372{
373 notifier_with_return_remove(n);
374}
375
376int precopy_notify(PrecopyNotifyReason reason, Error **errp)
377{
378 PrecopyNotifyData pnd;
379 pnd.reason = reason;
380 pnd.errp = errp;
381
382 return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
383}
384
6eeb63f7
WW
385void precopy_enable_free_page_optimization(void)
386{
387 if (!ram_state) {
388 return;
389 }
390
391 ram_state->fpo_enabled = true;
392}
393
9edabd4d 394uint64_t ram_bytes_remaining(void)
2f4fde93 395{
bae416e5
DDAG
396 return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
397 0;
2f4fde93
JQ
398}
399
9360447d 400MigrationStats ram_counters;
96506894 401
b8fb8cb7
DDAG
402/* used by the search for pages to send */
403struct PageSearchStatus {
404 /* Current block being searched */
405 RAMBlock *block;
a935e30f
JQ
406 /* Current page to search from */
407 unsigned long page;
b8fb8cb7
DDAG
408 /* Set once we wrap around */
409 bool complete_round;
410};
411typedef struct PageSearchStatus PageSearchStatus;
412
76e03000
XG
413CompressionStats compression_counters;
414
56e93d26 415struct CompressParam {
56e93d26 416 bool done;
90e56fb4 417 bool quit;
5e5fdcff 418 bool zero_page;
56e93d26
JQ
419 QEMUFile *file;
420 QemuMutex mutex;
421 QemuCond cond;
422 RAMBlock *block;
423 ram_addr_t offset;
34ab9e97
XG
424
425 /* internally used fields */
dcaf446e 426 z_stream stream;
34ab9e97 427 uint8_t *originbuf;
56e93d26
JQ
428};
429typedef struct CompressParam CompressParam;
430
431struct DecompressParam {
73a8912b 432 bool done;
90e56fb4 433 bool quit;
56e93d26
JQ
434 QemuMutex mutex;
435 QemuCond cond;
436 void *des;
d341d9f3 437 uint8_t *compbuf;
56e93d26 438 int len;
797ca154 439 z_stream stream;
56e93d26
JQ
440};
441typedef struct DecompressParam DecompressParam;
442
443static CompressParam *comp_param;
444static QemuThread *compress_threads;
445/* comp_done_cond is used to wake up the migration thread when
446 * one of the compression threads has finished the compression.
447 * comp_done_lock is used to co-work with comp_done_cond.
448 */
0d9f9a5c
LL
449static QemuMutex comp_done_lock;
450static QemuCond comp_done_cond;
56e93d26
JQ
451/* The empty QEMUFileOps will be used by file in CompressParam */
452static const QEMUFileOps empty_ops = { };
453
34ab9e97 454static QEMUFile *decomp_file;
56e93d26
JQ
455static DecompressParam *decomp_param;
456static QemuThread *decompress_threads;
73a8912b
LL
457static QemuMutex decomp_done_lock;
458static QemuCond decomp_done_cond;
56e93d26 459
5e5fdcff 460static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
6ef3771c 461 ram_addr_t offset, uint8_t *source_buf);
56e93d26
JQ
462
463static void *do_data_compress(void *opaque)
464{
465 CompressParam *param = opaque;
a7a9a88f
LL
466 RAMBlock *block;
467 ram_addr_t offset;
5e5fdcff 468 bool zero_page;
56e93d26 469
a7a9a88f 470 qemu_mutex_lock(&param->mutex);
90e56fb4 471 while (!param->quit) {
a7a9a88f
LL
472 if (param->block) {
473 block = param->block;
474 offset = param->offset;
475 param->block = NULL;
476 qemu_mutex_unlock(&param->mutex);
477
5e5fdcff
XG
478 zero_page = do_compress_ram_page(param->file, &param->stream,
479 block, offset, param->originbuf);
a7a9a88f 480
0d9f9a5c 481 qemu_mutex_lock(&comp_done_lock);
a7a9a88f 482 param->done = true;
5e5fdcff 483 param->zero_page = zero_page;
0d9f9a5c
LL
484 qemu_cond_signal(&comp_done_cond);
485 qemu_mutex_unlock(&comp_done_lock);
a7a9a88f
LL
486
487 qemu_mutex_lock(&param->mutex);
488 } else {
56e93d26
JQ
489 qemu_cond_wait(&param->cond, &param->mutex);
490 }
56e93d26 491 }
a7a9a88f 492 qemu_mutex_unlock(&param->mutex);
56e93d26
JQ
493
494 return NULL;
495}
496
f0afa331 497static void compress_threads_save_cleanup(void)
56e93d26
JQ
498{
499 int i, thread_count;
500
05306935 501 if (!migrate_use_compression() || !comp_param) {
56e93d26
JQ
502 return;
503 }
05306935 504
56e93d26
JQ
505 thread_count = migrate_compress_threads();
506 for (i = 0; i < thread_count; i++) {
dcaf446e
XG
507 /*
508 * we use it as a indicator which shows if the thread is
509 * properly init'd or not
510 */
511 if (!comp_param[i].file) {
512 break;
513 }
05306935
FL
514
515 qemu_mutex_lock(&comp_param[i].mutex);
516 comp_param[i].quit = true;
517 qemu_cond_signal(&comp_param[i].cond);
518 qemu_mutex_unlock(&comp_param[i].mutex);
519
56e93d26 520 qemu_thread_join(compress_threads + i);
56e93d26
JQ
521 qemu_mutex_destroy(&comp_param[i].mutex);
522 qemu_cond_destroy(&comp_param[i].cond);
dcaf446e 523 deflateEnd(&comp_param[i].stream);
34ab9e97 524 g_free(comp_param[i].originbuf);
dcaf446e
XG
525 qemu_fclose(comp_param[i].file);
526 comp_param[i].file = NULL;
56e93d26 527 }
0d9f9a5c
LL
528 qemu_mutex_destroy(&comp_done_lock);
529 qemu_cond_destroy(&comp_done_cond);
56e93d26
JQ
530 g_free(compress_threads);
531 g_free(comp_param);
56e93d26
JQ
532 compress_threads = NULL;
533 comp_param = NULL;
56e93d26
JQ
534}
535
dcaf446e 536static int compress_threads_save_setup(void)
56e93d26
JQ
537{
538 int i, thread_count;
539
540 if (!migrate_use_compression()) {
dcaf446e 541 return 0;
56e93d26 542 }
56e93d26
JQ
543 thread_count = migrate_compress_threads();
544 compress_threads = g_new0(QemuThread, thread_count);
545 comp_param = g_new0(CompressParam, thread_count);
0d9f9a5c
LL
546 qemu_cond_init(&comp_done_cond);
547 qemu_mutex_init(&comp_done_lock);
56e93d26 548 for (i = 0; i < thread_count; i++) {
34ab9e97
XG
549 comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
550 if (!comp_param[i].originbuf) {
551 goto exit;
552 }
553
dcaf446e
XG
554 if (deflateInit(&comp_param[i].stream,
555 migrate_compress_level()) != Z_OK) {
34ab9e97 556 g_free(comp_param[i].originbuf);
dcaf446e
XG
557 goto exit;
558 }
559
e110aa91
C
560 /* comp_param[i].file is just used as a dummy buffer to save data,
561 * set its ops to empty.
56e93d26
JQ
562 */
563 comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
564 comp_param[i].done = true;
90e56fb4 565 comp_param[i].quit = false;
56e93d26
JQ
566 qemu_mutex_init(&comp_param[i].mutex);
567 qemu_cond_init(&comp_param[i].cond);
568 qemu_thread_create(compress_threads + i, "compress",
569 do_data_compress, comp_param + i,
570 QEMU_THREAD_JOINABLE);
571 }
dcaf446e
XG
572 return 0;
573
574exit:
575 compress_threads_save_cleanup();
576 return -1;
56e93d26
JQ
577}
578
f986c3d2
JQ
579/* Multiple fd's */
580
af8b7d2b
JQ
581#define MULTIFD_MAGIC 0x11223344U
582#define MULTIFD_VERSION 1
583
6df264ac
JQ
584#define MULTIFD_FLAG_SYNC (1 << 0)
585
af8b7d2b
JQ
586typedef struct {
587 uint32_t magic;
588 uint32_t version;
589 unsigned char uuid[16]; /* QemuUUID */
590 uint8_t id;
591} __attribute__((packed)) MultiFDInit_t;
592
2a26c979
JQ
593typedef struct {
594 uint32_t magic;
595 uint32_t version;
596 uint32_t flags;
6f862692
JQ
597 /* maximum number of allocated pages */
598 uint32_t pages_alloc;
599 uint32_t pages_used;
2a34ee59
JQ
600 /* size of the next packet that contains pages */
601 uint32_t next_packet_size;
2a26c979
JQ
602 uint64_t packet_num;
603 char ramblock[256];
604 uint64_t offset[];
605} __attribute__((packed)) MultiFDPacket_t;
606
34c55a94
JQ
607typedef struct {
608 /* number of used pages */
609 uint32_t used;
610 /* number of allocated pages */
611 uint32_t allocated;
612 /* global number of generated multifd packets */
613 uint64_t packet_num;
614 /* offset of each page */
615 ram_addr_t *offset;
616 /* pointer to each page */
617 struct iovec *iov;
618 RAMBlock *block;
619} MultiFDPages_t;
620
8c4598f2
JQ
621typedef struct {
622 /* this fields are not changed once the thread is created */
623 /* channel number */
f986c3d2 624 uint8_t id;
8c4598f2 625 /* channel thread name */
f986c3d2 626 char *name;
8c4598f2 627 /* channel thread id */
f986c3d2 628 QemuThread thread;
8c4598f2 629 /* communication channel */
60df2d4a 630 QIOChannel *c;
8c4598f2 631 /* sem where to wait for more work */
f986c3d2 632 QemuSemaphore sem;
8c4598f2 633 /* this mutex protects the following parameters */
f986c3d2 634 QemuMutex mutex;
8c4598f2 635 /* is this channel thread running */
66770707 636 bool running;
8c4598f2 637 /* should this thread finish */
f986c3d2 638 bool quit;
0beb5ed3
JQ
639 /* thread has work to do */
640 int pending_job;
34c55a94
JQ
641 /* array of pages to sent */
642 MultiFDPages_t *pages;
2a26c979
JQ
643 /* packet allocated len */
644 uint32_t packet_len;
645 /* pointer to the packet */
646 MultiFDPacket_t *packet;
647 /* multifd flags for each packet */
648 uint32_t flags;
2a34ee59
JQ
649 /* size of the next packet that contains pages */
650 uint32_t next_packet_size;
2a26c979
JQ
651 /* global number of generated multifd packets */
652 uint64_t packet_num;
408ea6ae
JQ
653 /* thread local variables */
654 /* packets sent through this channel */
655 uint64_t num_packets;
656 /* pages sent through this channel */
657 uint64_t num_pages;
6df264ac
JQ
658 /* syncs main thread and channels */
659 QemuSemaphore sem_sync;
8c4598f2
JQ
660} MultiFDSendParams;
661
662typedef struct {
663 /* this fields are not changed once the thread is created */
664 /* channel number */
665 uint8_t id;
666 /* channel thread name */
667 char *name;
668 /* channel thread id */
669 QemuThread thread;
670 /* communication channel */
671 QIOChannel *c;
8c4598f2
JQ
672 /* this mutex protects the following parameters */
673 QemuMutex mutex;
674 /* is this channel thread running */
675 bool running;
34c55a94
JQ
676 /* array of pages to receive */
677 MultiFDPages_t *pages;
2a26c979
JQ
678 /* packet allocated len */
679 uint32_t packet_len;
680 /* pointer to the packet */
681 MultiFDPacket_t *packet;
682 /* multifd flags for each packet */
683 uint32_t flags;
684 /* global number of generated multifd packets */
685 uint64_t packet_num;
408ea6ae 686 /* thread local variables */
2a34ee59
JQ
687 /* size of the next packet that contains pages */
688 uint32_t next_packet_size;
408ea6ae
JQ
689 /* packets sent through this channel */
690 uint64_t num_packets;
691 /* pages sent through this channel */
692 uint64_t num_pages;
6df264ac
JQ
693 /* syncs main thread and channels */
694 QemuSemaphore sem_sync;
8c4598f2 695} MultiFDRecvParams;
f986c3d2 696
af8b7d2b
JQ
697static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
698{
699 MultiFDInit_t msg;
700 int ret;
701
702 msg.magic = cpu_to_be32(MULTIFD_MAGIC);
703 msg.version = cpu_to_be32(MULTIFD_VERSION);
704 msg.id = p->id;
705 memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));
706
707 ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
708 if (ret != 0) {
709 return -1;
710 }
711 return 0;
712}
713
714static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
715{
716 MultiFDInit_t msg;
717 int ret;
718
719 ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
720 if (ret != 0) {
721 return -1;
722 }
723
341ba0df
PM
724 msg.magic = be32_to_cpu(msg.magic);
725 msg.version = be32_to_cpu(msg.version);
af8b7d2b
JQ
726
727 if (msg.magic != MULTIFD_MAGIC) {
728 error_setg(errp, "multifd: received packet magic %x "
729 "expected %x", msg.magic, MULTIFD_MAGIC);
730 return -1;
731 }
732
733 if (msg.version != MULTIFD_VERSION) {
734 error_setg(errp, "multifd: received packet version %d "
735 "expected %d", msg.version, MULTIFD_VERSION);
736 return -1;
737 }
738
739 if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
740 char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
741 char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);
742
743 error_setg(errp, "multifd: received uuid '%s' and expected "
744 "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
745 g_free(uuid);
746 g_free(msg_uuid);
747 return -1;
748 }
749
750 if (msg.id > migrate_multifd_channels()) {
751 error_setg(errp, "multifd: received channel version %d "
752 "expected %d", msg.version, MULTIFD_VERSION);
753 return -1;
754 }
755
756 return msg.id;
757}
758
34c55a94
JQ
759static MultiFDPages_t *multifd_pages_init(size_t size)
760{
761 MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
762
763 pages->allocated = size;
764 pages->iov = g_new0(struct iovec, size);
765 pages->offset = g_new0(ram_addr_t, size);
766
767 return pages;
768}
769
770static void multifd_pages_clear(MultiFDPages_t *pages)
771{
772 pages->used = 0;
773 pages->allocated = 0;
774 pages->packet_num = 0;
775 pages->block = NULL;
776 g_free(pages->iov);
777 pages->iov = NULL;
778 g_free(pages->offset);
779 pages->offset = NULL;
780 g_free(pages);
781}
782
2a26c979
JQ
783static void multifd_send_fill_packet(MultiFDSendParams *p)
784{
785 MultiFDPacket_t *packet = p->packet;
786 int i;
787
788 packet->magic = cpu_to_be32(MULTIFD_MAGIC);
789 packet->version = cpu_to_be32(MULTIFD_VERSION);
790 packet->flags = cpu_to_be32(p->flags);
6f862692
JQ
791 packet->pages_alloc = cpu_to_be32(migrate_multifd_page_count());
792 packet->pages_used = cpu_to_be32(p->pages->used);
2a34ee59 793 packet->next_packet_size = cpu_to_be32(p->next_packet_size);
2a26c979
JQ
794 packet->packet_num = cpu_to_be64(p->packet_num);
795
796 if (p->pages->block) {
797 strncpy(packet->ramblock, p->pages->block->idstr, 256);
798 }
799
800 for (i = 0; i < p->pages->used; i++) {
801 packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
802 }
803}
804
805static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
806{
807 MultiFDPacket_t *packet = p->packet;
808 RAMBlock *block;
809 int i;
810
341ba0df 811 packet->magic = be32_to_cpu(packet->magic);
2a26c979
JQ
812 if (packet->magic != MULTIFD_MAGIC) {
813 error_setg(errp, "multifd: received packet "
814 "magic %x and expected magic %x",
815 packet->magic, MULTIFD_MAGIC);
816 return -1;
817 }
818
341ba0df 819 packet->version = be32_to_cpu(packet->version);
2a26c979
JQ
820 if (packet->version != MULTIFD_VERSION) {
821 error_setg(errp, "multifd: received packet "
822 "version %d and expected version %d",
823 packet->version, MULTIFD_VERSION);
824 return -1;
825 }
826
827 p->flags = be32_to_cpu(packet->flags);
828
6f862692
JQ
829 packet->pages_alloc = be32_to_cpu(packet->pages_alloc);
830 if (packet->pages_alloc > migrate_multifd_page_count()) {
2a26c979
JQ
831 error_setg(errp, "multifd: received packet "
832 "with size %d and expected maximum size %d",
6f862692 833 packet->pages_alloc, migrate_multifd_page_count()) ;
2a26c979
JQ
834 return -1;
835 }
836
6f862692
JQ
837 p->pages->used = be32_to_cpu(packet->pages_used);
838 if (p->pages->used > packet->pages_alloc) {
2a26c979 839 error_setg(errp, "multifd: received packet "
6f862692
JQ
840 "with %d pages and expected maximum pages are %d",
841 p->pages->used, packet->pages_alloc) ;
2a26c979
JQ
842 return -1;
843 }
844
2a34ee59 845 p->next_packet_size = be32_to_cpu(packet->next_packet_size);
2a26c979
JQ
846 p->packet_num = be64_to_cpu(packet->packet_num);
847
848 if (p->pages->used) {
849 /* make sure that ramblock is 0 terminated */
850 packet->ramblock[255] = 0;
851 block = qemu_ram_block_by_name(packet->ramblock);
852 if (!block) {
853 error_setg(errp, "multifd: unknown ram block %s",
854 packet->ramblock);
855 return -1;
856 }
857 }
858
859 for (i = 0; i < p->pages->used; i++) {
860 ram_addr_t offset = be64_to_cpu(packet->offset[i]);
861
862 if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
863 error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
864 " (max " RAM_ADDR_FMT ")",
865 offset, block->max_length);
866 return -1;
867 }
868 p->pages->iov[i].iov_base = block->host + offset;
869 p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
870 }
871
872 return 0;
873}
874
f986c3d2
JQ
875struct {
876 MultiFDSendParams *params;
877 /* number of created threads */
878 int count;
34c55a94
JQ
879 /* array of pages to sent */
880 MultiFDPages_t *pages;
6df264ac
JQ
881 /* syncs main thread and channels */
882 QemuSemaphore sem_sync;
883 /* global number of generated multifd packets */
884 uint64_t packet_num;
b9ee2f7d
JQ
885 /* send channels ready */
886 QemuSemaphore channels_ready;
f986c3d2
JQ
887} *multifd_send_state;
888
b9ee2f7d
JQ
889/*
890 * How we use multifd_send_state->pages and channel->pages?
891 *
892 * We create a pages for each channel, and a main one. Each time that
893 * we need to send a batch of pages we interchange the ones between
894 * multifd_send_state and the channel that is sending it. There are
895 * two reasons for that:
896 * - to not have to do so many mallocs during migration
897 * - to make easier to know what to free at the end of migration
898 *
899 * This way we always know who is the owner of each "pages" struct,
900 * and we don't need any loocking. It belongs to the migration thread
901 * or to the channel thread. Switching is safe because the migration
902 * thread is using the channel mutex when changing it, and the channel
903 * have to had finish with its own, otherwise pending_job can't be
904 * false.
905 */
906
907static void multifd_send_pages(void)
908{
909 int i;
910 static int next_channel;
911 MultiFDSendParams *p = NULL; /* make happy gcc */
912 MultiFDPages_t *pages = multifd_send_state->pages;
913 uint64_t transferred;
914
915 qemu_sem_wait(&multifd_send_state->channels_ready);
916 for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
917 p = &multifd_send_state->params[i];
918
919 qemu_mutex_lock(&p->mutex);
920 if (!p->pending_job) {
921 p->pending_job++;
922 next_channel = (i + 1) % migrate_multifd_channels();
923 break;
924 }
925 qemu_mutex_unlock(&p->mutex);
926 }
927 p->pages->used = 0;
928
929 p->packet_num = multifd_send_state->packet_num++;
930 p->pages->block = NULL;
931 multifd_send_state->pages = p->pages;
932 p->pages = pages;
4fcefd44 933 transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
b9ee2f7d
JQ
934 ram_counters.multifd_bytes += transferred;
935 ram_counters.transferred += transferred;;
936 qemu_mutex_unlock(&p->mutex);
937 qemu_sem_post(&p->sem);
938}
939
940static void multifd_queue_page(RAMBlock *block, ram_addr_t offset)
941{
942 MultiFDPages_t *pages = multifd_send_state->pages;
943
944 if (!pages->block) {
945 pages->block = block;
946 }
947
948 if (pages->block == block) {
949 pages->offset[pages->used] = offset;
950 pages->iov[pages->used].iov_base = block->host + offset;
951 pages->iov[pages->used].iov_len = TARGET_PAGE_SIZE;
952 pages->used++;
953
954 if (pages->used < pages->allocated) {
955 return;
956 }
957 }
958
959 multifd_send_pages();
960
961 if (pages->block != block) {
962 multifd_queue_page(block, offset);
963 }
964}
965
66770707 966static void multifd_send_terminate_threads(Error *err)
f986c3d2
JQ
967{
968 int i;
969
7a169d74
JQ
970 if (err) {
971 MigrationState *s = migrate_get_current();
972 migrate_set_error(s, err);
973 if (s->state == MIGRATION_STATUS_SETUP ||
974 s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
975 s->state == MIGRATION_STATUS_DEVICE ||
976 s->state == MIGRATION_STATUS_ACTIVE) {
977 migrate_set_state(&s->state, s->state,
978 MIGRATION_STATUS_FAILED);
979 }
980 }
981
66770707 982 for (i = 0; i < migrate_multifd_channels(); i++) {
f986c3d2
JQ
983 MultiFDSendParams *p = &multifd_send_state->params[i];
984
985 qemu_mutex_lock(&p->mutex);
986 p->quit = true;
987 qemu_sem_post(&p->sem);
988 qemu_mutex_unlock(&p->mutex);
989 }
990}
991
1398b2e3 992void multifd_save_cleanup(void)
f986c3d2
JQ
993{
994 int i;
f986c3d2
JQ
995
996 if (!migrate_use_multifd()) {
1398b2e3 997 return;
f986c3d2 998 }
66770707
JQ
999 multifd_send_terminate_threads(NULL);
1000 for (i = 0; i < migrate_multifd_channels(); i++) {
f986c3d2
JQ
1001 MultiFDSendParams *p = &multifd_send_state->params[i];
1002
66770707
JQ
1003 if (p->running) {
1004 qemu_thread_join(&p->thread);
1005 }
60df2d4a
JQ
1006 socket_send_channel_destroy(p->c);
1007 p->c = NULL;
f986c3d2
JQ
1008 qemu_mutex_destroy(&p->mutex);
1009 qemu_sem_destroy(&p->sem);
6df264ac 1010 qemu_sem_destroy(&p->sem_sync);
f986c3d2
JQ
1011 g_free(p->name);
1012 p->name = NULL;
34c55a94
JQ
1013 multifd_pages_clear(p->pages);
1014 p->pages = NULL;
2a26c979
JQ
1015 p->packet_len = 0;
1016 g_free(p->packet);
1017 p->packet = NULL;
f986c3d2 1018 }
b9ee2f7d 1019 qemu_sem_destroy(&multifd_send_state->channels_ready);
6df264ac 1020 qemu_sem_destroy(&multifd_send_state->sem_sync);
f986c3d2
JQ
1021 g_free(multifd_send_state->params);
1022 multifd_send_state->params = NULL;
34c55a94
JQ
1023 multifd_pages_clear(multifd_send_state->pages);
1024 multifd_send_state->pages = NULL;
f986c3d2
JQ
1025 g_free(multifd_send_state);
1026 multifd_send_state = NULL;
f986c3d2
JQ
1027}
1028
6df264ac
JQ
1029static void multifd_send_sync_main(void)
1030{
1031 int i;
1032
1033 if (!migrate_use_multifd()) {
1034 return;
1035 }
b9ee2f7d
JQ
1036 if (multifd_send_state->pages->used) {
1037 multifd_send_pages();
1038 }
6df264ac
JQ
1039 for (i = 0; i < migrate_multifd_channels(); i++) {
1040 MultiFDSendParams *p = &multifd_send_state->params[i];
1041
1042 trace_multifd_send_sync_main_signal(p->id);
1043
1044 qemu_mutex_lock(&p->mutex);
b9ee2f7d
JQ
1045
1046 p->packet_num = multifd_send_state->packet_num++;
6df264ac
JQ
1047 p->flags |= MULTIFD_FLAG_SYNC;
1048 p->pending_job++;
1049 qemu_mutex_unlock(&p->mutex);
1050 qemu_sem_post(&p->sem);
1051 }
1052 for (i = 0; i < migrate_multifd_channels(); i++) {
1053 MultiFDSendParams *p = &multifd_send_state->params[i];
1054
1055 trace_multifd_send_sync_main_wait(p->id);
1056 qemu_sem_wait(&multifd_send_state->sem_sync);
1057 }
1058 trace_multifd_send_sync_main(multifd_send_state->packet_num);
1059}
1060
f986c3d2
JQ
1061static void *multifd_send_thread(void *opaque)
1062{
1063 MultiFDSendParams *p = opaque;
af8b7d2b 1064 Error *local_err = NULL;
8b2db7f5 1065 int ret;
af8b7d2b 1066
408ea6ae 1067 trace_multifd_send_thread_start(p->id);
74637e6f 1068 rcu_register_thread();
408ea6ae 1069
af8b7d2b
JQ
1070 if (multifd_send_initial_packet(p, &local_err) < 0) {
1071 goto out;
1072 }
408ea6ae
JQ
1073 /* initial packet */
1074 p->num_packets = 1;
f986c3d2
JQ
1075
1076 while (true) {
d82628e4 1077 qemu_sem_wait(&p->sem);
f986c3d2 1078 qemu_mutex_lock(&p->mutex);
0beb5ed3
JQ
1079
1080 if (p->pending_job) {
1081 uint32_t used = p->pages->used;
1082 uint64_t packet_num = p->packet_num;
1083 uint32_t flags = p->flags;
1084
2a34ee59 1085 p->next_packet_size = used * qemu_target_page_size();
0beb5ed3
JQ
1086 multifd_send_fill_packet(p);
1087 p->flags = 0;
1088 p->num_packets++;
1089 p->num_pages += used;
1090 p->pages->used = 0;
1091 qemu_mutex_unlock(&p->mutex);
1092
2a34ee59
JQ
1093 trace_multifd_send(p->id, packet_num, used, flags,
1094 p->next_packet_size);
0beb5ed3 1095
8b2db7f5
JQ
1096 ret = qio_channel_write_all(p->c, (void *)p->packet,
1097 p->packet_len, &local_err);
1098 if (ret != 0) {
1099 break;
1100 }
1101
ad24c7cb
JQ
1102 if (used) {
1103 ret = qio_channel_writev_all(p->c, p->pages->iov,
1104 used, &local_err);
1105 if (ret != 0) {
1106 break;
1107 }
8b2db7f5 1108 }
0beb5ed3
JQ
1109
1110 qemu_mutex_lock(&p->mutex);
1111 p->pending_job--;
1112 qemu_mutex_unlock(&p->mutex);
6df264ac
JQ
1113
1114 if (flags & MULTIFD_FLAG_SYNC) {
1115 qemu_sem_post(&multifd_send_state->sem_sync);
1116 }
b9ee2f7d 1117 qemu_sem_post(&multifd_send_state->channels_ready);
0beb5ed3 1118 } else if (p->quit) {
f986c3d2
JQ
1119 qemu_mutex_unlock(&p->mutex);
1120 break;
6df264ac
JQ
1121 } else {
1122 qemu_mutex_unlock(&p->mutex);
1123 /* sometimes there are spurious wakeups */
f986c3d2 1124 }
f986c3d2
JQ
1125 }
1126
af8b7d2b
JQ
1127out:
1128 if (local_err) {
1129 multifd_send_terminate_threads(local_err);
1130 }
1131
66770707
JQ
1132 qemu_mutex_lock(&p->mutex);
1133 p->running = false;
1134 qemu_mutex_unlock(&p->mutex);
1135
74637e6f 1136 rcu_unregister_thread();
408ea6ae
JQ
1137 trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);
1138
f986c3d2
JQ
1139 return NULL;
1140}
1141
60df2d4a
JQ
1142static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
1143{
1144 MultiFDSendParams *p = opaque;
1145 QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
1146 Error *local_err = NULL;
1147
1148 if (qio_task_propagate_error(task, &local_err)) {
1398b2e3
FL
1149 migrate_set_error(migrate_get_current(), local_err);
1150 multifd_save_cleanup();
60df2d4a
JQ
1151 } else {
1152 p->c = QIO_CHANNEL(sioc);
1153 qio_channel_set_delay(p->c, false);
1154 p->running = true;
1155 qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
1156 QEMU_THREAD_JOINABLE);
1157
1158 atomic_inc(&multifd_send_state->count);
1159 }
1160}
1161
f986c3d2
JQ
1162int multifd_save_setup(void)
1163{
1164 int thread_count;
34c55a94 1165 uint32_t page_count = migrate_multifd_page_count();
f986c3d2
JQ
1166 uint8_t i;
1167
1168 if (!migrate_use_multifd()) {
1169 return 0;
1170 }
1171 thread_count = migrate_multifd_channels();
1172 multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
1173 multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
66770707 1174 atomic_set(&multifd_send_state->count, 0);
34c55a94 1175 multifd_send_state->pages = multifd_pages_init(page_count);
6df264ac 1176 qemu_sem_init(&multifd_send_state->sem_sync, 0);
b9ee2f7d 1177 qemu_sem_init(&multifd_send_state->channels_ready, 0);
34c55a94 1178
f986c3d2
JQ
1179 for (i = 0; i < thread_count; i++) {
1180 MultiFDSendParams *p = &multifd_send_state->params[i];
1181
1182 qemu_mutex_init(&p->mutex);
1183 qemu_sem_init(&p->sem, 0);
6df264ac 1184 qemu_sem_init(&p->sem_sync, 0);
f986c3d2 1185 p->quit = false;
0beb5ed3 1186 p->pending_job = 0;
f986c3d2 1187 p->id = i;
34c55a94 1188 p->pages = multifd_pages_init(page_count);
2a26c979
JQ
1189 p->packet_len = sizeof(MultiFDPacket_t)
1190 + sizeof(ram_addr_t) * page_count;
1191 p->packet = g_malloc0(p->packet_len);
f986c3d2 1192 p->name = g_strdup_printf("multifdsend_%d", i);
60df2d4a 1193 socket_send_channel_create(multifd_new_send_channel_async, p);
f986c3d2
JQ
1194 }
1195 return 0;
1196}
1197
f986c3d2
JQ
1198struct {
1199 MultiFDRecvParams *params;
1200 /* number of created threads */
1201 int count;
6df264ac
JQ
1202 /* syncs main thread and channels */
1203 QemuSemaphore sem_sync;
1204 /* global number of generated multifd packets */
1205 uint64_t packet_num;
f986c3d2
JQ
1206} *multifd_recv_state;
1207
66770707 1208static void multifd_recv_terminate_threads(Error *err)
f986c3d2
JQ
1209{
1210 int i;
1211
7a169d74
JQ
1212 if (err) {
1213 MigrationState *s = migrate_get_current();
1214 migrate_set_error(s, err);
1215 if (s->state == MIGRATION_STATUS_SETUP ||
1216 s->state == MIGRATION_STATUS_ACTIVE) {
1217 migrate_set_state(&s->state, s->state,
1218 MIGRATION_STATUS_FAILED);
1219 }
1220 }
1221
66770707 1222 for (i = 0; i < migrate_multifd_channels(); i++) {
f986c3d2
JQ
1223 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1224
1225 qemu_mutex_lock(&p->mutex);
7a5cc33c
JQ
1226 /* We could arrive here for two reasons:
1227 - normal quit, i.e. everything went fine, just finished
1228 - error quit: We close the channels so the channel threads
1229 finish the qio_channel_read_all_eof() */
1230 qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
f986c3d2
JQ
1231 qemu_mutex_unlock(&p->mutex);
1232 }
1233}
1234
1235int multifd_load_cleanup(Error **errp)
1236{
1237 int i;
1238 int ret = 0;
1239
1240 if (!migrate_use_multifd()) {
1241 return 0;
1242 }
66770707
JQ
1243 multifd_recv_terminate_threads(NULL);
1244 for (i = 0; i < migrate_multifd_channels(); i++) {
f986c3d2
JQ
1245 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1246
66770707
JQ
1247 if (p->running) {
1248 qemu_thread_join(&p->thread);
1249 }
60df2d4a
JQ
1250 object_unref(OBJECT(p->c));
1251 p->c = NULL;
f986c3d2 1252 qemu_mutex_destroy(&p->mutex);
6df264ac 1253 qemu_sem_destroy(&p->sem_sync);
f986c3d2
JQ
1254 g_free(p->name);
1255 p->name = NULL;
34c55a94
JQ
1256 multifd_pages_clear(p->pages);
1257 p->pages = NULL;
2a26c979
JQ
1258 p->packet_len = 0;
1259 g_free(p->packet);
1260 p->packet = NULL;
f986c3d2 1261 }
6df264ac 1262 qemu_sem_destroy(&multifd_recv_state->sem_sync);
f986c3d2
JQ
1263 g_free(multifd_recv_state->params);
1264 multifd_recv_state->params = NULL;
1265 g_free(multifd_recv_state);
1266 multifd_recv_state = NULL;
1267
1268 return ret;
1269}
1270
6df264ac
JQ
1271static void multifd_recv_sync_main(void)
1272{
1273 int i;
1274
1275 if (!migrate_use_multifd()) {
1276 return;
1277 }
1278 for (i = 0; i < migrate_multifd_channels(); i++) {
1279 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1280
6df264ac
JQ
1281 trace_multifd_recv_sync_main_wait(p->id);
1282 qemu_sem_wait(&multifd_recv_state->sem_sync);
1283 qemu_mutex_lock(&p->mutex);
1284 if (multifd_recv_state->packet_num < p->packet_num) {
1285 multifd_recv_state->packet_num = p->packet_num;
1286 }
1287 qemu_mutex_unlock(&p->mutex);
1288 }
1289 for (i = 0; i < migrate_multifd_channels(); i++) {
1290 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1291
1292 trace_multifd_recv_sync_main_signal(p->id);
6df264ac
JQ
1293 qemu_sem_post(&p->sem_sync);
1294 }
1295 trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
1296}
1297
f986c3d2
JQ
1298static void *multifd_recv_thread(void *opaque)
1299{
1300 MultiFDRecvParams *p = opaque;
2a26c979
JQ
1301 Error *local_err = NULL;
1302 int ret;
f986c3d2 1303
408ea6ae 1304 trace_multifd_recv_thread_start(p->id);
74637e6f 1305 rcu_register_thread();
408ea6ae 1306
f986c3d2 1307 while (true) {
6df264ac
JQ
1308 uint32_t used;
1309 uint32_t flags;
0beb5ed3 1310
8b2db7f5
JQ
1311 ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
1312 p->packet_len, &local_err);
1313 if (ret == 0) { /* EOF */
1314 break;
1315 }
1316 if (ret == -1) { /* Error */
1317 break;
1318 }
2a26c979 1319
6df264ac
JQ
1320 qemu_mutex_lock(&p->mutex);
1321 ret = multifd_recv_unfill_packet(p, &local_err);
1322 if (ret) {
f986c3d2
JQ
1323 qemu_mutex_unlock(&p->mutex);
1324 break;
1325 }
6df264ac
JQ
1326
1327 used = p->pages->used;
1328 flags = p->flags;
2a34ee59
JQ
1329 trace_multifd_recv(p->id, p->packet_num, used, flags,
1330 p->next_packet_size);
6df264ac
JQ
1331 p->num_packets++;
1332 p->num_pages += used;
f986c3d2 1333 qemu_mutex_unlock(&p->mutex);
6df264ac 1334
ad24c7cb
JQ
1335 if (used) {
1336 ret = qio_channel_readv_all(p->c, p->pages->iov,
1337 used, &local_err);
1338 if (ret != 0) {
1339 break;
1340 }
8b2db7f5
JQ
1341 }
1342
6df264ac
JQ
1343 if (flags & MULTIFD_FLAG_SYNC) {
1344 qemu_sem_post(&multifd_recv_state->sem_sync);
1345 qemu_sem_wait(&p->sem_sync);
1346 }
f986c3d2
JQ
1347 }
1348
d82628e4
JQ
1349 if (local_err) {
1350 multifd_recv_terminate_threads(local_err);
1351 }
66770707
JQ
1352 qemu_mutex_lock(&p->mutex);
1353 p->running = false;
1354 qemu_mutex_unlock(&p->mutex);
1355
74637e6f 1356 rcu_unregister_thread();
408ea6ae
JQ
1357 trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);
1358
f986c3d2
JQ
1359 return NULL;
1360}
1361
1362int multifd_load_setup(void)
1363{
1364 int thread_count;
34c55a94 1365 uint32_t page_count = migrate_multifd_page_count();
f986c3d2
JQ
1366 uint8_t i;
1367
1368 if (!migrate_use_multifd()) {
1369 return 0;
1370 }
1371 thread_count = migrate_multifd_channels();
1372 multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
1373 multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
66770707 1374 atomic_set(&multifd_recv_state->count, 0);
6df264ac 1375 qemu_sem_init(&multifd_recv_state->sem_sync, 0);
34c55a94 1376
f986c3d2
JQ
1377 for (i = 0; i < thread_count; i++) {
1378 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1379
1380 qemu_mutex_init(&p->mutex);
6df264ac 1381 qemu_sem_init(&p->sem_sync, 0);
f986c3d2 1382 p->id = i;
34c55a94 1383 p->pages = multifd_pages_init(page_count);
2a26c979
JQ
1384 p->packet_len = sizeof(MultiFDPacket_t)
1385 + sizeof(ram_addr_t) * page_count;
1386 p->packet = g_malloc0(p->packet_len);
f986c3d2 1387 p->name = g_strdup_printf("multifdrecv_%d", i);
f986c3d2
JQ
1388 }
1389 return 0;
1390}
1391
62c1e0ca
JQ
1392bool multifd_recv_all_channels_created(void)
1393{
1394 int thread_count = migrate_multifd_channels();
1395
1396 if (!migrate_use_multifd()) {
1397 return true;
1398 }
1399
1400 return thread_count == atomic_read(&multifd_recv_state->count);
1401}
1402
49ed0d24
FL
1403/*
1404 * Try to receive all multifd channels to get ready for the migration.
1405 * - Return true and do not set @errp when correctly receving all channels;
1406 * - Return false and do not set @errp when correctly receiving the current one;
1407 * - Return false and set @errp when failing to receive the current channel.
1408 */
1409bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
71bb07db 1410{
60df2d4a 1411 MultiFDRecvParams *p;
af8b7d2b
JQ
1412 Error *local_err = NULL;
1413 int id;
60df2d4a 1414
af8b7d2b
JQ
1415 id = multifd_recv_initial_packet(ioc, &local_err);
1416 if (id < 0) {
1417 multifd_recv_terminate_threads(local_err);
49ed0d24
FL
1418 error_propagate_prepend(errp, local_err,
1419 "failed to receive packet"
1420 " via multifd channel %d: ",
1421 atomic_read(&multifd_recv_state->count));
81e62053 1422 return false;
af8b7d2b
JQ
1423 }
1424
1425 p = &multifd_recv_state->params[id];
1426 if (p->c != NULL) {
1427 error_setg(&local_err, "multifd: received id '%d' already setup'",
1428 id);
1429 multifd_recv_terminate_threads(local_err);
49ed0d24 1430 error_propagate(errp, local_err);
81e62053 1431 return false;
af8b7d2b 1432 }
60df2d4a
JQ
1433 p->c = ioc;
1434 object_ref(OBJECT(ioc));
408ea6ae
JQ
1435 /* initial packet */
1436 p->num_packets = 1;
60df2d4a
JQ
1437
1438 p->running = true;
1439 qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
1440 QEMU_THREAD_JOINABLE);
1441 atomic_inc(&multifd_recv_state->count);
49ed0d24
FL
1442 return atomic_read(&multifd_recv_state->count) ==
1443 migrate_multifd_channels();
71bb07db
JQ
1444}
1445
56e93d26 1446/**
3d0684b2 1447 * save_page_header: write page header to wire
56e93d26
JQ
1448 *
1449 * If this is the 1st block, it also writes the block identification
1450 *
3d0684b2 1451 * Returns the number of bytes written
56e93d26
JQ
1452 *
1453 * @f: QEMUFile where to send the data
1454 * @block: block that contains the page we want to send
1455 * @offset: offset inside the block for the page
1456 * in the lower bits, it contains flags
1457 */
2bf3aa85
JQ
1458static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
1459 ram_addr_t offset)
56e93d26 1460{
9f5f380b 1461 size_t size, len;
56e93d26 1462
24795694
JQ
1463 if (block == rs->last_sent_block) {
1464 offset |= RAM_SAVE_FLAG_CONTINUE;
1465 }
2bf3aa85 1466 qemu_put_be64(f, offset);
56e93d26
JQ
1467 size = 8;
1468
1469 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
9f5f380b 1470 len = strlen(block->idstr);
2bf3aa85
JQ
1471 qemu_put_byte(f, len);
1472 qemu_put_buffer(f, (uint8_t *)block->idstr, len);
9f5f380b 1473 size += 1 + len;
24795694 1474 rs->last_sent_block = block;
56e93d26
JQ
1475 }
1476 return size;
1477}
1478
3d0684b2
JQ
1479/**
1480 * mig_throttle_guest_down: throotle down the guest
1481 *
1482 * Reduce amount of guest cpu execution to hopefully slow down memory
1483 * writes. If guest dirty memory rate is reduced below the rate at
1484 * which we can transfer pages to the destination then we should be
1485 * able to complete migration. Some workloads dirty memory way too
1486 * fast and will not effectively converge, even with auto-converge.
070afca2
JH
1487 */
1488static void mig_throttle_guest_down(void)
1489{
1490 MigrationState *s = migrate_get_current();
2594f56d
DB
1491 uint64_t pct_initial = s->parameters.cpu_throttle_initial;
1492 uint64_t pct_icrement = s->parameters.cpu_throttle_increment;
4cbc9c7f 1493 int pct_max = s->parameters.max_cpu_throttle;
070afca2
JH
1494
1495 /* We have not started throttling yet. Let's start it. */
1496 if (!cpu_throttle_active()) {
1497 cpu_throttle_set(pct_initial);
1498 } else {
1499 /* Throttling already on, just increase the rate */
4cbc9c7f
LQ
1500 cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_icrement,
1501 pct_max));
070afca2
JH
1502 }
1503}
1504
3d0684b2
JQ
1505/**
1506 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
1507 *
6f37bb8b 1508 * @rs: current RAM state
3d0684b2
JQ
1509 * @current_addr: address for the zero page
1510 *
1511 * Update the xbzrle cache to reflect a page that's been sent as all 0.
56e93d26
JQ
1512 * The important thing is that a stale (not-yet-0'd) page be replaced
1513 * by the new data.
1514 * As a bonus, if the page wasn't in the cache it gets added so that
3d0684b2 1515 * when a small write is made into the 0'd page it gets XBZRLE sent.
56e93d26 1516 */
6f37bb8b 1517static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
56e93d26 1518{
6f37bb8b 1519 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
56e93d26
JQ
1520 return;
1521 }
1522
1523 /* We don't care if this fails to allocate a new cache page
1524 * as long as it updated an old one */
c00e0928 1525 cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
9360447d 1526 ram_counters.dirty_sync_count);
56e93d26
JQ
1527}
1528
1529#define ENCODING_FLAG_XBZRLE 0x1
1530
1531/**
1532 * save_xbzrle_page: compress and send current page
1533 *
1534 * Returns: 1 means that we wrote the page
1535 * 0 means that page is identical to the one already sent
1536 * -1 means that xbzrle would be longer than normal
1537 *
5a987738 1538 * @rs: current RAM state
3d0684b2
JQ
1539 * @current_data: pointer to the address of the page contents
1540 * @current_addr: addr of the page
56e93d26
JQ
1541 * @block: block that contains the page we want to send
1542 * @offset: offset inside the block for the page
1543 * @last_stage: if we are at the completion stage
56e93d26 1544 */
204b88b8 1545static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
56e93d26 1546 ram_addr_t current_addr, RAMBlock *block,
072c2511 1547 ram_addr_t offset, bool last_stage)
56e93d26
JQ
1548{
1549 int encoded_len = 0, bytes_xbzrle;
1550 uint8_t *prev_cached_page;
1551
9360447d
JQ
1552 if (!cache_is_cached(XBZRLE.cache, current_addr,
1553 ram_counters.dirty_sync_count)) {
1554 xbzrle_counters.cache_miss++;
56e93d26
JQ
1555 if (!last_stage) {
1556 if (cache_insert(XBZRLE.cache, current_addr, *current_data,
9360447d 1557 ram_counters.dirty_sync_count) == -1) {
56e93d26
JQ
1558 return -1;
1559 } else {
1560 /* update *current_data when the page has been
1561 inserted into cache */
1562 *current_data = get_cached_data(XBZRLE.cache, current_addr);
1563 }
1564 }
1565 return -1;
1566 }
1567
1568 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
1569
1570 /* save current buffer into memory */
1571 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
1572
1573 /* XBZRLE encoding (if there is no overflow) */
1574 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
1575 TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
1576 TARGET_PAGE_SIZE);
1577 if (encoded_len == 0) {
55c4446b 1578 trace_save_xbzrle_page_skipping();
56e93d26
JQ
1579 return 0;
1580 } else if (encoded_len == -1) {
55c4446b 1581 trace_save_xbzrle_page_overflow();
9360447d 1582 xbzrle_counters.overflow++;
56e93d26
JQ
1583 /* update data in the cache */
1584 if (!last_stage) {
1585 memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
1586 *current_data = prev_cached_page;
1587 }
1588 return -1;
1589 }
1590
1591 /* we need to update the data in the cache, in order to get the same data */
1592 if (!last_stage) {
1593 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
1594 }
1595
1596 /* Send XBZRLE based compressed page */
2bf3aa85 1597 bytes_xbzrle = save_page_header(rs, rs->f, block,
204b88b8
JQ
1598 offset | RAM_SAVE_FLAG_XBZRLE);
1599 qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
1600 qemu_put_be16(rs->f, encoded_len);
1601 qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
56e93d26 1602 bytes_xbzrle += encoded_len + 1 + 2;
9360447d
JQ
1603 xbzrle_counters.pages++;
1604 xbzrle_counters.bytes += bytes_xbzrle;
1605 ram_counters.transferred += bytes_xbzrle;
56e93d26
JQ
1606
1607 return 1;
1608}
1609
3d0684b2
JQ
1610/**
1611 * migration_bitmap_find_dirty: find the next dirty page from start
f3f491fc 1612 *
3d0684b2
JQ
1613 * Called with rcu_read_lock() to protect migration_bitmap
1614 *
1615 * Returns the byte offset within memory region of the start of a dirty page
1616 *
6f37bb8b 1617 * @rs: current RAM state
3d0684b2 1618 * @rb: RAMBlock where to search for dirty pages
a935e30f 1619 * @start: page where we start the search
f3f491fc 1620 */
56e93d26 1621static inline
a935e30f 1622unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
f20e2865 1623 unsigned long start)
56e93d26 1624{
6b6712ef
JQ
1625 unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
1626 unsigned long *bitmap = rb->bmap;
56e93d26
JQ
1627 unsigned long next;
1628
fbd162e6 1629 if (ramblock_is_ignored(rb)) {
b895de50
CLG
1630 return size;
1631 }
1632
6eeb63f7
WW
1633 /*
1634 * When the free page optimization is enabled, we need to check the bitmap
1635 * to send the non-free pages rather than all the pages in the bulk stage.
1636 */
1637 if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
6b6712ef 1638 next = start + 1;
56e93d26 1639 } else {
6b6712ef 1640 next = find_next_bit(bitmap, size, start);
56e93d26
JQ
1641 }
1642
6b6712ef 1643 return next;
56e93d26
JQ
1644}
1645
06b10688 1646static inline bool migration_bitmap_clear_dirty(RAMState *rs,
f20e2865
JQ
1647 RAMBlock *rb,
1648 unsigned long page)
a82d593b
DDAG
1649{
1650 bool ret;
a82d593b 1651
386a907b 1652 qemu_mutex_lock(&rs->bitmap_mutex);
6b6712ef 1653 ret = test_and_clear_bit(page, rb->bmap);
a82d593b
DDAG
1654
1655 if (ret) {
0d8ec885 1656 rs->migration_dirty_pages--;
a82d593b 1657 }
386a907b
WW
1658 qemu_mutex_unlock(&rs->bitmap_mutex);
1659
a82d593b
DDAG
1660 return ret;
1661}
1662
15440dd5
JQ
1663static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
1664 ram_addr_t start, ram_addr_t length)
56e93d26 1665{
0d8ec885 1666 rs->migration_dirty_pages +=
6b6712ef 1667 cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
0d8ec885 1668 &rs->num_dirty_pages_period);
56e93d26
JQ
1669}
1670
3d0684b2
JQ
1671/**
1672 * ram_pagesize_summary: calculate all the pagesizes of a VM
1673 *
1674 * Returns a summary bitmap of the page sizes of all RAMBlocks
1675 *
1676 * For VMs with just normal pages this is equivalent to the host page
1677 * size. If it's got some huge pages then it's the OR of all the
1678 * different page sizes.
e8ca1db2
DDAG
1679 */
1680uint64_t ram_pagesize_summary(void)
1681{
1682 RAMBlock *block;
1683 uint64_t summary = 0;
1684
fbd162e6 1685 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
e8ca1db2
DDAG
1686 summary |= block->page_size;
1687 }
1688
1689 return summary;
1690}
1691
aecbfe9c
XG
1692uint64_t ram_get_total_transferred_pages(void)
1693{
1694 return ram_counters.normal + ram_counters.duplicate +
1695 compression_counters.pages + xbzrle_counters.pages;
1696}
1697
b734035b
XG
1698static void migration_update_rates(RAMState *rs, int64_t end_time)
1699{
be8b02ed 1700 uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
76e03000 1701 double compressed_size;
b734035b
XG
1702
1703 /* calculate period counters */
1704 ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
1705 / (end_time - rs->time_last_bitmap_sync);
1706
be8b02ed 1707 if (!page_count) {
b734035b
XG
1708 return;
1709 }
1710
1711 if (migrate_use_xbzrle()) {
1712 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
be8b02ed 1713 rs->xbzrle_cache_miss_prev) / page_count;
b734035b
XG
1714 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
1715 }
76e03000
XG
1716
1717 if (migrate_use_compression()) {
1718 compression_counters.busy_rate = (double)(compression_counters.busy -
1719 rs->compress_thread_busy_prev) / page_count;
1720 rs->compress_thread_busy_prev = compression_counters.busy;
1721
1722 compressed_size = compression_counters.compressed_size -
1723 rs->compressed_size_prev;
1724 if (compressed_size) {
1725 double uncompressed_size = (compression_counters.pages -
1726 rs->compress_pages_prev) * TARGET_PAGE_SIZE;
1727
1728 /* Compression-Ratio = Uncompressed-size / Compressed-size */
1729 compression_counters.compression_rate =
1730 uncompressed_size / compressed_size;
1731
1732 rs->compress_pages_prev = compression_counters.pages;
1733 rs->compressed_size_prev = compression_counters.compressed_size;
1734 }
1735 }
b734035b
XG
1736}
1737
8d820d6f 1738static void migration_bitmap_sync(RAMState *rs)
56e93d26
JQ
1739{
1740 RAMBlock *block;
56e93d26 1741 int64_t end_time;
c4bdf0cf 1742 uint64_t bytes_xfer_now;
56e93d26 1743
9360447d 1744 ram_counters.dirty_sync_count++;
56e93d26 1745
f664da80
JQ
1746 if (!rs->time_last_bitmap_sync) {
1747 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
56e93d26
JQ
1748 }
1749
1750 trace_migration_bitmap_sync_start();
9c1f8f44 1751 memory_global_dirty_log_sync();
56e93d26 1752
108cfae0 1753 qemu_mutex_lock(&rs->bitmap_mutex);
56e93d26 1754 rcu_read_lock();
fbd162e6 1755 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
15440dd5 1756 migration_bitmap_sync_range(rs, block, 0, block->used_length);
56e93d26 1757 }
650af890 1758 ram_counters.remaining = ram_bytes_remaining();
56e93d26 1759 rcu_read_unlock();
108cfae0 1760 qemu_mutex_unlock(&rs->bitmap_mutex);
56e93d26 1761
a66cd90c 1762 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1ffb5dfd 1763
56e93d26
JQ
1764 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1765
1766 /* more than 1 second = 1000 millisecons */
f664da80 1767 if (end_time > rs->time_last_bitmap_sync + 1000) {
9360447d 1768 bytes_xfer_now = ram_counters.transferred;
d693c6f1 1769
9ac78b61
PL
1770 /* During block migration the auto-converge logic incorrectly detects
1771 * that ram migration makes no progress. Avoid this by disabling the
1772 * throttling logic during the bulk phase of block migration. */
1773 if (migrate_auto_converge() && !blk_mig_bulk_active()) {
56e93d26
JQ
1774 /* The following detection logic can be refined later. For now:
1775 Check to see if the dirtied bytes is 50% more than the approx.
1776 amount of bytes that just got transferred since the last time we
070afca2
JH
1777 were in this routine. If that happens twice, start or increase
1778 throttling */
070afca2 1779
d693c6f1 1780 if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
eac74159 1781 (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
b4a3c64b 1782 (++rs->dirty_rate_high_cnt >= 2)) {
56e93d26 1783 trace_migration_throttle();
8d820d6f 1784 rs->dirty_rate_high_cnt = 0;
070afca2 1785 mig_throttle_guest_down();
d693c6f1 1786 }
56e93d26 1787 }
070afca2 1788
b734035b
XG
1789 migration_update_rates(rs, end_time);
1790
be8b02ed 1791 rs->target_page_count_prev = rs->target_page_count;
d693c6f1
FF
1792
1793 /* reset period counters */
f664da80 1794 rs->time_last_bitmap_sync = end_time;
a66cd90c 1795 rs->num_dirty_pages_period = 0;
d2a4d85a 1796 rs->bytes_xfer_prev = bytes_xfer_now;
56e93d26 1797 }
4addcd4f 1798 if (migrate_use_events()) {
3ab72385 1799 qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
4addcd4f 1800 }
56e93d26
JQ
1801}
1802
bd227060
WW
1803static void migration_bitmap_sync_precopy(RAMState *rs)
1804{
1805 Error *local_err = NULL;
1806
1807 /*
1808 * The current notifier usage is just an optimization to migration, so we
1809 * don't stop the normal migration process in the error case.
1810 */
1811 if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
1812 error_report_err(local_err);
1813 }
1814
1815 migration_bitmap_sync(rs);
1816
1817 if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
1818 error_report_err(local_err);
1819 }
1820}
1821
6c97ec5f
XG
1822/**
1823 * save_zero_page_to_file: send the zero page to the file
1824 *
1825 * Returns the size of data written to the file, 0 means the page is not
1826 * a zero page
1827 *
1828 * @rs: current RAM state
1829 * @file: the file where the data is saved
1830 * @block: block that contains the page we want to send
1831 * @offset: offset inside the block for the page
1832 */
1833static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
1834 RAMBlock *block, ram_addr_t offset)
1835{
1836 uint8_t *p = block->host + offset;
1837 int len = 0;
1838
1839 if (is_zero_range(p, TARGET_PAGE_SIZE)) {
1840 len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
1841 qemu_put_byte(file, 0);
1842 len += 1;
1843 }
1844 return len;
1845}
1846
56e93d26 1847/**
3d0684b2 1848 * save_zero_page: send the zero page to the stream
56e93d26 1849 *
3d0684b2 1850 * Returns the number of pages written.
56e93d26 1851 *
f7ccd61b 1852 * @rs: current RAM state
56e93d26
JQ
1853 * @block: block that contains the page we want to send
1854 * @offset: offset inside the block for the page
56e93d26 1855 */
7faccdc3 1856static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
56e93d26 1857{
6c97ec5f 1858 int len = save_zero_page_to_file(rs, rs->f, block, offset);
56e93d26 1859
6c97ec5f 1860 if (len) {
9360447d 1861 ram_counters.duplicate++;
6c97ec5f
XG
1862 ram_counters.transferred += len;
1863 return 1;
56e93d26 1864 }
6c97ec5f 1865 return -1;
56e93d26
JQ
1866}
1867
5727309d 1868static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
53f09a10 1869{
5727309d 1870 if (!migrate_release_ram() || !migration_in_postcopy()) {
53f09a10
PB
1871 return;
1872 }
1873
aaa2064c 1874 ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
53f09a10
PB
1875}
1876
059ff0fb
XG
1877/*
1878 * @pages: the number of pages written by the control path,
1879 * < 0 - error
1880 * > 0 - number of pages written
1881 *
1882 * Return true if the pages has been saved, otherwise false is returned.
1883 */
1884static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1885 int *pages)
1886{
1887 uint64_t bytes_xmit = 0;
1888 int ret;
1889
1890 *pages = -1;
1891 ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
1892 &bytes_xmit);
1893 if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1894 return false;
1895 }
1896
1897 if (bytes_xmit) {
1898 ram_counters.transferred += bytes_xmit;
1899 *pages = 1;
1900 }
1901
1902 if (ret == RAM_SAVE_CONTROL_DELAYED) {
1903 return true;
1904 }
1905
1906 if (bytes_xmit > 0) {
1907 ram_counters.normal++;
1908 } else if (bytes_xmit == 0) {
1909 ram_counters.duplicate++;
1910 }
1911
1912 return true;
1913}
1914
65dacaa0
XG
1915/*
1916 * directly send the page to the stream
1917 *
1918 * Returns the number of pages written.
1919 *
1920 * @rs: current RAM state
1921 * @block: block that contains the page we want to send
1922 * @offset: offset inside the block for the page
1923 * @buf: the page to be sent
1924 * @async: send to page asyncly
1925 */
1926static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1927 uint8_t *buf, bool async)
1928{
1929 ram_counters.transferred += save_page_header(rs, rs->f, block,
1930 offset | RAM_SAVE_FLAG_PAGE);
1931 if (async) {
1932 qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
1933 migrate_release_ram() &
1934 migration_in_postcopy());
1935 } else {
1936 qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
1937 }
1938 ram_counters.transferred += TARGET_PAGE_SIZE;
1939 ram_counters.normal++;
1940 return 1;
1941}
1942
56e93d26 1943/**
3d0684b2 1944 * ram_save_page: send the given page to the stream
56e93d26 1945 *
3d0684b2 1946 * Returns the number of pages written.
3fd3c4b3
DDAG
1947 * < 0 - error
1948 * >=0 - Number of pages written - this might legally be 0
1949 * if xbzrle noticed the page was the same.
56e93d26 1950 *
6f37bb8b 1951 * @rs: current RAM state
56e93d26
JQ
1952 * @block: block that contains the page we want to send
1953 * @offset: offset inside the block for the page
1954 * @last_stage: if we are at the completion stage
56e93d26 1955 */
a0a8aa14 1956static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
56e93d26
JQ
1957{
1958 int pages = -1;
56e93d26 1959 uint8_t *p;
56e93d26 1960 bool send_async = true;
a08f6890 1961 RAMBlock *block = pss->block;
a935e30f 1962 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
059ff0fb 1963 ram_addr_t current_addr = block->offset + offset;
56e93d26 1964
2f68e399 1965 p = block->host + offset;
1db9d8e5 1966 trace_ram_save_page(block->idstr, (uint64_t)offset, p);
56e93d26 1967
56e93d26 1968 XBZRLE_cache_lock();
d7400a34
XG
1969 if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
1970 migrate_use_xbzrle()) {
059ff0fb
XG
1971 pages = save_xbzrle_page(rs, &p, current_addr, block,
1972 offset, last_stage);
1973 if (!last_stage) {
1974 /* Can't send this cached data async, since the cache page
1975 * might get updated before it gets to the wire
56e93d26 1976 */
059ff0fb 1977 send_async = false;
56e93d26
JQ
1978 }
1979 }
1980
1981 /* XBZRLE overflow or normal page */
1982 if (pages == -1) {
65dacaa0 1983 pages = save_normal_page(rs, block, offset, p, send_async);
56e93d26
JQ
1984 }
1985
1986 XBZRLE_cache_unlock();
1987
1988 return pages;
1989}
1990
b9ee2f7d
JQ
1991static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
1992 ram_addr_t offset)
1993{
b9ee2f7d 1994 multifd_queue_page(block, offset);
b9ee2f7d
JQ
1995 ram_counters.normal++;
1996
1997 return 1;
1998}
1999
5e5fdcff 2000static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
6ef3771c 2001 ram_addr_t offset, uint8_t *source_buf)
56e93d26 2002{
53518d94 2003 RAMState *rs = ram_state;
a7a9a88f 2004 uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
5e5fdcff 2005 bool zero_page = false;
6ef3771c 2006 int ret;
56e93d26 2007
5e5fdcff
XG
2008 if (save_zero_page_to_file(rs, f, block, offset)) {
2009 zero_page = true;
2010 goto exit;
2011 }
2012
6ef3771c 2013 save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
34ab9e97
XG
2014
2015 /*
2016 * copy it to a internal buffer to avoid it being modified by VM
2017 * so that we can catch up the error during compression and
2018 * decompression
2019 */
2020 memcpy(source_buf, p, TARGET_PAGE_SIZE);
6ef3771c
XG
2021 ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
2022 if (ret < 0) {
2023 qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
b3be2896 2024 error_report("compressed data failed!");
5e5fdcff 2025 return false;
b3be2896 2026 }
56e93d26 2027
5e5fdcff 2028exit:
6ef3771c 2029 ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
5e5fdcff
XG
2030 return zero_page;
2031}
2032
2033static void
2034update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
2035{
76e03000
XG
2036 ram_counters.transferred += bytes_xmit;
2037
5e5fdcff
XG
2038 if (param->zero_page) {
2039 ram_counters.duplicate++;
76e03000 2040 return;
5e5fdcff 2041 }
76e03000
XG
2042
2043 /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
2044 compression_counters.compressed_size += bytes_xmit - 8;
2045 compression_counters.pages++;
56e93d26
JQ
2046}
2047
32b05495
XG
2048static bool save_page_use_compression(RAMState *rs);
2049
ce25d337 2050static void flush_compressed_data(RAMState *rs)
56e93d26
JQ
2051{
2052 int idx, len, thread_count;
2053
32b05495 2054 if (!save_page_use_compression(rs)) {
56e93d26
JQ
2055 return;
2056 }
2057 thread_count = migrate_compress_threads();
a7a9a88f 2058
0d9f9a5c 2059 qemu_mutex_lock(&comp_done_lock);
56e93d26 2060 for (idx = 0; idx < thread_count; idx++) {
a7a9a88f 2061 while (!comp_param[idx].done) {
0d9f9a5c 2062 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
56e93d26 2063 }
a7a9a88f 2064 }
0d9f9a5c 2065 qemu_mutex_unlock(&comp_done_lock);
a7a9a88f
LL
2066
2067 for (idx = 0; idx < thread_count; idx++) {
2068 qemu_mutex_lock(&comp_param[idx].mutex);
90e56fb4 2069 if (!comp_param[idx].quit) {
ce25d337 2070 len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
5e5fdcff
XG
2071 /*
2072 * it's safe to fetch zero_page without holding comp_done_lock
2073 * as there is no further request submitted to the thread,
2074 * i.e, the thread should be waiting for a request at this point.
2075 */
2076 update_compress_thread_counts(&comp_param[idx], len);
56e93d26 2077 }
a7a9a88f 2078 qemu_mutex_unlock(&comp_param[idx].mutex);
56e93d26
JQ
2079 }
2080}
2081
2082static inline void set_compress_params(CompressParam *param, RAMBlock *block,
2083 ram_addr_t offset)
2084{
2085 param->block = block;
2086 param->offset = offset;
2087}
2088
ce25d337
JQ
2089static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
2090 ram_addr_t offset)
56e93d26
JQ
2091{
2092 int idx, thread_count, bytes_xmit = -1, pages = -1;
1d58872a 2093 bool wait = migrate_compress_wait_thread();
56e93d26
JQ
2094
2095 thread_count = migrate_compress_threads();
0d9f9a5c 2096 qemu_mutex_lock(&comp_done_lock);
1d58872a
XG
2097retry:
2098 for (idx = 0; idx < thread_count; idx++) {
2099 if (comp_param[idx].done) {
2100 comp_param[idx].done = false;
2101 bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
2102 qemu_mutex_lock(&comp_param[idx].mutex);
2103 set_compress_params(&comp_param[idx], block, offset);
2104 qemu_cond_signal(&comp_param[idx].cond);
2105 qemu_mutex_unlock(&comp_param[idx].mutex);
2106 pages = 1;
5e5fdcff 2107 update_compress_thread_counts(&comp_param[idx], bytes_xmit);
56e93d26 2108 break;
56e93d26
JQ
2109 }
2110 }
1d58872a
XG
2111
2112 /*
2113 * wait for the free thread if the user specifies 'compress-wait-thread',
2114 * otherwise we will post the page out in the main thread as normal page.
2115 */
2116 if (pages < 0 && wait) {
2117 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
2118 goto retry;
2119 }
0d9f9a5c 2120 qemu_mutex_unlock(&comp_done_lock);
56e93d26
JQ
2121
2122 return pages;
2123}
2124
3d0684b2
JQ
2125/**
2126 * find_dirty_block: find the next dirty page and update any state
2127 * associated with the search process.
b9e60928 2128 *
3d0684b2 2129 * Returns if a page is found
b9e60928 2130 *
6f37bb8b 2131 * @rs: current RAM state
3d0684b2
JQ
2132 * @pss: data about the state of the current dirty page scan
2133 * @again: set to false if the search has scanned the whole of RAM
b9e60928 2134 */
f20e2865 2135static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
b9e60928 2136{
f20e2865 2137 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
6f37bb8b 2138 if (pss->complete_round && pss->block == rs->last_seen_block &&
a935e30f 2139 pss->page >= rs->last_page) {
b9e60928
DDAG
2140 /*
2141 * We've been once around the RAM and haven't found anything.
2142 * Give up.
2143 */
2144 *again = false;
2145 return false;
2146 }
a935e30f 2147 if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
b9e60928 2148 /* Didn't find anything in this RAM Block */
a935e30f 2149 pss->page = 0;
b9e60928
DDAG
2150 pss->block = QLIST_NEXT_RCU(pss->block, next);
2151 if (!pss->block) {
48df9d80
XG
2152 /*
2153 * If memory migration starts over, we will meet a dirtied page
2154 * which may still exists in compression threads's ring, so we
2155 * should flush the compressed data to make sure the new page
2156 * is not overwritten by the old one in the destination.
2157 *
2158 * Also If xbzrle is on, stop using the data compression at this
2159 * point. In theory, xbzrle can do better than compression.
2160 */
2161 flush_compressed_data(rs);
2162
b9e60928
DDAG
2163 /* Hit the end of the list */
2164 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
2165 /* Flag that we've looped */
2166 pss->complete_round = true;
6f37bb8b 2167 rs->ram_bulk_stage = false;
b9e60928
DDAG
2168 }
2169 /* Didn't find anything this time, but try again on the new block */
2170 *again = true;
2171 return false;
2172 } else {
2173 /* Can go around again, but... */
2174 *again = true;
2175 /* We've found something so probably don't need to */
2176 return true;
2177 }
2178}
2179
3d0684b2
JQ
2180/**
2181 * unqueue_page: gets a page of the queue
2182 *
a82d593b 2183 * Helper for 'get_queued_page' - gets a page off the queue
a82d593b 2184 *
3d0684b2
JQ
2185 * Returns the block of the page (or NULL if none available)
2186 *
ec481c6c 2187 * @rs: current RAM state
3d0684b2 2188 * @offset: used to return the offset within the RAMBlock
a82d593b 2189 */
f20e2865 2190static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
a82d593b
DDAG
2191{
2192 RAMBlock *block = NULL;
2193
ae526e32
XG
2194 if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
2195 return NULL;
2196 }
2197
ec481c6c
JQ
2198 qemu_mutex_lock(&rs->src_page_req_mutex);
2199 if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2200 struct RAMSrcPageRequest *entry =
2201 QSIMPLEQ_FIRST(&rs->src_page_requests);
a82d593b
DDAG
2202 block = entry->rb;
2203 *offset = entry->offset;
a82d593b
DDAG
2204
2205 if (entry->len > TARGET_PAGE_SIZE) {
2206 entry->len -= TARGET_PAGE_SIZE;
2207 entry->offset += TARGET_PAGE_SIZE;
2208 } else {
2209 memory_region_unref(block->mr);
ec481c6c 2210 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
a82d593b 2211 g_free(entry);
e03a34f8 2212 migration_consume_urgent_request();
a82d593b
DDAG
2213 }
2214 }
ec481c6c 2215 qemu_mutex_unlock(&rs->src_page_req_mutex);
a82d593b
DDAG
2216
2217 return block;
2218}
2219
3d0684b2
JQ
2220/**
2221 * get_queued_page: unqueue a page from the postocpy requests
2222 *
2223 * Skips pages that are already sent (!dirty)
a82d593b 2224 *
3d0684b2 2225 * Returns if a queued page is found
a82d593b 2226 *
6f37bb8b 2227 * @rs: current RAM state
3d0684b2 2228 * @pss: data about the state of the current dirty page scan
a82d593b 2229 */
f20e2865 2230static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
a82d593b
DDAG
2231{
2232 RAMBlock *block;
2233 ram_addr_t offset;
2234 bool dirty;
2235
2236 do {
f20e2865 2237 block = unqueue_page(rs, &offset);
a82d593b
DDAG
2238 /*
2239 * We're sending this page, and since it's postcopy nothing else
2240 * will dirty it, and we must make sure it doesn't get sent again
2241 * even if this queue request was received after the background
2242 * search already sent it.
2243 */
2244 if (block) {
f20e2865
JQ
2245 unsigned long page;
2246
6b6712ef
JQ
2247 page = offset >> TARGET_PAGE_BITS;
2248 dirty = test_bit(page, block->bmap);
a82d593b 2249 if (!dirty) {
06b10688 2250 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
6b6712ef 2251 page, test_bit(page, block->unsentmap));
a82d593b 2252 } else {
f20e2865 2253 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
a82d593b
DDAG
2254 }
2255 }
2256
2257 } while (block && !dirty);
2258
2259 if (block) {
2260 /*
2261 * As soon as we start servicing pages out of order, then we have
2262 * to kill the bulk stage, since the bulk stage assumes
2263 * in (migration_bitmap_find_and_reset_dirty) that every page is
2264 * dirty, that's no longer true.
2265 */
6f37bb8b 2266 rs->ram_bulk_stage = false;
a82d593b
DDAG
2267
2268 /*
2269 * We want the background search to continue from the queued page
2270 * since the guest is likely to want other pages near to the page
2271 * it just requested.
2272 */
2273 pss->block = block;
a935e30f 2274 pss->page = offset >> TARGET_PAGE_BITS;
a82d593b
DDAG
2275 }
2276
2277 return !!block;
2278}
2279
6c595cde 2280/**
5e58f968
JQ
2281 * migration_page_queue_free: drop any remaining pages in the ram
2282 * request queue
6c595cde 2283 *
3d0684b2
JQ
2284 * It should be empty at the end anyway, but in error cases there may
2285 * be some left. in case that there is any page left, we drop it.
2286 *
6c595cde 2287 */
83c13382 2288static void migration_page_queue_free(RAMState *rs)
6c595cde 2289{
ec481c6c 2290 struct RAMSrcPageRequest *mspr, *next_mspr;
6c595cde
DDAG
2291 /* This queue generally should be empty - but in the case of a failed
2292 * migration might have some droppings in.
2293 */
2294 rcu_read_lock();
ec481c6c 2295 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
6c595cde 2296 memory_region_unref(mspr->rb->mr);
ec481c6c 2297 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
6c595cde
DDAG
2298 g_free(mspr);
2299 }
2300 rcu_read_unlock();
2301}
2302
2303/**
3d0684b2
JQ
2304 * ram_save_queue_pages: queue the page for transmission
2305 *
2306 * A request from postcopy destination for example.
2307 *
2308 * Returns zero on success or negative on error
2309 *
3d0684b2
JQ
2310 * @rbname: Name of the RAMBLock of the request. NULL means the
2311 * same that last one.
2312 * @start: starting address from the start of the RAMBlock
2313 * @len: length (in bytes) to send
6c595cde 2314 */
96506894 2315int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
6c595cde
DDAG
2316{
2317 RAMBlock *ramblock;
53518d94 2318 RAMState *rs = ram_state;
6c595cde 2319
9360447d 2320 ram_counters.postcopy_requests++;
6c595cde
DDAG
2321 rcu_read_lock();
2322 if (!rbname) {
2323 /* Reuse last RAMBlock */
68a098f3 2324 ramblock = rs->last_req_rb;
6c595cde
DDAG
2325
2326 if (!ramblock) {
2327 /*
2328 * Shouldn't happen, we can't reuse the last RAMBlock if
2329 * it's the 1st request.
2330 */
2331 error_report("ram_save_queue_pages no previous block");
2332 goto err;
2333 }
2334 } else {
2335 ramblock = qemu_ram_block_by_name(rbname);
2336
2337 if (!ramblock) {
2338 /* We shouldn't be asked for a non-existent RAMBlock */
2339 error_report("ram_save_queue_pages no block '%s'", rbname);
2340 goto err;
2341 }
68a098f3 2342 rs->last_req_rb = ramblock;
6c595cde
DDAG
2343 }
2344 trace_ram_save_queue_pages(ramblock->idstr, start, len);
2345 if (start+len > ramblock->used_length) {
9458ad6b
JQ
2346 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
2347 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
6c595cde
DDAG
2348 __func__, start, len, ramblock->used_length);
2349 goto err;
2350 }
2351
ec481c6c
JQ
2352 struct RAMSrcPageRequest *new_entry =
2353 g_malloc0(sizeof(struct RAMSrcPageRequest));
6c595cde
DDAG
2354 new_entry->rb = ramblock;
2355 new_entry->offset = start;
2356 new_entry->len = len;
2357
2358 memory_region_ref(ramblock->mr);
ec481c6c
JQ
2359 qemu_mutex_lock(&rs->src_page_req_mutex);
2360 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
e03a34f8 2361 migration_make_urgent_request();
ec481c6c 2362 qemu_mutex_unlock(&rs->src_page_req_mutex);
6c595cde
DDAG
2363 rcu_read_unlock();
2364
2365 return 0;
2366
2367err:
2368 rcu_read_unlock();
2369 return -1;
2370}
2371
d7400a34
XG
2372static bool save_page_use_compression(RAMState *rs)
2373{
2374 if (!migrate_use_compression()) {
2375 return false;
2376 }
2377
2378 /*
2379 * If xbzrle is on, stop using the data compression after first
2380 * round of migration even if compression is enabled. In theory,
2381 * xbzrle can do better than compression.
2382 */
2383 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
2384 return true;
2385 }
2386
2387 return false;
2388}
2389
5e5fdcff
XG
2390/*
2391 * try to compress the page before posting it out, return true if the page
2392 * has been properly handled by compression, otherwise needs other
2393 * paths to handle it
2394 */
2395static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
2396{
2397 if (!save_page_use_compression(rs)) {
2398 return false;
2399 }
2400
2401 /*
2402 * When starting the process of a new block, the first page of
2403 * the block should be sent out before other pages in the same
2404 * block, and all the pages in last block should have been sent
2405 * out, keeping this order is important, because the 'cont' flag
2406 * is used to avoid resending the block name.
2407 *
2408 * We post the fist page as normal page as compression will take
2409 * much CPU resource.
2410 */
2411 if (block != rs->last_sent_block) {
2412 flush_compressed_data(rs);
2413 return false;
2414 }
2415
2416 if (compress_page_with_multi_thread(rs, block, offset) > 0) {
2417 return true;
2418 }
2419
76e03000 2420 compression_counters.busy++;
5e5fdcff
XG
2421 return false;
2422}
2423
a82d593b 2424/**
3d0684b2 2425 * ram_save_target_page: save one target page
a82d593b 2426 *
3d0684b2 2427 * Returns the number of pages written
a82d593b 2428 *
6f37bb8b 2429 * @rs: current RAM state
3d0684b2 2430 * @pss: data about the page we want to send
a82d593b 2431 * @last_stage: if we are at the completion stage
a82d593b 2432 */
a0a8aa14 2433static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
f20e2865 2434 bool last_stage)
a82d593b 2435{
a8ec91f9
XG
2436 RAMBlock *block = pss->block;
2437 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2438 int res;
2439
2440 if (control_save_page(rs, block, offset, &res)) {
2441 return res;
2442 }
2443
5e5fdcff
XG
2444 if (save_compress_page(rs, block, offset)) {
2445 return 1;
d7400a34
XG
2446 }
2447
2448 res = save_zero_page(rs, block, offset);
2449 if (res > 0) {
2450 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2451 * page would be stale
2452 */
2453 if (!save_page_use_compression(rs)) {
2454 XBZRLE_cache_lock();
2455 xbzrle_cache_zero_page(rs, block->offset + offset);
2456 XBZRLE_cache_unlock();
2457 }
2458 ram_release_pages(block->idstr, offset, res);
2459 return res;
2460 }
2461
da3f56cb 2462 /*
5e5fdcff
XG
2463 * do not use multifd for compression as the first page in the new
2464 * block should be posted out before sending the compressed page
da3f56cb 2465 */
5e5fdcff 2466 if (!save_page_use_compression(rs) && migrate_use_multifd()) {
b9ee2f7d 2467 return ram_save_multifd_page(rs, block, offset);
a82d593b
DDAG
2468 }
2469
1faa5665 2470 return ram_save_page(rs, pss, last_stage);
a82d593b
DDAG
2471}
2472
2473/**
3d0684b2 2474 * ram_save_host_page: save a whole host page
a82d593b 2475 *
3d0684b2
JQ
2476 * Starting at *offset send pages up to the end of the current host
2477 * page. It's valid for the initial offset to point into the middle of
2478 * a host page in which case the remainder of the hostpage is sent.
2479 * Only dirty target pages are sent. Note that the host page size may
2480 * be a huge page for this block.
1eb3fc0a
DDAG
2481 * The saving stops at the boundary of the used_length of the block
2482 * if the RAMBlock isn't a multiple of the host page size.
a82d593b 2483 *
3d0684b2
JQ
2484 * Returns the number of pages written or negative on error
2485 *
6f37bb8b 2486 * @rs: current RAM state
3d0684b2 2487 * @ms: current migration state
3d0684b2 2488 * @pss: data about the page we want to send
a82d593b 2489 * @last_stage: if we are at the completion stage
a82d593b 2490 */
a0a8aa14 2491static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
f20e2865 2492 bool last_stage)
a82d593b
DDAG
2493{
2494 int tmppages, pages = 0;
a935e30f
JQ
2495 size_t pagesize_bits =
2496 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
4c011c37 2497
fbd162e6 2498 if (ramblock_is_ignored(pss->block)) {
b895de50
CLG
2499 error_report("block %s should not be migrated !", pss->block->idstr);
2500 return 0;
2501 }
2502
a82d593b 2503 do {
1faa5665
XG
2504 /* Check the pages is dirty and if it is send it */
2505 if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2506 pss->page++;
2507 continue;
2508 }
2509
f20e2865 2510 tmppages = ram_save_target_page(rs, pss, last_stage);
a82d593b
DDAG
2511 if (tmppages < 0) {
2512 return tmppages;
2513 }
2514
2515 pages += tmppages;
1faa5665
XG
2516 if (pss->block->unsentmap) {
2517 clear_bit(pss->page, pss->block->unsentmap);
2518 }
2519
a935e30f 2520 pss->page++;
1eb3fc0a
DDAG
2521 } while ((pss->page & (pagesize_bits - 1)) &&
2522 offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
a82d593b
DDAG
2523
2524 /* The offset we leave with is the last one we looked at */
a935e30f 2525 pss->page--;
a82d593b
DDAG
2526 return pages;
2527}
6c595cde 2528
56e93d26 2529/**
3d0684b2 2530 * ram_find_and_save_block: finds a dirty page and sends it to f
56e93d26
JQ
2531 *
2532 * Called within an RCU critical section.
2533 *
e8f3735f
XG
2534 * Returns the number of pages written where zero means no dirty pages,
2535 * or negative on error
56e93d26 2536 *
6f37bb8b 2537 * @rs: current RAM state
56e93d26 2538 * @last_stage: if we are at the completion stage
a82d593b
DDAG
2539 *
2540 * On systems where host-page-size > target-page-size it will send all the
2541 * pages in a host page that are dirty.
56e93d26
JQ
2542 */
2543
ce25d337 2544static int ram_find_and_save_block(RAMState *rs, bool last_stage)
56e93d26 2545{
b8fb8cb7 2546 PageSearchStatus pss;
56e93d26 2547 int pages = 0;
b9e60928 2548 bool again, found;
56e93d26 2549
0827b9e9
AA
2550 /* No dirty page as there is zero RAM */
2551 if (!ram_bytes_total()) {
2552 return pages;
2553 }
2554
6f37bb8b 2555 pss.block = rs->last_seen_block;
a935e30f 2556 pss.page = rs->last_page;
b8fb8cb7
DDAG
2557 pss.complete_round = false;
2558
2559 if (!pss.block) {
2560 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
2561 }
56e93d26 2562
b9e60928 2563 do {
a82d593b 2564 again = true;
f20e2865 2565 found = get_queued_page(rs, &pss);
b9e60928 2566
a82d593b
DDAG
2567 if (!found) {
2568 /* priority queue empty, so just search for something dirty */
f20e2865 2569 found = find_dirty_block(rs, &pss, &again);
a82d593b 2570 }
f3f491fc 2571
a82d593b 2572 if (found) {
f20e2865 2573 pages = ram_save_host_page(rs, &pss, last_stage);
56e93d26 2574 }
b9e60928 2575 } while (!pages && again);
56e93d26 2576
6f37bb8b 2577 rs->last_seen_block = pss.block;
a935e30f 2578 rs->last_page = pss.page;
56e93d26
JQ
2579
2580 return pages;
2581}
2582
2583void acct_update_position(QEMUFile *f, size_t size, bool zero)
2584{
2585 uint64_t pages = size / TARGET_PAGE_SIZE;
f7ccd61b 2586
56e93d26 2587 if (zero) {
9360447d 2588 ram_counters.duplicate += pages;
56e93d26 2589 } else {
9360447d
JQ
2590 ram_counters.normal += pages;
2591 ram_counters.transferred += size;
56e93d26
JQ
2592 qemu_update_position(f, size);
2593 }
2594}
2595
fbd162e6 2596static uint64_t ram_bytes_total_common(bool count_ignored)
56e93d26
JQ
2597{
2598 RAMBlock *block;
2599 uint64_t total = 0;
2600
2601 rcu_read_lock();
fbd162e6
YK
2602 if (count_ignored) {
2603 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2604 total += block->used_length;
2605 }
2606 } else {
2607 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2608 total += block->used_length;
2609 }
99e15582 2610 }
56e93d26
JQ
2611 rcu_read_unlock();
2612 return total;
2613}
2614
fbd162e6
YK
2615uint64_t ram_bytes_total(void)
2616{
2617 return ram_bytes_total_common(false);
2618}
2619
f265e0e4 2620static void xbzrle_load_setup(void)
56e93d26 2621{
f265e0e4 2622 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
56e93d26
JQ
2623}
2624
f265e0e4
JQ
2625static void xbzrle_load_cleanup(void)
2626{
2627 g_free(XBZRLE.decoded_buf);
2628 XBZRLE.decoded_buf = NULL;
2629}
2630
7d7c96be
PX
2631static void ram_state_cleanup(RAMState **rsp)
2632{
b9ccaf6d
DDAG
2633 if (*rsp) {
2634 migration_page_queue_free(*rsp);
2635 qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2636 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2637 g_free(*rsp);
2638 *rsp = NULL;
2639 }
7d7c96be
PX
2640}
2641
84593a08
PX
2642static void xbzrle_cleanup(void)
2643{
2644 XBZRLE_cache_lock();
2645 if (XBZRLE.cache) {
2646 cache_fini(XBZRLE.cache);
2647 g_free(XBZRLE.encoded_buf);
2648 g_free(XBZRLE.current_buf);
2649 g_free(XBZRLE.zero_target_page);
2650 XBZRLE.cache = NULL;
2651 XBZRLE.encoded_buf = NULL;
2652 XBZRLE.current_buf = NULL;
2653 XBZRLE.zero_target_page = NULL;
2654 }
2655 XBZRLE_cache_unlock();
2656}
2657
f265e0e4 2658static void ram_save_cleanup(void *opaque)
56e93d26 2659{
53518d94 2660 RAMState **rsp = opaque;
6b6712ef 2661 RAMBlock *block;
eb859c53 2662
2ff64038
LZ
2663 /* caller have hold iothread lock or is in a bh, so there is
2664 * no writing race against this migration_bitmap
2665 */
6b6712ef
JQ
2666 memory_global_dirty_log_stop();
2667
fbd162e6 2668 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
6b6712ef
JQ
2669 g_free(block->bmap);
2670 block->bmap = NULL;
2671 g_free(block->unsentmap);
2672 block->unsentmap = NULL;
56e93d26
JQ
2673 }
2674
84593a08 2675 xbzrle_cleanup();
f0afa331 2676 compress_threads_save_cleanup();
7d7c96be 2677 ram_state_cleanup(rsp);
56e93d26
JQ
2678}
2679
6f37bb8b 2680static void ram_state_reset(RAMState *rs)
56e93d26 2681{
6f37bb8b
JQ
2682 rs->last_seen_block = NULL;
2683 rs->last_sent_block = NULL;
269ace29 2684 rs->last_page = 0;
6f37bb8b
JQ
2685 rs->last_version = ram_list.version;
2686 rs->ram_bulk_stage = true;
6eeb63f7 2687 rs->fpo_enabled = false;
56e93d26
JQ
2688}
2689
2690#define MAX_WAIT 50 /* ms, half buffered_file limit */
2691
4f2e4252
DDAG
2692/*
2693 * 'expected' is the value you expect the bitmap mostly to be full
2694 * of; it won't bother printing lines that are all this value.
2695 * If 'todump' is null the migration bitmap is dumped.
2696 */
6b6712ef
JQ
2697void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
2698 unsigned long pages)
4f2e4252 2699{
4f2e4252
DDAG
2700 int64_t cur;
2701 int64_t linelen = 128;
2702 char linebuf[129];
2703
6b6712ef 2704 for (cur = 0; cur < pages; cur += linelen) {
4f2e4252
DDAG
2705 int64_t curb;
2706 bool found = false;
2707 /*
2708 * Last line; catch the case where the line length
2709 * is longer than remaining ram
2710 */
6b6712ef
JQ
2711 if (cur + linelen > pages) {
2712 linelen = pages - cur;
4f2e4252
DDAG
2713 }
2714 for (curb = 0; curb < linelen; curb++) {
2715 bool thisbit = test_bit(cur + curb, todump);
2716 linebuf[curb] = thisbit ? '1' : '.';
2717 found = found || (thisbit != expected);
2718 }
2719 if (found) {
2720 linebuf[curb] = '\0';
2721 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
2722 }
2723 }
2724}
2725
e0b266f0
DDAG
2726/* **** functions for postcopy ***** */
2727
ced1c616
PB
2728void ram_postcopy_migrated_memory_release(MigrationState *ms)
2729{
2730 struct RAMBlock *block;
ced1c616 2731
fbd162e6 2732 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
6b6712ef
JQ
2733 unsigned long *bitmap = block->bmap;
2734 unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2735 unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
ced1c616
PB
2736
2737 while (run_start < range) {
2738 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
aaa2064c 2739 ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
ced1c616
PB
2740 (run_end - run_start) << TARGET_PAGE_BITS);
2741 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2742 }
2743 }
2744}
2745
3d0684b2
JQ
2746/**
2747 * postcopy_send_discard_bm_ram: discard a RAMBlock
2748 *
2749 * Returns zero on success
2750 *
e0b266f0
DDAG
2751 * Callback from postcopy_each_ram_send_discard for each RAMBlock
2752 * Note: At this point the 'unsentmap' is the processed bitmap combined
2753 * with the dirtymap; so a '1' means it's either dirty or unsent.
3d0684b2
JQ
2754 *
2755 * @ms: current migration state
2756 * @pds: state for postcopy
2757 * @start: RAMBlock starting page
2758 * @length: RAMBlock size
e0b266f0
DDAG
2759 */
2760static int postcopy_send_discard_bm_ram(MigrationState *ms,
2761 PostcopyDiscardState *pds,
6b6712ef 2762 RAMBlock *block)
e0b266f0 2763{
6b6712ef 2764 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
e0b266f0 2765 unsigned long current;
6b6712ef 2766 unsigned long *unsentmap = block->unsentmap;
e0b266f0 2767
6b6712ef 2768 for (current = 0; current < end; ) {
e0b266f0
DDAG
2769 unsigned long one = find_next_bit(unsentmap, end, current);
2770
2771 if (one <= end) {
2772 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
2773 unsigned long discard_length;
2774
2775 if (zero >= end) {
2776 discard_length = end - one;
2777 } else {
2778 discard_length = zero - one;
2779 }
d688c62d
DDAG
2780 if (discard_length) {
2781 postcopy_discard_send_range(ms, pds, one, discard_length);
2782 }
e0b266f0
DDAG
2783 current = one + discard_length;
2784 } else {
2785 current = one;
2786 }
2787 }
2788
2789 return 0;
2790}
2791
3d0684b2
JQ
2792/**
2793 * postcopy_each_ram_send_discard: discard all RAMBlocks
2794 *
2795 * Returns 0 for success or negative for error
2796 *
e0b266f0
DDAG
2797 * Utility for the outgoing postcopy code.
2798 * Calls postcopy_send_discard_bm_ram for each RAMBlock
2799 * passing it bitmap indexes and name.
e0b266f0
DDAG
2800 * (qemu_ram_foreach_block ends up passing unscaled lengths
2801 * which would mean postcopy code would have to deal with target page)
3d0684b2
JQ
2802 *
2803 * @ms: current migration state
e0b266f0
DDAG
2804 */
2805static int postcopy_each_ram_send_discard(MigrationState *ms)
2806{
2807 struct RAMBlock *block;
2808 int ret;
2809
fbd162e6 2810 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
6b6712ef
JQ
2811 PostcopyDiscardState *pds =
2812 postcopy_discard_send_init(ms, block->idstr);
e0b266f0
DDAG
2813
2814 /*
2815 * Postcopy sends chunks of bitmap over the wire, but it
2816 * just needs indexes at this point, avoids it having
2817 * target page specific code.
2818 */
6b6712ef 2819 ret = postcopy_send_discard_bm_ram(ms, pds, block);
e0b266f0
DDAG
2820 postcopy_discard_send_finish(ms, pds);
2821 if (ret) {
2822 return ret;
2823 }
2824 }
2825
2826 return 0;
2827}
2828
3d0684b2
JQ
2829/**
2830 * postcopy_chunk_hostpages_pass: canocalize bitmap in hostpages
2831 *
2832 * Helper for postcopy_chunk_hostpages; it's called twice to
2833 * canonicalize the two bitmaps, that are similar, but one is
2834 * inverted.
99e314eb 2835 *
3d0684b2
JQ
2836 * Postcopy requires that all target pages in a hostpage are dirty or
2837 * clean, not a mix. This function canonicalizes the bitmaps.
99e314eb 2838 *
3d0684b2
JQ
2839 * @ms: current migration state
2840 * @unsent_pass: if true we need to canonicalize partially unsent host pages
2841 * otherwise we need to canonicalize partially dirty host pages
2842 * @block: block that contains the page we want to canonicalize
2843 * @pds: state for postcopy
99e314eb
DDAG
2844 */
2845static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
2846 RAMBlock *block,
2847 PostcopyDiscardState *pds)
2848{
53518d94 2849 RAMState *rs = ram_state;
6b6712ef
JQ
2850 unsigned long *bitmap = block->bmap;
2851 unsigned long *unsentmap = block->unsentmap;
29c59172 2852 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
6b6712ef 2853 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
99e314eb
DDAG
2854 unsigned long run_start;
2855
29c59172
DDAG
2856 if (block->page_size == TARGET_PAGE_SIZE) {
2857 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2858 return;
2859 }
2860
99e314eb
DDAG
2861 if (unsent_pass) {
2862 /* Find a sent page */
6b6712ef 2863 run_start = find_next_zero_bit(unsentmap, pages, 0);
99e314eb
DDAG
2864 } else {
2865 /* Find a dirty page */
6b6712ef 2866 run_start = find_next_bit(bitmap, pages, 0);
99e314eb
DDAG
2867 }
2868
6b6712ef 2869 while (run_start < pages) {
99e314eb
DDAG
2870 bool do_fixup = false;
2871 unsigned long fixup_start_addr;
2872 unsigned long host_offset;
2873
2874 /*
2875 * If the start of this run of pages is in the middle of a host
2876 * page, then we need to fixup this host page.
2877 */
2878 host_offset = run_start % host_ratio;
2879 if (host_offset) {
2880 do_fixup = true;
2881 run_start -= host_offset;
2882 fixup_start_addr = run_start;
2883 /* For the next pass */
2884 run_start = run_start + host_ratio;
2885 } else {
2886 /* Find the end of this run */
2887 unsigned long run_end;
2888 if (unsent_pass) {
6b6712ef 2889 run_end = find_next_bit(unsentmap, pages, run_start + 1);
99e314eb 2890 } else {
6b6712ef 2891 run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
99e314eb
DDAG
2892 }
2893 /*
2894 * If the end isn't at the start of a host page, then the
2895 * run doesn't finish at the end of a host page
2896 * and we need to discard.
2897 */
2898 host_offset = run_end % host_ratio;
2899 if (host_offset) {
2900 do_fixup = true;
2901 fixup_start_addr = run_end - host_offset;
2902 /*
2903 * This host page has gone, the next loop iteration starts
2904 * from after the fixup
2905 */
2906 run_start = fixup_start_addr + host_ratio;
2907 } else {
2908 /*
2909 * No discards on this iteration, next loop starts from
2910 * next sent/dirty page
2911 */
2912 run_start = run_end + 1;
2913 }
2914 }
2915
2916 if (do_fixup) {
2917 unsigned long page;
2918
2919 /* Tell the destination to discard this page */
2920 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
2921 /* For the unsent_pass we:
2922 * discard partially sent pages
2923 * For the !unsent_pass (dirty) we:
2924 * discard partially dirty pages that were sent
2925 * (any partially sent pages were already discarded
2926 * by the previous unsent_pass)
2927 */
2928 postcopy_discard_send_range(ms, pds, fixup_start_addr,
2929 host_ratio);
2930 }
2931
2932 /* Clean up the bitmap */
2933 for (page = fixup_start_addr;
2934 page < fixup_start_addr + host_ratio; page++) {
2935 /* All pages in this host page are now not sent */
2936 set_bit(page, unsentmap);
2937
2938 /*
2939 * Remark them as dirty, updating the count for any pages
2940 * that weren't previously dirty.
2941 */
0d8ec885 2942 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
99e314eb
DDAG
2943 }
2944 }
2945
2946 if (unsent_pass) {
2947 /* Find the next sent page for the next iteration */
6b6712ef 2948 run_start = find_next_zero_bit(unsentmap, pages, run_start);
99e314eb
DDAG
2949 } else {
2950 /* Find the next dirty page for the next iteration */
6b6712ef 2951 run_start = find_next_bit(bitmap, pages, run_start);
99e314eb
DDAG
2952 }
2953 }
2954}
2955
3d0684b2
JQ
2956/**
2957 * postcopy_chuck_hostpages: discrad any partially sent host page
2958 *
99e314eb
DDAG
2959 * Utility for the outgoing postcopy code.
2960 *
2961 * Discard any partially sent host-page size chunks, mark any partially
29c59172
DDAG
2962 * dirty host-page size chunks as all dirty. In this case the host-page
2963 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
99e314eb 2964 *
3d0684b2
JQ
2965 * Returns zero on success
2966 *
2967 * @ms: current migration state
6b6712ef 2968 * @block: block we want to work with
99e314eb 2969 */
6b6712ef 2970static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
99e314eb 2971{
6b6712ef
JQ
2972 PostcopyDiscardState *pds =
2973 postcopy_discard_send_init(ms, block->idstr);
99e314eb 2974
6b6712ef
JQ
2975 /* First pass: Discard all partially sent host pages */
2976 postcopy_chunk_hostpages_pass(ms, true, block, pds);
2977 /*
2978 * Second pass: Ensure that all partially dirty host pages are made
2979 * fully dirty.
2980 */
2981 postcopy_chunk_hostpages_pass(ms, false, block, pds);
99e314eb 2982
6b6712ef 2983 postcopy_discard_send_finish(ms, pds);
99e314eb
DDAG
2984 return 0;
2985}
2986
3d0684b2
JQ
2987/**
2988 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2989 *
2990 * Returns zero on success
2991 *
e0b266f0
DDAG
2992 * Transmit the set of pages to be discarded after precopy to the target
2993 * these are pages that:
2994 * a) Have been previously transmitted but are now dirty again
2995 * b) Pages that have never been transmitted, this ensures that
2996 * any pages on the destination that have been mapped by background
2997 * tasks get discarded (transparent huge pages is the specific concern)
2998 * Hopefully this is pretty sparse
3d0684b2
JQ
2999 *
3000 * @ms: current migration state
e0b266f0
DDAG
3001 */
3002int ram_postcopy_send_discard_bitmap(MigrationState *ms)
3003{
53518d94 3004 RAMState *rs = ram_state;
6b6712ef 3005 RAMBlock *block;
e0b266f0 3006 int ret;
e0b266f0
DDAG
3007
3008 rcu_read_lock();
3009
3010 /* This should be our last sync, the src is now paused */
eb859c53 3011 migration_bitmap_sync(rs);
e0b266f0 3012
6b6712ef
JQ
3013 /* Easiest way to make sure we don't resume in the middle of a host-page */
3014 rs->last_seen_block = NULL;
3015 rs->last_sent_block = NULL;
3016 rs->last_page = 0;
e0b266f0 3017
fbd162e6 3018 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
6b6712ef
JQ
3019 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
3020 unsigned long *bitmap = block->bmap;
3021 unsigned long *unsentmap = block->unsentmap;
3022
3023 if (!unsentmap) {
3024 /* We don't have a safe way to resize the sentmap, so
3025 * if the bitmap was resized it will be NULL at this
3026 * point.
3027 */
3028 error_report("migration ram resized during precopy phase");
3029 rcu_read_unlock();
3030 return -EINVAL;
3031 }
3032 /* Deal with TPS != HPS and huge pages */
3033 ret = postcopy_chunk_hostpages(ms, block);
3034 if (ret) {
3035 rcu_read_unlock();
3036 return ret;
3037 }
e0b266f0 3038
6b6712ef
JQ
3039 /*
3040 * Update the unsentmap to be unsentmap = unsentmap | dirty
3041 */
3042 bitmap_or(unsentmap, unsentmap, bitmap, pages);
e0b266f0 3043#ifdef DEBUG_POSTCOPY
6b6712ef 3044 ram_debug_dump_bitmap(unsentmap, true, pages);
e0b266f0 3045#endif
6b6712ef
JQ
3046 }
3047 trace_ram_postcopy_send_discard_bitmap();
e0b266f0
DDAG
3048
3049 ret = postcopy_each_ram_send_discard(ms);
3050 rcu_read_unlock();
3051
3052 return ret;
3053}
3054
3d0684b2
JQ
3055/**
3056 * ram_discard_range: discard dirtied pages at the beginning of postcopy
e0b266f0 3057 *
3d0684b2 3058 * Returns zero on success
e0b266f0 3059 *
36449157
JQ
3060 * @rbname: name of the RAMBlock of the request. NULL means the
3061 * same that last one.
3d0684b2
JQ
3062 * @start: RAMBlock starting page
3063 * @length: RAMBlock size
e0b266f0 3064 */
aaa2064c 3065int ram_discard_range(const char *rbname, uint64_t start, size_t length)
e0b266f0
DDAG
3066{
3067 int ret = -1;
3068
36449157 3069 trace_ram_discard_range(rbname, start, length);
d3a5038c 3070
e0b266f0 3071 rcu_read_lock();
36449157 3072 RAMBlock *rb = qemu_ram_block_by_name(rbname);
e0b266f0
DDAG
3073
3074 if (!rb) {
36449157 3075 error_report("ram_discard_range: Failed to find block '%s'", rbname);
e0b266f0
DDAG
3076 goto err;
3077 }
3078
814bb08f
PX
3079 /*
3080 * On source VM, we don't need to update the received bitmap since
3081 * we don't even have one.
3082 */
3083 if (rb->receivedmap) {
3084 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
3085 length >> qemu_target_page_bits());
3086 }
3087
d3a5038c 3088 ret = ram_block_discard_range(rb, start, length);
e0b266f0
DDAG
3089
3090err:
3091 rcu_read_unlock();
3092
3093 return ret;
3094}
3095
84593a08
PX
3096/*
3097 * For every allocation, we will try not to crash the VM if the
3098 * allocation failed.
3099 */
3100static int xbzrle_init(void)
3101{
3102 Error *local_err = NULL;
3103
3104 if (!migrate_use_xbzrle()) {
3105 return 0;
3106 }
3107
3108 XBZRLE_cache_lock();
3109
3110 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
3111 if (!XBZRLE.zero_target_page) {
3112 error_report("%s: Error allocating zero page", __func__);
3113 goto err_out;
3114 }
3115
3116 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
3117 TARGET_PAGE_SIZE, &local_err);
3118 if (!XBZRLE.cache) {
3119 error_report_err(local_err);
3120 goto free_zero_page;
3121 }
3122
3123 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
3124 if (!XBZRLE.encoded_buf) {
3125 error_report("%s: Error allocating encoded_buf", __func__);
3126 goto free_cache;
3127 }
3128
3129 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
3130 if (!XBZRLE.current_buf) {
3131 error_report("%s: Error allocating current_buf", __func__);
3132 goto free_encoded_buf;
3133 }
3134
3135 /* We are all good */
3136 XBZRLE_cache_unlock();
3137 return 0;
3138
3139free_encoded_buf:
3140 g_free(XBZRLE.encoded_buf);
3141 XBZRLE.encoded_buf = NULL;
3142free_cache:
3143 cache_fini(XBZRLE.cache);
3144 XBZRLE.cache = NULL;
3145free_zero_page:
3146 g_free(XBZRLE.zero_target_page);
3147 XBZRLE.zero_target_page = NULL;
3148err_out:
3149 XBZRLE_cache_unlock();
3150 return -ENOMEM;
3151}
3152
53518d94 3153static int ram_state_init(RAMState **rsp)
56e93d26 3154{
7d00ee6a
PX
3155 *rsp = g_try_new0(RAMState, 1);
3156
3157 if (!*rsp) {
3158 error_report("%s: Init ramstate fail", __func__);
3159 return -1;
3160 }
53518d94
JQ
3161
3162 qemu_mutex_init(&(*rsp)->bitmap_mutex);
3163 qemu_mutex_init(&(*rsp)->src_page_req_mutex);
3164 QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
56e93d26 3165
7d00ee6a
PX
3166 /*
3167 * Count the total number of pages used by ram blocks not including any
3168 * gaps due to alignment or unplugs.
3169 */
3170 (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
3171
3172 ram_state_reset(*rsp);
3173
3174 return 0;
3175}
3176
d6eff5d7 3177static void ram_list_init_bitmaps(void)
7d00ee6a 3178{
d6eff5d7
PX
3179 RAMBlock *block;
3180 unsigned long pages;
56e93d26 3181
0827b9e9
AA
3182 /* Skip setting bitmap if there is no RAM */
3183 if (ram_bytes_total()) {
fbd162e6 3184 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
d6eff5d7 3185 pages = block->max_length >> TARGET_PAGE_BITS;
6b6712ef
JQ
3186 block->bmap = bitmap_new(pages);
3187 bitmap_set(block->bmap, 0, pages);
3188 if (migrate_postcopy_ram()) {
3189 block->unsentmap = bitmap_new(pages);
3190 bitmap_set(block->unsentmap, 0, pages);
3191 }
0827b9e9 3192 }
f3f491fc 3193 }
d6eff5d7
PX
3194}
3195
3196static void ram_init_bitmaps(RAMState *rs)
3197{
3198 /* For memory_global_dirty_log_start below. */
3199 qemu_mutex_lock_iothread();
3200 qemu_mutex_lock_ramlist();
3201 rcu_read_lock();
f3f491fc 3202
d6eff5d7 3203 ram_list_init_bitmaps();
56e93d26 3204 memory_global_dirty_log_start();
bd227060 3205 migration_bitmap_sync_precopy(rs);
d6eff5d7
PX
3206
3207 rcu_read_unlock();
56e93d26 3208 qemu_mutex_unlock_ramlist();
49877834 3209 qemu_mutex_unlock_iothread();
d6eff5d7
PX
3210}
3211
3212static int ram_init_all(RAMState **rsp)
3213{
3214 if (ram_state_init(rsp)) {
3215 return -1;
3216 }
3217
3218 if (xbzrle_init()) {
3219 ram_state_cleanup(rsp);
3220 return -1;
3221 }
3222
3223 ram_init_bitmaps(*rsp);
a91246c9
HZ
3224
3225 return 0;
3226}
3227
08614f34
PX
3228static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
3229{
3230 RAMBlock *block;
3231 uint64_t pages = 0;
3232
3233 /*
3234 * Postcopy is not using xbzrle/compression, so no need for that.
3235 * Also, since source are already halted, we don't need to care
3236 * about dirty page logging as well.
3237 */
3238
fbd162e6 3239 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
08614f34
PX
3240 pages += bitmap_count_one(block->bmap,
3241 block->used_length >> TARGET_PAGE_BITS);
3242 }
3243
3244 /* This may not be aligned with current bitmaps. Recalculate. */
3245 rs->migration_dirty_pages = pages;
3246
3247 rs->last_seen_block = NULL;
3248 rs->last_sent_block = NULL;
3249 rs->last_page = 0;
3250 rs->last_version = ram_list.version;
3251 /*
3252 * Disable the bulk stage, otherwise we'll resend the whole RAM no
3253 * matter what we have sent.
3254 */
3255 rs->ram_bulk_stage = false;
3256
3257 /* Update RAMState cache of output QEMUFile */
3258 rs->f = out;
3259
3260 trace_ram_state_resume_prepare(pages);
3261}
3262
6bcb05fc
WW
3263/*
3264 * This function clears bits of the free pages reported by the caller from the
3265 * migration dirty bitmap. @addr is the host address corresponding to the
3266 * start of the continuous guest free pages, and @len is the total bytes of
3267 * those pages.
3268 */
3269void qemu_guest_free_page_hint(void *addr, size_t len)
3270{
3271 RAMBlock *block;
3272 ram_addr_t offset;
3273 size_t used_len, start, npages;
3274 MigrationState *s = migrate_get_current();
3275
3276 /* This function is currently expected to be used during live migration */
3277 if (!migration_is_setup_or_active(s->state)) {
3278 return;
3279 }
3280
3281 for (; len > 0; len -= used_len, addr += used_len) {
3282 block = qemu_ram_block_from_host(addr, false, &offset);
3283 if (unlikely(!block || offset >= block->used_length)) {
3284 /*
3285 * The implementation might not support RAMBlock resize during
3286 * live migration, but it could happen in theory with future
3287 * updates. So we add a check here to capture that case.
3288 */
3289 error_report_once("%s unexpected error", __func__);
3290 return;
3291 }
3292
3293 if (len <= block->used_length - offset) {
3294 used_len = len;
3295 } else {
3296 used_len = block->used_length - offset;
3297 }
3298
3299 start = offset >> TARGET_PAGE_BITS;
3300 npages = used_len >> TARGET_PAGE_BITS;
3301
3302 qemu_mutex_lock(&ram_state->bitmap_mutex);
3303 ram_state->migration_dirty_pages -=
3304 bitmap_count_one_with_offset(block->bmap, start, npages);
3305 bitmap_clear(block->bmap, start, npages);
3306 qemu_mutex_unlock(&ram_state->bitmap_mutex);
3307 }
3308}
3309
3d0684b2
JQ
3310/*
3311 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
a91246c9
HZ
3312 * long-running RCU critical section. When rcu-reclaims in the code
3313 * start to become numerous it will be necessary to reduce the
3314 * granularity of these critical sections.
3315 */
3316
3d0684b2
JQ
3317/**
3318 * ram_save_setup: Setup RAM for migration
3319 *
3320 * Returns zero to indicate success and negative for error
3321 *
3322 * @f: QEMUFile where to send the data
3323 * @opaque: RAMState pointer
3324 */
a91246c9
HZ
3325static int ram_save_setup(QEMUFile *f, void *opaque)
3326{
53518d94 3327 RAMState **rsp = opaque;
a91246c9
HZ
3328 RAMBlock *block;
3329
dcaf446e
XG
3330 if (compress_threads_save_setup()) {
3331 return -1;
3332 }
3333
a91246c9
HZ
3334 /* migration has already setup the bitmap, reuse it. */
3335 if (!migration_in_colo_state()) {
7d00ee6a 3336 if (ram_init_all(rsp) != 0) {
dcaf446e 3337 compress_threads_save_cleanup();
a91246c9 3338 return -1;
53518d94 3339 }
a91246c9 3340 }
53518d94 3341 (*rsp)->f = f;
a91246c9
HZ
3342
3343 rcu_read_lock();
56e93d26 3344
fbd162e6 3345 qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
56e93d26 3346
b895de50 3347 RAMBLOCK_FOREACH_MIGRATABLE(block) {
56e93d26
JQ
3348 qemu_put_byte(f, strlen(block->idstr));
3349 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
3350 qemu_put_be64(f, block->used_length);
ef08fb38
DDAG
3351 if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
3352 qemu_put_be64(f, block->page_size);
3353 }
fbd162e6
YK
3354 if (migrate_ignore_shared()) {
3355 qemu_put_be64(f, block->mr->addr);
3356 qemu_put_byte(f, ramblock_is_ignored(block) ? 1 : 0);
3357 }
56e93d26
JQ
3358 }
3359
3360 rcu_read_unlock();
3361
3362 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
3363 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
3364
6df264ac 3365 multifd_send_sync_main();
56e93d26 3366 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
35374cbd 3367 qemu_fflush(f);
56e93d26
JQ
3368
3369 return 0;
3370}
3371
3d0684b2
JQ
3372/**
3373 * ram_save_iterate: iterative stage for migration
3374 *
3375 * Returns zero to indicate success and negative for error
3376 *
3377 * @f: QEMUFile where to send the data
3378 * @opaque: RAMState pointer
3379 */
56e93d26
JQ
3380static int ram_save_iterate(QEMUFile *f, void *opaque)
3381{
53518d94
JQ
3382 RAMState **temp = opaque;
3383 RAMState *rs = *temp;
56e93d26
JQ
3384 int ret;
3385 int i;
3386 int64_t t0;
5c90308f 3387 int done = 0;
56e93d26 3388
b2557345
PL
3389 if (blk_mig_bulk_active()) {
3390 /* Avoid transferring ram during bulk phase of block migration as
3391 * the bulk phase will usually take a long time and transferring
3392 * ram updates during that time is pointless. */
3393 goto out;
3394 }
3395
56e93d26 3396 rcu_read_lock();
6f37bb8b
JQ
3397 if (ram_list.version != rs->last_version) {
3398 ram_state_reset(rs);
56e93d26
JQ
3399 }
3400
3401 /* Read version before ram_list.blocks */
3402 smp_rmb();
3403
3404 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
3405
3406 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
3407 i = 0;
e03a34f8
DDAG
3408 while ((ret = qemu_file_rate_limit(f)) == 0 ||
3409 !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
56e93d26
JQ
3410 int pages;
3411
e03a34f8
DDAG
3412 if (qemu_file_get_error(f)) {
3413 break;
3414 }
3415
ce25d337 3416 pages = ram_find_and_save_block(rs, false);
56e93d26
JQ
3417 /* no more pages to sent */
3418 if (pages == 0) {
5c90308f 3419 done = 1;
56e93d26
JQ
3420 break;
3421 }
e8f3735f
XG
3422
3423 if (pages < 0) {
3424 qemu_file_set_error(f, pages);
3425 break;
3426 }
3427
be8b02ed 3428 rs->target_page_count += pages;
070afca2 3429
56e93d26
JQ
3430 /* we want to check in the 1st loop, just in case it was the 1st time
3431 and we had to sync the dirty bitmap.
3432 qemu_get_clock_ns() is a bit expensive, so we only check each some
3433 iterations
3434 */
3435 if ((i & 63) == 0) {
3436 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
3437 if (t1 > MAX_WAIT) {
55c4446b 3438 trace_ram_save_iterate_big_wait(t1, i);
56e93d26
JQ
3439 break;
3440 }
3441 }
3442 i++;
3443 }
56e93d26
JQ
3444 rcu_read_unlock();
3445
3446 /*
3447 * Must occur before EOS (or any QEMUFile operation)
3448 * because of RDMA protocol.
3449 */
3450 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
3451
6df264ac 3452 multifd_send_sync_main();
b2557345 3453out:
56e93d26 3454 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
35374cbd 3455 qemu_fflush(f);
9360447d 3456 ram_counters.transferred += 8;
56e93d26
JQ
3457
3458 ret = qemu_file_get_error(f);
3459 if (ret < 0) {
3460 return ret;
3461 }
3462
5c90308f 3463 return done;
56e93d26
JQ
3464}
3465
3d0684b2
JQ
3466/**
3467 * ram_save_complete: function called to send the remaining amount of ram
3468 *
e8f3735f 3469 * Returns zero to indicate success or negative on error
3d0684b2
JQ
3470 *
3471 * Called with iothread lock
3472 *
3473 * @f: QEMUFile where to send the data
3474 * @opaque: RAMState pointer
3475 */
56e93d26
JQ
3476static int ram_save_complete(QEMUFile *f, void *opaque)
3477{
53518d94
JQ
3478 RAMState **temp = opaque;
3479 RAMState *rs = *temp;
e8f3735f 3480 int ret = 0;
6f37bb8b 3481
56e93d26
JQ
3482 rcu_read_lock();
3483
5727309d 3484 if (!migration_in_postcopy()) {
bd227060 3485 migration_bitmap_sync_precopy(rs);
663e6c1d 3486 }
56e93d26
JQ
3487
3488 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
3489
3490 /* try transferring iterative blocks of memory */
3491
3492 /* flush all remaining blocks regardless of rate limiting */
3493 while (true) {
3494 int pages;
3495
ce25d337 3496 pages = ram_find_and_save_block(rs, !migration_in_colo_state());
56e93d26
JQ
3497 /* no more blocks to sent */
3498 if (pages == 0) {
3499 break;
3500 }
e8f3735f
XG
3501 if (pages < 0) {
3502 ret = pages;
3503 break;
3504 }
56e93d26
JQ
3505 }
3506
ce25d337 3507 flush_compressed_data(rs);
56e93d26 3508 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
56e93d26
JQ
3509
3510 rcu_read_unlock();
d09a6fde 3511
6df264ac 3512 multifd_send_sync_main();
56e93d26 3513 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
35374cbd 3514 qemu_fflush(f);
56e93d26 3515
e8f3735f 3516 return ret;
56e93d26
JQ
3517}
3518
c31b098f 3519static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
47995026
VSO
3520 uint64_t *res_precopy_only,
3521 uint64_t *res_compatible,
3522 uint64_t *res_postcopy_only)
56e93d26 3523{
53518d94
JQ
3524 RAMState **temp = opaque;
3525 RAMState *rs = *temp;
56e93d26
JQ
3526 uint64_t remaining_size;
3527
9edabd4d 3528 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
56e93d26 3529
5727309d 3530 if (!migration_in_postcopy() &&
663e6c1d 3531 remaining_size < max_size) {
56e93d26
JQ
3532 qemu_mutex_lock_iothread();
3533 rcu_read_lock();
bd227060 3534 migration_bitmap_sync_precopy(rs);
56e93d26
JQ
3535 rcu_read_unlock();
3536 qemu_mutex_unlock_iothread();
9edabd4d 3537 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
56e93d26 3538 }
c31b098f 3539
86e1167e
VSO
3540 if (migrate_postcopy_ram()) {
3541 /* We can do postcopy, and all the data is postcopiable */
47995026 3542 *res_compatible += remaining_size;
86e1167e 3543 } else {
47995026 3544 *res_precopy_only += remaining_size;
86e1167e 3545 }
56e93d26
JQ
3546}
3547
3548static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3549{
3550 unsigned int xh_len;
3551 int xh_flags;
063e760a 3552 uint8_t *loaded_data;
56e93d26 3553
56e93d26
JQ
3554 /* extract RLE header */
3555 xh_flags = qemu_get_byte(f);
3556 xh_len = qemu_get_be16(f);
3557
3558 if (xh_flags != ENCODING_FLAG_XBZRLE) {
3559 error_report("Failed to load XBZRLE page - wrong compression!");
3560 return -1;
3561 }
3562
3563 if (xh_len > TARGET_PAGE_SIZE) {
3564 error_report("Failed to load XBZRLE page - len overflow!");
3565 return -1;
3566 }
f265e0e4 3567 loaded_data = XBZRLE.decoded_buf;
56e93d26 3568 /* load data and decode */
f265e0e4 3569 /* it can change loaded_data to point to an internal buffer */
063e760a 3570 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
56e93d26
JQ
3571
3572 /* decode RLE */
063e760a 3573 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
56e93d26
JQ
3574 TARGET_PAGE_SIZE) == -1) {
3575 error_report("Failed to load XBZRLE page - decode error!");
3576 return -1;
3577 }
3578
3579 return 0;
3580}
3581
3d0684b2
JQ
3582/**
3583 * ram_block_from_stream: read a RAMBlock id from the migration stream
3584 *
3585 * Must be called from within a rcu critical section.
3586 *
56e93d26 3587 * Returns a pointer from within the RCU-protected ram_list.
a7180877 3588 *
3d0684b2
JQ
3589 * @f: QEMUFile where to read the data from
3590 * @flags: Page flags (mostly to see if it's a continuation of previous block)
a7180877 3591 */
3d0684b2 3592static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
56e93d26
JQ
3593{
3594 static RAMBlock *block = NULL;
3595 char id[256];
3596 uint8_t len;
3597
3598 if (flags & RAM_SAVE_FLAG_CONTINUE) {
4c4bad48 3599 if (!block) {
56e93d26
JQ
3600 error_report("Ack, bad migration stream!");
3601 return NULL;
3602 }
4c4bad48 3603 return block;
56e93d26
JQ
3604 }
3605
3606 len = qemu_get_byte(f);
3607 qemu_get_buffer(f, (uint8_t *)id, len);
3608 id[len] = 0;
3609
e3dd7493 3610 block = qemu_ram_block_by_name(id);
4c4bad48
HZ
3611 if (!block) {
3612 error_report("Can't find block %s", id);
3613 return NULL;
56e93d26
JQ
3614 }
3615
fbd162e6 3616 if (ramblock_is_ignored(block)) {
b895de50
CLG
3617 error_report("block %s should not be migrated !", id);
3618 return NULL;
3619 }
3620
4c4bad48
HZ
3621 return block;
3622}
3623
3624static inline void *host_from_ram_block_offset(RAMBlock *block,
3625 ram_addr_t offset)
3626{
3627 if (!offset_in_ramblock(block, offset)) {
3628 return NULL;
3629 }
3630
3631 return block->host + offset;
56e93d26
JQ
3632}
3633
13af18f2
ZC
3634static inline void *colo_cache_from_block_offset(RAMBlock *block,
3635 ram_addr_t offset)
3636{
3637 if (!offset_in_ramblock(block, offset)) {
3638 return NULL;
3639 }
3640 if (!block->colo_cache) {
3641 error_report("%s: colo_cache is NULL in block :%s",
3642 __func__, block->idstr);
3643 return NULL;
3644 }
7d9acafa
ZC
3645
3646 /*
3647 * During colo checkpoint, we need bitmap of these migrated pages.
3648 * It help us to decide which pages in ram cache should be flushed
3649 * into VM's RAM later.
3650 */
3651 if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
3652 ram_state->migration_dirty_pages++;
3653 }
13af18f2
ZC
3654 return block->colo_cache + offset;
3655}
3656
3d0684b2
JQ
3657/**
3658 * ram_handle_compressed: handle the zero page case
3659 *
56e93d26
JQ
3660 * If a page (or a whole RDMA chunk) has been
3661 * determined to be zero, then zap it.
3d0684b2
JQ
3662 *
3663 * @host: host address for the zero page
3664 * @ch: what the page is filled from. We only support zero
3665 * @size: size of the zero page
56e93d26
JQ
3666 */
3667void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
3668{
3669 if (ch != 0 || !is_zero_range(host, size)) {
3670 memset(host, ch, size);
3671 }
3672}
3673
797ca154
XG
3674/* return the size after decompression, or negative value on error */
3675static int
3676qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
3677 const uint8_t *source, size_t source_len)
3678{
3679 int err;
3680
3681 err = inflateReset(stream);
3682 if (err != Z_OK) {
3683 return -1;
3684 }
3685
3686 stream->avail_in = source_len;
3687 stream->next_in = (uint8_t *)source;
3688 stream->avail_out = dest_len;
3689 stream->next_out = dest;
3690
3691 err = inflate(stream, Z_NO_FLUSH);
3692 if (err != Z_STREAM_END) {
3693 return -1;
3694 }
3695
3696 return stream->total_out;
3697}
3698
56e93d26
JQ
3699static void *do_data_decompress(void *opaque)
3700{
3701 DecompressParam *param = opaque;
3702 unsigned long pagesize;
33d151f4 3703 uint8_t *des;
34ab9e97 3704 int len, ret;
56e93d26 3705
33d151f4 3706 qemu_mutex_lock(&param->mutex);
90e56fb4 3707 while (!param->quit) {
33d151f4
LL
3708 if (param->des) {
3709 des = param->des;
3710 len = param->len;
3711 param->des = 0;
3712 qemu_mutex_unlock(&param->mutex);
3713
56e93d26 3714 pagesize = TARGET_PAGE_SIZE;
34ab9e97
XG
3715
3716 ret = qemu_uncompress_data(&param->stream, des, pagesize,
3717 param->compbuf, len);
f548222c 3718 if (ret < 0 && migrate_get_current()->decompress_error_check) {
34ab9e97
XG
3719 error_report("decompress data failed");
3720 qemu_file_set_error(decomp_file, ret);
3721 }
73a8912b 3722
33d151f4
LL
3723 qemu_mutex_lock(&decomp_done_lock);
3724 param->done = true;
3725 qemu_cond_signal(&decomp_done_cond);
3726 qemu_mutex_unlock(&decomp_done_lock);
3727
3728 qemu_mutex_lock(&param->mutex);
3729 } else {
3730 qemu_cond_wait(&param->cond, &param->mutex);
3731 }
56e93d26 3732 }
33d151f4 3733 qemu_mutex_unlock(&param->mutex);
56e93d26
JQ
3734
3735 return NULL;
3736}
3737
34ab9e97 3738static int wait_for_decompress_done(void)
5533b2e9
LL
3739{
3740 int idx, thread_count;
3741
3742 if (!migrate_use_compression()) {
34ab9e97 3743 return 0;
5533b2e9
LL
3744 }
3745
3746 thread_count = migrate_decompress_threads();
3747 qemu_mutex_lock(&decomp_done_lock);
3748 for (idx = 0; idx < thread_count; idx++) {
3749 while (!decomp_param[idx].done) {
3750 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3751 }
3752 }
3753 qemu_mutex_unlock(&decomp_done_lock);
34ab9e97 3754 return qemu_file_get_error(decomp_file);
5533b2e9
LL
3755}
3756
f0afa331 3757static void compress_threads_load_cleanup(void)
56e93d26
JQ
3758{
3759 int i, thread_count;
3760
3416ab5b
JQ
3761 if (!migrate_use_compression()) {
3762 return;
3763 }
56e93d26
JQ
3764 thread_count = migrate_decompress_threads();
3765 for (i = 0; i < thread_count; i++) {
797ca154
XG
3766 /*
3767 * we use it as a indicator which shows if the thread is
3768 * properly init'd or not
3769 */
3770 if (!decomp_param[i].compbuf) {
3771 break;
3772 }
3773
56e93d26 3774 qemu_mutex_lock(&decomp_param[i].mutex);
90e56fb4 3775 decomp_param[i].quit = true;
56e93d26
JQ
3776 qemu_cond_signal(&decomp_param[i].cond);
3777 qemu_mutex_unlock(&decomp_param[i].mutex);
3778 }
3779 for (i = 0; i < thread_count; i++) {
797ca154
XG
3780 if (!decomp_param[i].compbuf) {
3781 break;
3782 }
3783
56e93d26
JQ
3784 qemu_thread_join(decompress_threads + i);
3785 qemu_mutex_destroy(&decomp_param[i].mutex);
3786 qemu_cond_destroy(&decomp_param[i].cond);
797ca154 3787 inflateEnd(&decomp_param[i].stream);
56e93d26 3788 g_free(decomp_param[i].compbuf);
797ca154 3789 decomp_param[i].compbuf = NULL;
56e93d26
JQ
3790 }
3791 g_free(decompress_threads);
3792 g_free(decomp_param);
56e93d26
JQ
3793 decompress_threads = NULL;
3794 decomp_param = NULL;
34ab9e97 3795 decomp_file = NULL;
56e93d26
JQ
3796}
3797
34ab9e97 3798static int compress_threads_load_setup(QEMUFile *f)
797ca154
XG
3799{
3800 int i, thread_count;
3801
3802 if (!migrate_use_compression()) {
3803 return 0;
3804 }
3805
3806 thread_count = migrate_decompress_threads();
3807 decompress_threads = g_new0(QemuThread, thread_count);
3808 decomp_param = g_new0(DecompressParam, thread_count);
3809 qemu_mutex_init(&decomp_done_lock);
3810 qemu_cond_init(&decomp_done_cond);
34ab9e97 3811 decomp_file = f;
797ca154
XG
3812 for (i = 0; i < thread_count; i++) {
3813 if (inflateInit(&decomp_param[i].stream) != Z_OK) {
3814 goto exit;
3815 }
3816
3817 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
3818 qemu_mutex_init(&decomp_param[i].mutex);
3819 qemu_cond_init(&decomp_param[i].cond);
3820 decomp_param[i].done = true;
3821 decomp_param[i].quit = false;
3822 qemu_thread_create(decompress_threads + i, "decompress",
3823 do_data_decompress, decomp_param + i,
3824 QEMU_THREAD_JOINABLE);
3825 }
3826 return 0;
3827exit:
3828 compress_threads_load_cleanup();
3829 return -1;
3830}
3831
c1bc6626 3832static void decompress_data_with_multi_threads(QEMUFile *f,
56e93d26
JQ
3833 void *host, int len)
3834{
3835 int idx, thread_count;
3836
3837 thread_count = migrate_decompress_threads();
73a8912b 3838 qemu_mutex_lock(&decomp_done_lock);
56e93d26
JQ
3839 while (true) {
3840 for (idx = 0; idx < thread_count; idx++) {
73a8912b 3841 if (decomp_param[idx].done) {
33d151f4
LL
3842 decomp_param[idx].done = false;
3843 qemu_mutex_lock(&decomp_param[idx].mutex);
c1bc6626 3844 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
56e93d26
JQ
3845 decomp_param[idx].des = host;
3846 decomp_param[idx].len = len;
33d151f4
LL
3847 qemu_cond_signal(&decomp_param[idx].cond);
3848 qemu_mutex_unlock(&decomp_param[idx].mutex);
56e93d26
JQ
3849 break;
3850 }
3851 }
3852 if (idx < thread_count) {
3853 break;
73a8912b
LL
3854 } else {
3855 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
56e93d26
JQ
3856 }
3857 }
73a8912b 3858 qemu_mutex_unlock(&decomp_done_lock);
56e93d26
JQ
3859}
3860
13af18f2
ZC
3861/*
3862 * colo cache: this is for secondary VM, we cache the whole
3863 * memory of the secondary VM, it is need to hold the global lock
3864 * to call this helper.
3865 */
3866int colo_init_ram_cache(void)
3867{
3868 RAMBlock *block;
3869
3870 rcu_read_lock();
fbd162e6 3871 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
13af18f2
ZC
3872 block->colo_cache = qemu_anon_ram_alloc(block->used_length,
3873 NULL,
3874 false);
3875 if (!block->colo_cache) {
3876 error_report("%s: Can't alloc memory for COLO cache of block %s,"
3877 "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
3878 block->used_length);
3879 goto out_locked;
3880 }
3881 memcpy(block->colo_cache, block->host, block->used_length);
3882 }
3883 rcu_read_unlock();
7d9acafa
ZC
3884 /*
3885 * Record the dirty pages that sent by PVM, we use this dirty bitmap together
3886 * with to decide which page in cache should be flushed into SVM's RAM. Here
3887 * we use the same name 'ram_bitmap' as for migration.
3888 */
3889 if (ram_bytes_total()) {
3890 RAMBlock *block;
3891
fbd162e6 3892 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
7d9acafa
ZC
3893 unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
3894
3895 block->bmap = bitmap_new(pages);
3896 bitmap_set(block->bmap, 0, pages);
3897 }
3898 }
3899 ram_state = g_new0(RAMState, 1);
3900 ram_state->migration_dirty_pages = 0;
d1955d22 3901 memory_global_dirty_log_start();
7d9acafa 3902
13af18f2
ZC
3903 return 0;
3904
3905out_locked:
7d9acafa 3906
fbd162e6 3907 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
13af18f2
ZC
3908 if (block->colo_cache) {
3909 qemu_anon_ram_free(block->colo_cache, block->used_length);
3910 block->colo_cache = NULL;
3911 }
3912 }
3913
3914 rcu_read_unlock();
3915 return -errno;
3916}
3917
3918/* It is need to hold the global lock to call this helper */
3919void colo_release_ram_cache(void)
3920{
3921 RAMBlock *block;
3922
d1955d22 3923 memory_global_dirty_log_stop();
fbd162e6 3924 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
7d9acafa
ZC
3925 g_free(block->bmap);
3926 block->bmap = NULL;
3927 }
3928
13af18f2 3929 rcu_read_lock();
7d9acafa 3930
fbd162e6 3931 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
13af18f2
ZC
3932 if (block->colo_cache) {
3933 qemu_anon_ram_free(block->colo_cache, block->used_length);
3934 block->colo_cache = NULL;
3935 }
3936 }
7d9acafa 3937
13af18f2 3938 rcu_read_unlock();
7d9acafa
ZC
3939 g_free(ram_state);
3940 ram_state = NULL;
13af18f2
ZC
3941}
3942
f265e0e4
JQ
3943/**
3944 * ram_load_setup: Setup RAM for migration incoming side
3945 *
3946 * Returns zero to indicate success and negative for error
3947 *
3948 * @f: QEMUFile where to receive the data
3949 * @opaque: RAMState pointer
3950 */
3951static int ram_load_setup(QEMUFile *f, void *opaque)
3952{
34ab9e97 3953 if (compress_threads_load_setup(f)) {
797ca154
XG
3954 return -1;
3955 }
3956
f265e0e4 3957 xbzrle_load_setup();
f9494614 3958 ramblock_recv_map_init();
13af18f2 3959
f265e0e4
JQ
3960 return 0;
3961}
3962
3963static int ram_load_cleanup(void *opaque)
3964{
f9494614 3965 RAMBlock *rb;
56eb90af 3966
fbd162e6 3967 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
56eb90af
JH
3968 if (ramblock_is_pmem(rb)) {
3969 pmem_persist(rb->host, rb->used_length);
3970 }
3971 }
3972
f265e0e4 3973 xbzrle_load_cleanup();
f0afa331 3974 compress_threads_load_cleanup();
f9494614 3975
fbd162e6 3976 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
f9494614
AP
3977 g_free(rb->receivedmap);
3978 rb->receivedmap = NULL;
3979 }
13af18f2 3980
f265e0e4
JQ
3981 return 0;
3982}
3983
3d0684b2
JQ
3984/**
3985 * ram_postcopy_incoming_init: allocate postcopy data structures
3986 *
3987 * Returns 0 for success and negative if there was one error
3988 *
3989 * @mis: current migration incoming state
3990 *
3991 * Allocate data structures etc needed by incoming migration with
3992 * postcopy-ram. postcopy-ram's similarly names
3993 * postcopy_ram_incoming_init does the work.
1caddf8a
DDAG
3994 */
3995int ram_postcopy_incoming_init(MigrationIncomingState *mis)
3996{
c136180c 3997 return postcopy_ram_incoming_init(mis);
1caddf8a
DDAG
3998}
3999
3d0684b2
JQ
4000/**
4001 * ram_load_postcopy: load a page in postcopy case
4002 *
4003 * Returns 0 for success or -errno in case of error
4004 *
a7180877
DDAG
4005 * Called in postcopy mode by ram_load().
4006 * rcu_read_lock is taken prior to this being called.
3d0684b2
JQ
4007 *
4008 * @f: QEMUFile where to send the data
a7180877
DDAG
4009 */
4010static int ram_load_postcopy(QEMUFile *f)
4011{
4012 int flags = 0, ret = 0;
4013 bool place_needed = false;
1aa83678 4014 bool matches_target_page_size = false;
a7180877
DDAG
4015 MigrationIncomingState *mis = migration_incoming_get_current();
4016 /* Temporary page that is later 'placed' */
4017 void *postcopy_host_page = postcopy_get_tmp_page(mis);
c53b7ddc 4018 void *last_host = NULL;
a3b6ff6d 4019 bool all_zero = false;
a7180877
DDAG
4020
4021 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
4022 ram_addr_t addr;
4023 void *host = NULL;
4024 void *page_buffer = NULL;
4025 void *place_source = NULL;
df9ff5e1 4026 RAMBlock *block = NULL;
a7180877 4027 uint8_t ch;
a7180877
DDAG
4028
4029 addr = qemu_get_be64(f);
7a9ddfbf
PX
4030
4031 /*
4032 * If qemu file error, we should stop here, and then "addr"
4033 * may be invalid
4034 */
4035 ret = qemu_file_get_error(f);
4036 if (ret) {
4037 break;
4038 }
4039
a7180877
DDAG
4040 flags = addr & ~TARGET_PAGE_MASK;
4041 addr &= TARGET_PAGE_MASK;
4042
4043 trace_ram_load_postcopy_loop((uint64_t)addr, flags);
4044 place_needed = false;
bb890ed5 4045 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
df9ff5e1 4046 block = ram_block_from_stream(f, flags);
4c4bad48
HZ
4047
4048 host = host_from_ram_block_offset(block, addr);
a7180877
DDAG
4049 if (!host) {
4050 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4051 ret = -EINVAL;
4052 break;
4053 }
1aa83678 4054 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
a7180877 4055 /*
28abd200
DDAG
4056 * Postcopy requires that we place whole host pages atomically;
4057 * these may be huge pages for RAMBlocks that are backed by
4058 * hugetlbfs.
a7180877
DDAG
4059 * To make it atomic, the data is read into a temporary page
4060 * that's moved into place later.
4061 * The migration protocol uses, possibly smaller, target-pages
4062 * however the source ensures it always sends all the components
4063 * of a host page in order.
4064 */
4065 page_buffer = postcopy_host_page +
28abd200 4066 ((uintptr_t)host & (block->page_size - 1));
a7180877 4067 /* If all TP are zero then we can optimise the place */
28abd200 4068 if (!((uintptr_t)host & (block->page_size - 1))) {
a7180877 4069 all_zero = true;
c53b7ddc
DDAG
4070 } else {
4071 /* not the 1st TP within the HP */
4072 if (host != (last_host + TARGET_PAGE_SIZE)) {
9af9e0fe 4073 error_report("Non-sequential target page %p/%p",
c53b7ddc
DDAG
4074 host, last_host);
4075 ret = -EINVAL;
4076 break;
4077 }
a7180877
DDAG
4078 }
4079
c53b7ddc 4080
a7180877
DDAG
4081 /*
4082 * If it's the last part of a host page then we place the host
4083 * page
4084 */
4085 place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
28abd200 4086 (block->page_size - 1)) == 0;
a7180877
DDAG
4087 place_source = postcopy_host_page;
4088 }
c53b7ddc 4089 last_host = host;
a7180877
DDAG
4090
4091 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
bb890ed5 4092 case RAM_SAVE_FLAG_ZERO:
a7180877
DDAG
4093 ch = qemu_get_byte(f);
4094 memset(page_buffer, ch, TARGET_PAGE_SIZE);
4095 if (ch) {
4096 all_zero = false;
4097 }
4098 break;
4099
4100 case RAM_SAVE_FLAG_PAGE:
4101 all_zero = false;
1aa83678
PX
4102 if (!matches_target_page_size) {
4103 /* For huge pages, we always use temporary buffer */
a7180877
DDAG
4104 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
4105 } else {
1aa83678
PX
4106 /*
4107 * For small pages that matches target page size, we
4108 * avoid the qemu_file copy. Instead we directly use
4109 * the buffer of QEMUFile to place the page. Note: we
4110 * cannot do any QEMUFile operation before using that
4111 * buffer to make sure the buffer is valid when
4112 * placing the page.
a7180877
DDAG
4113 */
4114 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
4115 TARGET_PAGE_SIZE);
4116 }
4117 break;
4118 case RAM_SAVE_FLAG_EOS:
4119 /* normal exit */
6df264ac 4120 multifd_recv_sync_main();
a7180877
DDAG
4121 break;
4122 default:
4123 error_report("Unknown combination of migration flags: %#x"
4124 " (postcopy mode)", flags);
4125 ret = -EINVAL;
7a9ddfbf
PX
4126 break;
4127 }
4128
4129 /* Detect for any possible file errors */
4130 if (!ret && qemu_file_get_error(f)) {
4131 ret = qemu_file_get_error(f);
a7180877
DDAG
4132 }
4133
7a9ddfbf 4134 if (!ret && place_needed) {
a7180877 4135 /* This gets called at the last target page in the host page */
df9ff5e1
DDAG
4136 void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;
4137
a7180877 4138 if (all_zero) {
df9ff5e1 4139 ret = postcopy_place_page_zero(mis, place_dest,
8be4620b 4140 block);
a7180877 4141 } else {
df9ff5e1 4142 ret = postcopy_place_page(mis, place_dest,
8be4620b 4143 place_source, block);
a7180877
DDAG
4144 }
4145 }
a7180877
DDAG
4146 }
4147
4148 return ret;
4149}
4150
acab30b8
DHB
4151static bool postcopy_is_advised(void)
4152{
4153 PostcopyState ps = postcopy_state_get();
4154 return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
4155}
4156
4157static bool postcopy_is_running(void)
4158{
4159 PostcopyState ps = postcopy_state_get();
4160 return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
4161}
4162
e6f4aa18
ZC
4163/*
4164 * Flush content of RAM cache into SVM's memory.
4165 * Only flush the pages that be dirtied by PVM or SVM or both.
4166 */
4167static void colo_flush_ram_cache(void)
4168{
4169 RAMBlock *block = NULL;
4170 void *dst_host;
4171 void *src_host;
4172 unsigned long offset = 0;
4173
d1955d22
HZ
4174 memory_global_dirty_log_sync();
4175 rcu_read_lock();
fbd162e6 4176 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
d1955d22
HZ
4177 migration_bitmap_sync_range(ram_state, block, 0, block->used_length);
4178 }
4179 rcu_read_unlock();
4180
e6f4aa18
ZC
4181 trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
4182 rcu_read_lock();
4183 block = QLIST_FIRST_RCU(&ram_list.blocks);
4184
4185 while (block) {
4186 offset = migration_bitmap_find_dirty(ram_state, block, offset);
4187
4188 if (offset << TARGET_PAGE_BITS >= block->used_length) {
4189 offset = 0;
4190 block = QLIST_NEXT_RCU(block, next);
4191 } else {
4192 migration_bitmap_clear_dirty(ram_state, block, offset);
4193 dst_host = block->host + (offset << TARGET_PAGE_BITS);
4194 src_host = block->colo_cache + (offset << TARGET_PAGE_BITS);
4195 memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
4196 }
4197 }
4198
4199 rcu_read_unlock();
4200 trace_colo_flush_ram_cache_end();
4201}
4202
56e93d26
JQ
4203static int ram_load(QEMUFile *f, void *opaque, int version_id)
4204{
edc60127 4205 int flags = 0, ret = 0, invalid_flags = 0;
56e93d26
JQ
4206 static uint64_t seq_iter;
4207 int len = 0;
a7180877
DDAG
4208 /*
4209 * If system is running in postcopy mode, page inserts to host memory must
4210 * be atomic
4211 */
acab30b8 4212 bool postcopy_running = postcopy_is_running();
ef08fb38 4213 /* ADVISE is earlier, it shows the source has the postcopy capability on */
acab30b8 4214 bool postcopy_advised = postcopy_is_advised();
56e93d26
JQ
4215
4216 seq_iter++;
4217
4218 if (version_id != 4) {
4219 ret = -EINVAL;
4220 }
4221
edc60127
JQ
4222 if (!migrate_use_compression()) {
4223 invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
4224 }
56e93d26
JQ
4225 /* This RCU critical section can be very long running.
4226 * When RCU reclaims in the code start to become numerous,
4227 * it will be necessary to reduce the granularity of this
4228 * critical section.
4229 */
4230 rcu_read_lock();
a7180877
DDAG
4231
4232 if (postcopy_running) {
4233 ret = ram_load_postcopy(f);
4234 }
4235
4236 while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
56e93d26 4237 ram_addr_t addr, total_ram_bytes;
a776aa15 4238 void *host = NULL;
56e93d26
JQ
4239 uint8_t ch;
4240
4241 addr = qemu_get_be64(f);
4242 flags = addr & ~TARGET_PAGE_MASK;
4243 addr &= TARGET_PAGE_MASK;
4244
edc60127
JQ
4245 if (flags & invalid_flags) {
4246 if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
4247 error_report("Received an unexpected compressed page");
4248 }
4249
4250 ret = -EINVAL;
4251 break;
4252 }
4253
bb890ed5 4254 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
a776aa15 4255 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
4c4bad48
HZ
4256 RAMBlock *block = ram_block_from_stream(f, flags);
4257
13af18f2
ZC
4258 /*
4259 * After going into COLO, we should load the Page into colo_cache.
4260 */
4261 if (migration_incoming_in_colo_state()) {
4262 host = colo_cache_from_block_offset(block, addr);
4263 } else {
4264 host = host_from_ram_block_offset(block, addr);
4265 }
a776aa15
DDAG
4266 if (!host) {
4267 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4268 ret = -EINVAL;
4269 break;
4270 }
13af18f2
ZC
4271
4272 if (!migration_incoming_in_colo_state()) {
4273 ramblock_recv_bitmap_set(block, host);
4274 }
4275
1db9d8e5 4276 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
a776aa15
DDAG
4277 }
4278
56e93d26
JQ
4279 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4280 case RAM_SAVE_FLAG_MEM_SIZE:
4281 /* Synchronize RAM block list */
4282 total_ram_bytes = addr;
4283 while (!ret && total_ram_bytes) {
4284 RAMBlock *block;
56e93d26
JQ
4285 char id[256];
4286 ram_addr_t length;
4287
4288 len = qemu_get_byte(f);
4289 qemu_get_buffer(f, (uint8_t *)id, len);
4290 id[len] = 0;
4291 length = qemu_get_be64(f);
4292
e3dd7493 4293 block = qemu_ram_block_by_name(id);
b895de50
CLG
4294 if (block && !qemu_ram_is_migratable(block)) {
4295 error_report("block %s should not be migrated !", id);
4296 ret = -EINVAL;
4297 } else if (block) {
e3dd7493
DDAG
4298 if (length != block->used_length) {
4299 Error *local_err = NULL;
56e93d26 4300
fa53a0e5 4301 ret = qemu_ram_resize(block, length,
e3dd7493
DDAG
4302 &local_err);
4303 if (local_err) {
4304 error_report_err(local_err);
56e93d26 4305 }
56e93d26 4306 }
ef08fb38
DDAG
4307 /* For postcopy we need to check hugepage sizes match */
4308 if (postcopy_advised &&
4309 block->page_size != qemu_host_page_size) {
4310 uint64_t remote_page_size = qemu_get_be64(f);
4311 if (remote_page_size != block->page_size) {
4312 error_report("Mismatched RAM page size %s "
4313 "(local) %zd != %" PRId64,
4314 id, block->page_size,
4315 remote_page_size);
4316 ret = -EINVAL;
4317 }
4318 }
fbd162e6
YK
4319 if (migrate_ignore_shared()) {
4320 hwaddr addr = qemu_get_be64(f);
4321 bool ignored = qemu_get_byte(f);
4322 if (ignored != ramblock_is_ignored(block)) {
4323 error_report("RAM block %s should %s be migrated",
4324 id, ignored ? "" : "not");
4325 ret = -EINVAL;
4326 }
4327 if (ramblock_is_ignored(block) &&
4328 block->mr->addr != addr) {
4329 error_report("Mismatched GPAs for block %s "
4330 "%" PRId64 "!= %" PRId64,
4331 id, (uint64_t)addr,
4332 (uint64_t)block->mr->addr);
4333 ret = -EINVAL;
4334 }
4335 }
e3dd7493
DDAG
4336 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
4337 block->idstr);
4338 } else {
56e93d26
JQ
4339 error_report("Unknown ramblock \"%s\", cannot "
4340 "accept migration", id);
4341 ret = -EINVAL;
4342 }
4343
4344 total_ram_bytes -= length;
4345 }
4346 break;
a776aa15 4347
bb890ed5 4348 case RAM_SAVE_FLAG_ZERO:
56e93d26
JQ
4349 ch = qemu_get_byte(f);
4350 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
4351 break;
a776aa15 4352
56e93d26 4353 case RAM_SAVE_FLAG_PAGE:
56e93d26
JQ
4354 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
4355 break;
56e93d26 4356
a776aa15 4357 case RAM_SAVE_FLAG_COMPRESS_PAGE:
56e93d26
JQ
4358 len = qemu_get_be32(f);
4359 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
4360 error_report("Invalid compressed data length: %d", len);
4361 ret = -EINVAL;
4362 break;
4363 }
c1bc6626 4364 decompress_data_with_multi_threads(f, host, len);
56e93d26 4365 break;
a776aa15 4366
56e93d26 4367 case RAM_SAVE_FLAG_XBZRLE:
56e93d26
JQ
4368 if (load_xbzrle(f, addr, host) < 0) {
4369 error_report("Failed to decompress XBZRLE page at "
4370 RAM_ADDR_FMT, addr);
4371 ret = -EINVAL;
4372 break;
4373 }
4374 break;
4375 case RAM_SAVE_FLAG_EOS:
4376 /* normal exit */
6df264ac 4377 multifd_recv_sync_main();
56e93d26
JQ
4378 break;
4379 default:
4380 if (flags & RAM_SAVE_FLAG_HOOK) {
632e3a5c 4381 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
56e93d26
JQ
4382 } else {
4383 error_report("Unknown combination of migration flags: %#x",
4384 flags);
4385 ret = -EINVAL;
4386 }
4387 }
4388 if (!ret) {
4389 ret = qemu_file_get_error(f);
4390 }
4391 }
4392
34ab9e97 4393 ret |= wait_for_decompress_done();
56e93d26 4394 rcu_read_unlock();
55c4446b 4395 trace_ram_load_complete(ret, seq_iter);
e6f4aa18
ZC
4396
4397 if (!ret && migration_incoming_in_colo_state()) {
4398 colo_flush_ram_cache();
4399 }
56e93d26
JQ
4400 return ret;
4401}
4402
c6467627
VSO
4403static bool ram_has_postcopy(void *opaque)
4404{
469dd51b 4405 RAMBlock *rb;
fbd162e6 4406 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
469dd51b
JH
4407 if (ramblock_is_pmem(rb)) {
4408 info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
4409 "is not supported now!", rb->idstr, rb->host);
4410 return false;
4411 }
4412 }
4413
c6467627
VSO
4414 return migrate_postcopy_ram();
4415}
4416
edd090c7
PX
4417/* Sync all the dirty bitmap with destination VM. */
4418static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
4419{
4420 RAMBlock *block;
4421 QEMUFile *file = s->to_dst_file;
4422 int ramblock_count = 0;
4423
4424 trace_ram_dirty_bitmap_sync_start();
4425
fbd162e6 4426 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
edd090c7
PX
4427 qemu_savevm_send_recv_bitmap(file, block->idstr);
4428 trace_ram_dirty_bitmap_request(block->idstr);
4429 ramblock_count++;
4430 }
4431
4432 trace_ram_dirty_bitmap_sync_wait();
4433
4434 /* Wait until all the ramblocks' dirty bitmap synced */
4435 while (ramblock_count--) {
4436 qemu_sem_wait(&s->rp_state.rp_sem);
4437 }
4438
4439 trace_ram_dirty_bitmap_sync_complete();
4440
4441 return 0;
4442}
4443
4444static void ram_dirty_bitmap_reload_notify(MigrationState *s)
4445{
4446 qemu_sem_post(&s->rp_state.rp_sem);
4447}
4448
a335debb
PX
4449/*
4450 * Read the received bitmap, revert it as the initial dirty bitmap.
4451 * This is only used when the postcopy migration is paused but wants
4452 * to resume from a middle point.
4453 */
4454int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
4455{
4456 int ret = -EINVAL;
4457 QEMUFile *file = s->rp_state.from_dst_file;
4458 unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
a725ef9f 4459 uint64_t local_size = DIV_ROUND_UP(nbits, 8);
a335debb
PX
4460 uint64_t size, end_mark;
4461
4462 trace_ram_dirty_bitmap_reload_begin(block->idstr);
4463
4464 if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
4465 error_report("%s: incorrect state %s", __func__,
4466 MigrationStatus_str(s->state));
4467 return -EINVAL;
4468 }
4469
4470 /*
4471 * Note: see comments in ramblock_recv_bitmap_send() on why we
4472 * need the endianess convertion, and the paddings.
4473 */
4474 local_size = ROUND_UP(local_size, 8);
4475
4476 /* Add paddings */
4477 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
4478
4479 size = qemu_get_be64(file);
4480
4481 /* The size of the bitmap should match with our ramblock */
4482 if (size != local_size) {
4483 error_report("%s: ramblock '%s' bitmap size mismatch "
4484 "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
4485 block->idstr, size, local_size);
4486 ret = -EINVAL;
4487 goto out;
4488 }
4489
4490 size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
4491 end_mark = qemu_get_be64(file);
4492
4493 ret = qemu_file_get_error(file);
4494 if (ret || size != local_size) {
4495 error_report("%s: read bitmap failed for ramblock '%s': %d"
4496 " (size 0x%"PRIx64", got: 0x%"PRIx64")",
4497 __func__, block->idstr, ret, local_size, size);
4498 ret = -EIO;
4499 goto out;
4500 }
4501
4502 if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
4503 error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64,
4504 __func__, block->idstr, end_mark);
4505 ret = -EINVAL;
4506 goto out;
4507 }
4508
4509 /*
4510 * Endianess convertion. We are during postcopy (though paused).
4511 * The dirty bitmap won't change. We can directly modify it.
4512 */
4513 bitmap_from_le(block->bmap, le_bitmap, nbits);
4514
4515 /*
4516 * What we received is "received bitmap". Revert it as the initial
4517 * dirty bitmap for this ramblock.
4518 */
4519 bitmap_complement(block->bmap, block->bmap, nbits);
4520
4521 trace_ram_dirty_bitmap_reload_complete(block->idstr);
4522
edd090c7
PX
4523 /*
4524 * We succeeded to sync bitmap for current ramblock. If this is
4525 * the last one to sync, we need to notify the main send thread.
4526 */
4527 ram_dirty_bitmap_reload_notify(s);
4528
a335debb
PX
4529 ret = 0;
4530out:
bf269906 4531 g_free(le_bitmap);
a335debb
PX
4532 return ret;
4533}
4534
edd090c7
PX
4535static int ram_resume_prepare(MigrationState *s, void *opaque)
4536{
4537 RAMState *rs = *(RAMState **)opaque;
08614f34 4538 int ret;
edd090c7 4539
08614f34
PX
4540 ret = ram_dirty_bitmap_sync_all(s, rs);
4541 if (ret) {
4542 return ret;
4543 }
4544
4545 ram_state_resume_prepare(rs, s->to_dst_file);
4546
4547 return 0;
edd090c7
PX
4548}
4549
56e93d26 4550static SaveVMHandlers savevm_ram_handlers = {
9907e842 4551 .save_setup = ram_save_setup,
56e93d26 4552 .save_live_iterate = ram_save_iterate,
763c906b 4553 .save_live_complete_postcopy = ram_save_complete,
a3e06c3d 4554 .save_live_complete_precopy = ram_save_complete,
c6467627 4555 .has_postcopy = ram_has_postcopy,
56e93d26
JQ
4556 .save_live_pending = ram_save_pending,
4557 .load_state = ram_load,
f265e0e4
JQ
4558 .save_cleanup = ram_save_cleanup,
4559 .load_setup = ram_load_setup,
4560 .load_cleanup = ram_load_cleanup,
edd090c7 4561 .resume_prepare = ram_resume_prepare,
56e93d26
JQ
4562};
4563
4564void ram_mig_init(void)
4565{
4566 qemu_mutex_init(&XBZRLE.lock);
6f37bb8b 4567 register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
56e93d26 4568}