/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include <zlib.h>
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/main-loop.h"
#include "qemu/pmem.h"
#include "xbzrle.h"
#include "ram.h"
#include "migration.h"
#include "socket.h"
#include "migration/register.h"
#include "migration/misc.h"
#include "qemu-file.h"
#include "postcopy-ram.h"
#include "page_cache.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "trace.h"
#include "exec/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "block.h"
#include "sysemu/sysemu.h"
#include "qemu/uuid.h"
#include "savevm.h"
#include "qemu/iov.h"

/***********************************************************/
/* ram save/restore */

/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
 * worked for pages that were filled with the same char.  We switched
 * it to only search for the zero value.  And to avoid confusion with
 * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it.
 */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100

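/*
 * The flags above travel in the low bits of the (page-aligned) offset
 * written by save_page_header().  Worked example, illustrative only and
 * assuming a 4K target page: a zero page at offset 0x42000 of a block
 * that is already being sent goes on the wire as
 *
 *     be64(0x42000 | RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_CONTINUE)
 *       == be64(0x42022)
 */
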
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}

XBZRLECacheStats xbzrle_counters;

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}

/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from qmp_migrate_set_cache_size in the main
 * thread, possibly while a migration is in progress.  A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by the XBZRLE.lock mutex.
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(int64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}

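/*
 * Illustrative caller-side sketch (names here are hypothetical, not part
 * of this file): a QMP handler resizing the cache and propagating the
 * error back to the monitor.
 *
 *     Error *local_err = NULL;
 *
 *     if (xbzrle_cache_resize(new_size, &local_err) < 0) {
 *         error_propagate(errp, local_err);
 *     }
 */
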
/* Should be holding either ram_list.mutex, or the RCU lock. */
#define RAMBLOCK_FOREACH_MIGRATABLE(block) \
    INTERNAL_RAMBLOCK_FOREACH(block) \
        if (!qemu_ram_is_migratable(block)) {} else

#undef RAMBLOCK_FOREACH

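/*
 * Usage sketch (illustrative): this iterates like RAMBLOCK_FOREACH but
 * silently skips RAMBlocks that must not be migrated, e.g.
 *
 *     RAMBlock *rb;
 *     uint64_t total = 0;
 *
 *     RAMBLOCK_FOREACH_MIGRATABLE(rb) {
 *         total += rb->used_length;
 *     }
 */
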
static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}

int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}

bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}

void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}

void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}

#define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)

/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns >0 if success with sent bytes, or <0 if error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;
    uint64_t size;

    if (!block) {
        error_report("%s: invalid block name: %s", __func__, block_name);
        return -1;
    }

    nbits = block->used_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment).  So extend it a bit beforehand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap.  This is
     * required so that source and destination VMs that do not share
     * the same endianness can still interoperate.  (Note: big endian
     * won't work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = DIV_ROUND_UP(nbits, 8);

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines.  We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit machines.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
    qemu_fflush(file);

    g_free(le_bitmap);

    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    return size + sizeof(size);
}

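/*
 * Resulting wire layout (illustrative), for a block of N pages:
 *
 *     be64: bitmap size in bytes, ROUND_UP(N / 8, 8)
 *     the little-endian bitmap itself, exactly that many bytes
 *     be64: RAMBLOCK_RECV_BITMAP_ENDING marker
 *
 * Note that the return value of size + sizeof(size) counts the length
 * field and the bitmap, but not the trailing ending marker.
 */
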
/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};

/* State of RAM for migration */
struct RAMState {
    /* QEMUFile used for this migration */
    QEMUFile *f;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* We are in the first round */
    bool ram_bulk_stage;
    /* How many times we have found too many dirty pages in a row */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at the start of the current period */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since the start of the current period */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* total handled target pages at the beginning of period */
    uint64_t target_page_count_prev;
    /* total handled target pages since start */
    uint64_t target_page_count;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /* protects modification of the bitmap */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests;
};
typedef struct RAMState RAMState;

static RAMState *ram_state;

uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}

MigrationStats ram_counters;

/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock    *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool         complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;

struct CompressParam {
    bool done;
    bool quit;
    bool zero_page;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;

    /* internally used fields */
    z_stream stream;
    uint8_t *originbuf;
};
typedef struct CompressParam CompressParam;

struct DecompressParam {
    bool done;
    bool quit;
    QemuMutex mutex;
    QemuCond cond;
    void *des;
    uint8_t *compbuf;
    int len;
    z_stream stream;
};
typedef struct DecompressParam DecompressParam;

static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static QEMUFile *decomp_file;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;

static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
                                 ram_addr_t offset, uint8_t *source_buf);

static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;
    RAMBlock *block;
    ram_addr_t offset;
    bool zero_page;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->block) {
            block = param->block;
            offset = param->offset;
            param->block = NULL;
            qemu_mutex_unlock(&param->mutex);

            zero_page = do_compress_ram_page(param->file, &param->stream,
                                             block, offset, param->originbuf);

            qemu_mutex_lock(&comp_done_lock);
            param->done = true;
            param->zero_page = zero_page;
            qemu_cond_signal(&comp_done_cond);
            qemu_mutex_unlock(&comp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}

static inline void terminate_compression_threads(void)
{
    int idx, thread_count;

    thread_count = migrate_compress_threads();

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        comp_param[idx].quit = true;
        qemu_cond_signal(&comp_param[idx].cond);
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}

static void compress_threads_save_cleanup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        /*
         * we use it as an indicator which shows if the thread is
         * properly initialized or not
         */
        if (!comp_param[i].file) {
            break;
        }
        qemu_thread_join(compress_threads + i);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
        deflateEnd(&comp_param[i].stream);
        g_free(comp_param[i].originbuf);
        qemu_fclose(comp_param[i].file);
        comp_param[i].file = NULL;
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}

static int compress_threads_save_setup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!comp_param[i].originbuf) {
            goto exit;
        }

        if (deflateInit(&comp_param[i].stream,
                        migrate_compress_level()) != Z_OK) {
            g_free(comp_param[i].originbuf);
            goto exit;
        }

        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
    return 0;

exit:
    compress_threads_save_cleanup();
    return -1;
}

/* Multiple fd's */

#define MULTIFD_MAGIC 0x11223344U
#define MULTIFD_VERSION 1

#define MULTIFD_FLAG_SYNC (1 << 0)

typedef struct {
    uint32_t magic;
    uint32_t version;
    unsigned char uuid[16]; /* QemuUUID */
    uint8_t id;
} __attribute__((packed)) MultiFDInit_t;

typedef struct {
    uint32_t magic;
    uint32_t version;
    uint32_t flags;
    uint32_t size;
    uint32_t used;
    uint64_t packet_num;
    char ramblock[256];
    uint64_t offset[];
} __attribute__((packed)) MultiFDPacket_t;

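/*
 * On-wire view of one multifd packet as produced by
 * multifd_send_fill_packet() (illustrative; all integers big-endian):
 *
 *     magic(4) version(4) flags(4) size(4) used(4) packet_num(8)
 *     ramblock[256] offset[0..used-1] (8 bytes each)
 *
 * "size" is the allocated page count (migrate_multifd_page_count());
 * "used" is how many offsets actually carry data in this packet.  The
 * page contents themselves follow in a separate writev of "used" iovecs.
 */
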
typedef struct {
    /* number of used pages */
    uint32_t used;
    /* number of allocated pages */
    uint32_t allocated;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* offset of each page */
    ram_addr_t *offset;
    /* pointer to each page */
    struct iovec *iov;
    RAMBlock *block;
} MultiFDPages_t;

typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* sem where to wait for more work */
    QemuSemaphore sem;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* should this thread finish */
    bool quit;
    /* thread has work to do */
    int pending_job;
    /* array of pages to be sent */
    MultiFDPages_t *pages;
    /* packet allocated len */
    uint32_t packet_len;
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    uint32_t flags;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* thread local variables */
    /* packets sent through this channel */
    uint64_t num_packets;
    /* pages sent through this channel */
    uint64_t num_pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
} MultiFDSendParams;

typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* array of pages to receive */
    MultiFDPages_t *pages;
    /* packet allocated len */
    uint32_t packet_len;
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    uint32_t flags;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* thread local variables */
    /* packets received through this channel */
    uint64_t num_packets;
    /* pages received through this channel */
    uint64_t num_pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
} MultiFDRecvParams;

static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    msg.magic = cpu_to_be32(MULTIFD_MAGIC);
    msg.version = cpu_to_be32(MULTIFD_VERSION);
    msg.id = p->id;
    memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));

    ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }
    return 0;
}

static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }

    be32_to_cpus(&msg.magic);
    be32_to_cpus(&msg.version);

    if (msg.magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet magic %x "
                   "expected %x", msg.magic, MULTIFD_MAGIC);
        return -1;
    }

    if (msg.version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet version %d "
                   "expected %d", msg.version, MULTIFD_VERSION);
        return -1;
    }

    if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
        char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
        char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);

        error_setg(errp, "multifd: received uuid '%s' and expected "
                   "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
        g_free(uuid);
        g_free(msg_uuid);
        return -1;
    }

    if (msg.id > migrate_multifd_channels()) {
        error_setg(errp, "multifd: received channel id %d "
                   "expected a value below %d", msg.id,
                   migrate_multifd_channels());
        return -1;
    }

    return msg.id;
}

static MultiFDPages_t *multifd_pages_init(size_t size)
{
    MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);

    pages->allocated = size;
    pages->iov = g_new0(struct iovec, size);
    pages->offset = g_new0(ram_addr_t, size);

    return pages;
}

static void multifd_pages_clear(MultiFDPages_t *pages)
{
    pages->used = 0;
    pages->allocated = 0;
    pages->packet_num = 0;
    pages->block = NULL;
    g_free(pages->iov);
    pages->iov = NULL;
    g_free(pages->offset);
    pages->offset = NULL;
    g_free(pages);
}

static void multifd_send_fill_packet(MultiFDSendParams *p)
{
    MultiFDPacket_t *packet = p->packet;
    int i;

    packet->magic = cpu_to_be32(MULTIFD_MAGIC);
    packet->version = cpu_to_be32(MULTIFD_VERSION);
    packet->flags = cpu_to_be32(p->flags);
    packet->size = cpu_to_be32(migrate_multifd_page_count());
    packet->used = cpu_to_be32(p->pages->used);
    packet->packet_num = cpu_to_be64(p->packet_num);

    if (p->pages->block) {
        strncpy(packet->ramblock, p->pages->block->idstr, 256);
    }

    for (i = 0; i < p->pages->used; i++) {
        packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
    }
}

static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
{
    MultiFDPacket_t *packet = p->packet;
    RAMBlock *block;
    int i;

    be32_to_cpus(&packet->magic);
    if (packet->magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet "
                   "magic %x and expected magic %x",
                   packet->magic, MULTIFD_MAGIC);
        return -1;
    }

    be32_to_cpus(&packet->version);
    if (packet->version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet "
                   "version %d and expected version %d",
                   packet->version, MULTIFD_VERSION);
        return -1;
    }

    p->flags = be32_to_cpu(packet->flags);

    be32_to_cpus(&packet->size);
    if (packet->size > migrate_multifd_page_count()) {
        error_setg(errp, "multifd: received packet "
                   "with size %d and expected maximum size %d",
                   packet->size, migrate_multifd_page_count());
        return -1;
    }

    p->pages->used = be32_to_cpu(packet->used);
    if (p->pages->used > packet->size) {
        error_setg(errp, "multifd: received packet "
                   "with %d used pages and expected maximum of %d",
                   p->pages->used, packet->size);
        return -1;
    }

    p->packet_num = be64_to_cpu(packet->packet_num);

    if (p->pages->used) {
        /* make sure that ramblock is 0 terminated */
        packet->ramblock[255] = 0;
        block = qemu_ram_block_by_name(packet->ramblock);
        if (!block) {
            error_setg(errp, "multifd: unknown ram block %s",
                       packet->ramblock);
            return -1;
        }
    }

    for (i = 0; i < p->pages->used; i++) {
        ram_addr_t offset = be64_to_cpu(packet->offset[i]);

        if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
            error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
                       " (max " RAM_ADDR_FMT ")",
                       offset, block->used_length);
            return -1;
        }
        p->pages->iov[i].iov_base = block->host + offset;
        p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
    }

    return 0;
}

struct {
    MultiFDSendParams *params;
    /* number of created threads */
    int count;
    /* array of pages to be sent */
    MultiFDPages_t *pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* send channels ready */
    QemuSemaphore channels_ready;
} *multifd_send_state;

/*
 * How we use multifd_send_state->pages and channel->pages?
 *
 * We create a pages array for each channel, and a main one.  Each time
 * that we need to send a batch of pages we interchange the one in
 * multifd_send_state with the one in the channel that is sending it.
 * There are two reasons for that:
 *    - not having to do so many mallocs during migration
 *    - making it easier to know what to free at the end of migration
 *
 * This way we always know who is the owner of each "pages" struct,
 * and we don't need any locking.  It belongs to the migration thread
 * or to the channel thread.  Switching is safe because the migration
 * thread is using the channel mutex when changing it, and the channel
 * has to have finished with its own, otherwise pending_job can't be
 * false.
 */

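/*
 * Sketch of one exchange (illustrative):
 *
 *   migration thread                     channel thread
 *   ----------------                     --------------
 *   fills multifd_send_state->pages
 *   multifd_send_pages():
 *     picks a channel with
 *     pending_job == 0, swaps its
 *     pages with the channel's,
 *     posts p->sem          ------>      wakes up, writes out p->pages,
 *                                        decrements pending_job and
 *                                        posts channels_ready
 */
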
static void multifd_send_pages(void)
{
    int i;
    static int next_channel;
    MultiFDSendParams *p = NULL; /* make happy gcc */
    MultiFDPages_t *pages = multifd_send_state->pages;
    uint64_t transferred;

    qemu_sem_wait(&multifd_send_state->channels_ready);
    for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
        p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        if (!p->pending_job) {
            p->pending_job++;
            next_channel = (i + 1) % migrate_multifd_channels();
            break;
        }
        qemu_mutex_unlock(&p->mutex);
    }
    p->pages->used = 0;

    p->packet_num = multifd_send_state->packet_num++;
    p->pages->block = NULL;
    multifd_send_state->pages = p->pages;
    p->pages = pages;
    transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
    ram_counters.multifd_bytes += transferred;
    ram_counters.transferred += transferred;
    qemu_mutex_unlock(&p->mutex);
    qemu_sem_post(&p->sem);
}

static void multifd_queue_page(RAMBlock *block, ram_addr_t offset)
{
    MultiFDPages_t *pages = multifd_send_state->pages;

    if (!pages->block) {
        pages->block = block;
    }

    if (pages->block == block) {
        pages->offset[pages->used] = offset;
        pages->iov[pages->used].iov_base = block->host + offset;
        pages->iov[pages->used].iov_len = TARGET_PAGE_SIZE;
        pages->used++;

        if (pages->used < pages->allocated) {
            return;
        }
    }

    multifd_send_pages();

    if (pages->block != block) {
        multifd_queue_page(block, offset);
    }
}

static void multifd_send_terminate_threads(Error *err)
{
    int i;

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
            s->state == MIGRATION_STATUS_DEVICE ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        p->quit = true;
        qemu_sem_post(&p->sem);
        qemu_mutex_unlock(&p->mutex);
    }
}

int multifd_save_cleanup(Error **errp)
{
    int i;
    int ret = 0;

    if (!migrate_use_multifd()) {
        return 0;
    }
    multifd_send_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        if (p->running) {
            qemu_thread_join(&p->thread);
        }
        socket_send_channel_destroy(p->c);
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem);
        qemu_sem_destroy(&p->sem_sync);
        g_free(p->name);
        p->name = NULL;
        multifd_pages_clear(p->pages);
        p->pages = NULL;
        p->packet_len = 0;
        g_free(p->packet);
        p->packet = NULL;
    }
    qemu_sem_destroy(&multifd_send_state->channels_ready);
    qemu_sem_destroy(&multifd_send_state->sem_sync);
    g_free(multifd_send_state->params);
    multifd_send_state->params = NULL;
    multifd_pages_clear(multifd_send_state->pages);
    multifd_send_state->pages = NULL;
    g_free(multifd_send_state);
    multifd_send_state = NULL;
    return ret;
}

static void multifd_send_sync_main(void)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    if (multifd_send_state->pages->used) {
        multifd_send_pages();
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_signal(p->id);

        qemu_mutex_lock(&p->mutex);

        p->packet_num = multifd_send_state->packet_num++;
        p->flags |= MULTIFD_FLAG_SYNC;
        p->pending_job++;
        qemu_mutex_unlock(&p->mutex);
        qemu_sem_post(&p->sem);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_wait(p->id);
        qemu_sem_wait(&multifd_send_state->sem_sync);
    }
    trace_multifd_send_sync_main(multifd_send_state->packet_num);
}

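/*
 * Sync protocol sketch (illustrative): multifd_send_sync_main() queues a
 * job flagged MULTIFD_FLAG_SYNC on every channel, then waits once per
 * channel on multifd_send_state->sem_sync.  Each channel thread posts
 * that semaphore right after flushing its SYNC packet, so when the wait
 * loop finishes, everything queued before the sync point has hit the
 * wire.  The destination mirrors this in multifd_recv_sync_main().
 */
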
static void *multifd_send_thread(void *opaque)
{
    MultiFDSendParams *p = opaque;
    Error *local_err = NULL;
    int ret;

    trace_multifd_send_thread_start(p->id);
    rcu_register_thread();

    if (multifd_send_initial_packet(p, &local_err) < 0) {
        goto out;
    }
    /* initial packet */
    p->num_packets = 1;

    while (true) {
        qemu_sem_wait(&p->sem);
        qemu_mutex_lock(&p->mutex);

        if (p->pending_job) {
            uint32_t used = p->pages->used;
            uint64_t packet_num = p->packet_num;
            uint32_t flags = p->flags;

            multifd_send_fill_packet(p);
            p->flags = 0;
            p->num_packets++;
            p->num_pages += used;
            p->pages->used = 0;
            qemu_mutex_unlock(&p->mutex);

            trace_multifd_send(p->id, packet_num, used, flags);

            ret = qio_channel_write_all(p->c, (void *)p->packet,
                                        p->packet_len, &local_err);
            if (ret != 0) {
                break;
            }

            ret = qio_channel_writev_all(p->c, p->pages->iov, used, &local_err);
            if (ret != 0) {
                break;
            }

            qemu_mutex_lock(&p->mutex);
            p->pending_job--;
            qemu_mutex_unlock(&p->mutex);

            if (flags & MULTIFD_FLAG_SYNC) {
                qemu_sem_post(&multifd_send_state->sem_sync);
            }
            qemu_sem_post(&multifd_send_state->channels_ready);
        } else if (p->quit) {
            qemu_mutex_unlock(&p->mutex);
            break;
        } else {
            qemu_mutex_unlock(&p->mutex);
            /* sometimes there are spurious wakeups */
        }
    }

out:
    if (local_err) {
        multifd_send_terminate_threads(local_err);
    }

    qemu_mutex_lock(&p->mutex);
    p->running = false;
    qemu_mutex_unlock(&p->mutex);

    rcu_unregister_thread();
    trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);

    return NULL;
}

static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
{
    MultiFDSendParams *p = opaque;
    QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
    Error *local_err = NULL;

    if (qio_task_propagate_error(task, &local_err)) {
        if (multifd_save_cleanup(&local_err) != 0) {
            migrate_set_error(migrate_get_current(), local_err);
        }
    } else {
        p->c = QIO_CHANNEL(sioc);
        qio_channel_set_delay(p->c, false);
        p->running = true;
        qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
                           QEMU_THREAD_JOINABLE);

        atomic_inc(&multifd_send_state->count);
    }
}

int multifd_save_setup(void)
{
    int thread_count;
    uint32_t page_count = migrate_multifd_page_count();
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }
    thread_count = migrate_multifd_channels();
    multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
    multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
    atomic_set(&multifd_send_state->count, 0);
    multifd_send_state->pages = multifd_pages_init(page_count);
    qemu_sem_init(&multifd_send_state->sem_sync, 0);
    qemu_sem_init(&multifd_send_state->channels_ready, 0);

    for (i = 0; i < thread_count; i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem, 0);
        qemu_sem_init(&p->sem_sync, 0);
        p->quit = false;
        p->pending_job = 0;
        p->id = i;
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(ram_addr_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->name = g_strdup_printf("multifdsend_%d", i);
        socket_send_channel_create(multifd_new_send_channel_async, p);
    }
    return 0;
}

struct {
    MultiFDRecvParams *params;
    /* number of created threads */
    int count;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
    /* global number of generated multifd packets */
    uint64_t packet_num;
} *multifd_recv_state;

static void multifd_recv_terminate_threads(Error *err)
{
    int i;

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_lock(&p->mutex);
        /* We could arrive here for two reasons:
           - normal quit, i.e. everything went fine, just finished
           - error quit: We close the channels so the channel threads
             finish the qio_channel_read_all_eof() */
        qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        qemu_mutex_unlock(&p->mutex);
    }
}

int multifd_load_cleanup(Error **errp)
{
    int i;
    int ret = 0;

    if (!migrate_use_multifd()) {
        return 0;
    }
    multifd_recv_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        if (p->running) {
            qemu_thread_join(&p->thread);
        }
        object_unref(OBJECT(p->c));
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem_sync);
        g_free(p->name);
        p->name = NULL;
        multifd_pages_clear(p->pages);
        p->pages = NULL;
        p->packet_len = 0;
        g_free(p->packet);
        p->packet = NULL;
    }
    qemu_sem_destroy(&multifd_recv_state->sem_sync);
    g_free(multifd_recv_state->params);
    multifd_recv_state->params = NULL;
    g_free(multifd_recv_state);
    multifd_recv_state = NULL;

    return ret;
}

static void multifd_recv_sync_main(void)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        trace_multifd_recv_sync_main_wait(p->id);
        qemu_sem_wait(&multifd_recv_state->sem_sync);
        qemu_mutex_lock(&p->mutex);
        if (multifd_recv_state->packet_num < p->packet_num) {
            multifd_recv_state->packet_num = p->packet_num;
        }
        qemu_mutex_unlock(&p->mutex);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        trace_multifd_recv_sync_main_signal(p->id);
        qemu_sem_post(&p->sem_sync);
    }
    trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
}

static void *multifd_recv_thread(void *opaque)
{
    MultiFDRecvParams *p = opaque;
    Error *local_err = NULL;
    int ret;

    trace_multifd_recv_thread_start(p->id);
    rcu_register_thread();

    while (true) {
        uint32_t used;
        uint32_t flags;

        ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
                                       p->packet_len, &local_err);
        if (ret == 0) {   /* EOF */
            break;
        }
        if (ret == -1) {   /* Error */
            break;
        }

        qemu_mutex_lock(&p->mutex);
        ret = multifd_recv_unfill_packet(p, &local_err);
        if (ret) {
            qemu_mutex_unlock(&p->mutex);
            break;
        }

        used = p->pages->used;
        flags = p->flags;
        trace_multifd_recv(p->id, p->packet_num, used, flags);
        p->num_packets++;
        p->num_pages += used;
        qemu_mutex_unlock(&p->mutex);

        ret = qio_channel_readv_all(p->c, p->pages->iov, used, &local_err);
        if (ret != 0) {
            break;
        }

        if (flags & MULTIFD_FLAG_SYNC) {
            qemu_sem_post(&multifd_recv_state->sem_sync);
            qemu_sem_wait(&p->sem_sync);
        }
    }

    if (local_err) {
        multifd_recv_terminate_threads(local_err);
    }
    qemu_mutex_lock(&p->mutex);
    p->running = false;
    qemu_mutex_unlock(&p->mutex);

    rcu_unregister_thread();
    trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);

    return NULL;
}

int multifd_load_setup(void)
{
    int thread_count;
    uint32_t page_count = migrate_multifd_page_count();
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }
    thread_count = migrate_multifd_channels();
    multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
    multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
    atomic_set(&multifd_recv_state->count, 0);
    qemu_sem_init(&multifd_recv_state->sem_sync, 0);

    for (i = 0; i < thread_count; i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem_sync, 0);
        p->id = i;
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(ram_addr_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->name = g_strdup_printf("multifdrecv_%d", i);
    }
    return 0;
}

bool multifd_recv_all_channels_created(void)
{
    int thread_count = migrate_multifd_channels();

    if (!migrate_use_multifd()) {
        return true;
    }

    return thread_count == atomic_read(&multifd_recv_state->count);
}

/* Return true if multifd is ready for the migration, otherwise false */
bool multifd_recv_new_channel(QIOChannel *ioc)
{
    MultiFDRecvParams *p;
    Error *local_err = NULL;
    int id;

    id = multifd_recv_initial_packet(ioc, &local_err);
    if (id < 0) {
        multifd_recv_terminate_threads(local_err);
        return false;
    }

    p = &multifd_recv_state->params[id];
    if (p->c != NULL) {
        error_setg(&local_err, "multifd: received id '%d' already setup",
                   id);
        multifd_recv_terminate_threads(local_err);
        return false;
    }
    p->c = ioc;
    object_ref(OBJECT(ioc));
    /* initial packet */
    p->num_packets = 1;

    p->running = true;
    qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
                       QEMU_THREAD_JOINABLE);
    atomic_inc(&multifd_recv_state->count);
    return multifd_recv_state->count == migrate_multifd_channels();
}

/**
 * save_page_header: write page header to wire
 *
 * If the block is different from the last one sent, its identification
 * is also written
 *
 * Returns the number of bytes written
 *
 * @rs: current RAM state
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
                               ram_addr_t offset)
{
    size_t size, len;

    if (block == rs->last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        rs->last_sent_block = block;
    }
    return size;
}

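/*
 * Resulting stream (illustrative): the first page of block "pc.ram"
 * costs be64(offset | flags) + byte(6) + "pc.ram", i.e. 8 + 1 + 6 bytes;
 * every following page of the same block costs only the 8-byte
 * be64(offset | flags | RAM_SAVE_FLAG_CONTINUE).
 */
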
/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes.  If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration.  Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(void)
{
    MigrationState *s = migrate_get_current();
    uint64_t pct_initial = s->parameters.cpu_throttle_initial;
    uint64_t pct_increment = s->parameters.cpu_throttle_increment;
    int pct_max = s->parameters.max_cpu_throttle;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_increment,
                             pct_max));
    }
}

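/*
 * Worked example (illustrative, assuming the usual defaults of 20%
 * initial throttle, 10% increment and a 99% cap): consecutive calls
 * throttle the guest at 20%, 30%, 40%, ... of its CPU time until
 * migration converges or the cap is reached.
 */
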
/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @rs: current RAM state
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
{
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
                 ram_counters.dirty_sync_count);
}

#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 */
static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr,
                         ram_counters.dirty_sync_count)) {
        xbzrle_counters.cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             ram_counters.dirty_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        xbzrle_counters.overflow++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(rs, rs->f, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(rs->f, encoded_len);
    qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    xbzrle_counters.pages++;
    xbzrle_counters.bytes += bytes_xbzrle;
    ram_counters.transferred += bytes_xbzrle;

    return 1;
}

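/*
 * Stream format of an XBZRLE page (illustrative): the usual page header
 * with RAM_SAVE_FLAG_XBZRLE set, then ENCODING_FLAG_XBZRLE as one byte,
 * a be16 encoded length, and the encoded delta itself -- hence the
 * "encoded_len + 1 + 2" accounting above.
 */
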
/**
 * migration_bitmap_find_dirty: find the next dirty page from start
 *
 * Called with rcu_read_lock() to protect migration_bitmap
 *
 * Returns the byte offset within memory region of the start of a dirty page
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 */
static inline
unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                          unsigned long start)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long next;

    if (!qemu_ram_is_migratable(rb)) {
        return size;
    }

    if (rs->ram_bulk_stage && start > 0) {
        next = start + 1;
    } else {
        next = find_next_bit(bitmap, size, start);
    }

    return next;
}

static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                RAMBlock *rb,
                                                unsigned long page)
{
    bool ret;

    ret = test_and_clear_bit(page, rb->bmap);

    if (ret) {
        rs->migration_dirty_pages--;
    }
    return ret;
}

static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
                                        ram_addr_t start, ram_addr_t length)
{
    rs->migration_dirty_pages +=
        cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
                                              &rs->num_dirty_pages_period);
}

/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size.  If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    RAMBlock *block;
    uint64_t summary = 0;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        summary |= block->page_size;
    }

    return summary;
}

static void migration_update_rates(RAMState *rs, int64_t end_time)
{
    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;

    /* calculate period counters */
    ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
                / (end_time - rs->time_last_bitmap_sync);

    if (!page_count) {
        return;
    }

    if (migrate_use_xbzrle()) {
        xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
            rs->xbzrle_cache_miss_prev) / page_count;
        rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
    }
}

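/*
 * The same in formula form (illustrative):
 *
 *   dirty_pages_rate = num_dirty_pages_period * 1000
 *                      / (end_time - time_last_bitmap_sync)    [pages/s]
 *   cache_miss_rate  = (cache_miss - cache_miss_prev) / page_count
 *
 * with page_count being the target pages handled during this period.
 */
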
static void migration_bitmap_sync(RAMState *rs)
{
    RAMBlock *block;
    int64_t end_time;
    uint64_t bytes_xfer_now;

    ram_counters.dirty_sync_count++;

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync();

    qemu_mutex_lock(&rs->bitmap_mutex);
    rcu_read_lock();
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        migration_bitmap_sync_range(rs, block, 0, block->used_length);
    }
    ram_counters.remaining = ram_bytes_remaining();
    rcu_read_unlock();
    qemu_mutex_unlock(&rs->bitmap_mutex);

    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        bytes_xfer_now = ram_counters.transferred;

        /* During block migration the auto-converge logic incorrectly detects
         * that ram migration makes no progress. Avoid this by disabling the
         * throttling logic during the bulk phase of block migration. */
        if (migrate_auto_converge() && !blk_mig_bulk_active()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes exceed 50% of the approx.
               amount of bytes that just got transferred since the last time
               we were in this routine. If that happens twice, start or
               increase throttling */

            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
                   (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
                (++rs->dirty_rate_high_cnt >= 2)) {
                    trace_migration_throttle();
                    rs->dirty_rate_high_cnt = 0;
                    mig_throttle_guest_down();
            }
        }

        migration_update_rates(rs, end_time);

        rs->target_page_count_prev = rs->target_page_count;

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
        rs->bytes_xfer_prev = bytes_xfer_now;
    }
    if (migrate_use_events()) {
        qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
    }
}

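/*
 * Numeric example of the detection above (illustrative): if a period
 * transferred 100MB while the guest dirtied more than 50MB worth of
 * pages (the "... / 2" check), and that repeats in the next period too,
 * dirty_rate_high_cnt reaches 2 and the guest gets throttled down.
 */
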
/**
 * save_zero_page_to_file: send the zero page to the file
 *
 * Returns the size of data written to the file, 0 means the page is not
 * a zero page
 *
 * @rs: current RAM state
 * @file: the file where the data is saved
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
                                  RAMBlock *block, ram_addr_t offset)
{
    uint8_t *p = block->host + offset;
    int len = 0;

    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
        qemu_put_byte(file, 0);
        len += 1;
    }
    return len;
}

/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of pages written (1), or -1 if the page is not
 * a zero page
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
{
    int len = save_zero_page_to_file(rs, rs->f, block, offset);

    if (len) {
        ram_counters.duplicate++;
        ram_counters.transferred += len;
        return 1;
    }
    return -1;
}

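/*
 * Wire cost comparison (illustrative): a zero page is just the page
 * header with RAM_SAVE_FLAG_ZERO plus one 0 byte, versus a full
 * TARGET_PAGE_SIZE payload for a RAM_SAVE_FLAG_PAGE page.
 */
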
5727309d 1723static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
53f09a10 1724{
5727309d 1725 if (!migrate_release_ram() || !migration_in_postcopy()) {
53f09a10
PB
1726 return;
1727 }
1728
aaa2064c 1729 ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
53f09a10
PB
1730}
1731
059ff0fb
XG
1732/*
1733 * @pages: the number of pages written by the control path,
1734 * < 0 - error
1735 * > 0 - number of pages written
1736 *
1737 * Return true if the pages has been saved, otherwise false is returned.
1738 */
1739static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1740 int *pages)
1741{
1742 uint64_t bytes_xmit = 0;
1743 int ret;
1744
1745 *pages = -1;
1746 ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
1747 &bytes_xmit);
1748 if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1749 return false;
1750 }
1751
1752 if (bytes_xmit) {
1753 ram_counters.transferred += bytes_xmit;
1754 *pages = 1;
1755 }
1756
1757 if (ret == RAM_SAVE_CONTROL_DELAYED) {
1758 return true;
1759 }
1760
1761 if (bytes_xmit > 0) {
1762 ram_counters.normal++;
1763 } else if (bytes_xmit == 0) {
1764 ram_counters.duplicate++;
1765 }
1766
1767 return true;
1768}
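/*
 * Illustrative sketch (not part of the original file): how the
 * tri-state result of control_save_page() is consumed. This mirrors
 * the use in ram_save_target_page() further below; the function name
 * is hypothetical and G_GNUC_UNUSED only keeps the sketch from
 * tripping unused-function warnings.
 */
static G_GNUC_UNUSED int example_control_then_fallback(RAMState *rs,
                                                       RAMBlock *block,
                                                       ram_addr_t offset)
{
    int res;

    if (control_save_page(rs, block, offset, &res)) {
        /* the control path handled the page: res is < 0 on error */
        return res;
    }
    /* RAM_SAVE_CONTROL_NOT_SUPP: fall back to the normal save paths */
    return -1;
}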
1769
65dacaa0
XG
1770/*
1771 * directly send the page to the stream
1772 *
1773 * Returns the number of pages written.
1774 *
1775 * @rs: current RAM state
1776 * @block: block that contains the page we want to send
1777 * @offset: offset inside the block for the page
1778 * @buf: the page to be sent
 1779 * @async: send the page asynchronously
1780 */
1781static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1782 uint8_t *buf, bool async)
1783{
1784 ram_counters.transferred += save_page_header(rs, rs->f, block,
1785 offset | RAM_SAVE_FLAG_PAGE);
1786 if (async) {
1787 qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
1788 migrate_release_ram() &
1789 migration_in_postcopy());
1790 } else {
1791 qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
1792 }
1793 ram_counters.transferred += TARGET_PAGE_SIZE;
1794 ram_counters.normal++;
1795 return 1;
1796}
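/*
 * Illustrative sketch (not part of the original file): the zero-page
 * fast path followed by the full-page fallback, roughly what
 * ram_save_page() below does when xbzrle is not involved. The function
 * name is hypothetical.
 */
static G_GNUC_UNUSED int example_send_page(RAMState *rs, RAMBlock *block,
                                           ram_addr_t offset)
{
    uint8_t *p = block->host + offset;

    if (save_zero_page(rs, block, offset) > 0) {
        return 1;    /* all-zero page: only a header plus one byte sent */
    }
    /* not a zero page: send the full TARGET_PAGE_SIZE buffer */
    return save_normal_page(rs, block, offset, p, false);
}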
1797
56e93d26 1798/**
3d0684b2 1799 * ram_save_page: send the given page to the stream
56e93d26 1800 *
3d0684b2 1801 * Returns the number of pages written.
3fd3c4b3
DDAG
1802 * < 0 - error
1803 * >=0 - Number of pages written - this might legally be 0
1804 * if xbzrle noticed the page was the same.
56e93d26 1805 *
6f37bb8b 1806 * @rs: current RAM state
56e93d26
JQ
1807 * @block: block that contains the page we want to send
1808 * @offset: offset inside the block for the page
1809 * @last_stage: if we are at the completion stage
56e93d26 1810 */
a0a8aa14 1811static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
56e93d26
JQ
1812{
1813 int pages = -1;
56e93d26 1814 uint8_t *p;
56e93d26 1815 bool send_async = true;
a08f6890 1816 RAMBlock *block = pss->block;
a935e30f 1817 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
059ff0fb 1818 ram_addr_t current_addr = block->offset + offset;
56e93d26 1819
2f68e399 1820 p = block->host + offset;
1db9d8e5 1821 trace_ram_save_page(block->idstr, (uint64_t)offset, p);
56e93d26 1822
56e93d26 1823 XBZRLE_cache_lock();
d7400a34
XG
1824 if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
1825 migrate_use_xbzrle()) {
059ff0fb
XG
1826 pages = save_xbzrle_page(rs, &p, current_addr, block,
1827 offset, last_stage);
1828 if (!last_stage) {
1829 /* Can't send this cached data async, since the cache page
1830 * might get updated before it gets to the wire
56e93d26 1831 */
059ff0fb 1832 send_async = false;
56e93d26
JQ
1833 }
1834 }
1835
1836 /* XBZRLE overflow or normal page */
1837 if (pages == -1) {
65dacaa0 1838 pages = save_normal_page(rs, block, offset, p, send_async);
56e93d26
JQ
1839 }
1840
1841 XBZRLE_cache_unlock();
1842
1843 return pages;
1844}
1845
b9ee2f7d
JQ
1846static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
1847 ram_addr_t offset)
1848{
b9ee2f7d 1849 multifd_queue_page(block, offset);
b9ee2f7d
JQ
1850 ram_counters.normal++;
1851
1852 return 1;
1853}
1854
5e5fdcff 1855static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
6ef3771c 1856 ram_addr_t offset, uint8_t *source_buf)
56e93d26 1857{
53518d94 1858 RAMState *rs = ram_state;
a7a9a88f 1859 uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
5e5fdcff 1860 bool zero_page = false;
6ef3771c 1861 int ret;
56e93d26 1862
5e5fdcff
XG
1863 if (save_zero_page_to_file(rs, f, block, offset)) {
1864 zero_page = true;
1865 goto exit;
1866 }
1867
6ef3771c 1868 save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
34ab9e97
XG
1869
1870 /*
 1871 * copy it to an internal buffer to avoid it being modified by the VM
 1872 * so that we can catch errors during compression and
1873 * decompression
1874 */
1875 memcpy(source_buf, p, TARGET_PAGE_SIZE);
6ef3771c
XG
1876 ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
1877 if (ret < 0) {
1878 qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
b3be2896 1879 error_report("compressed data failed!");
5e5fdcff 1880 return false;
b3be2896 1881 }
56e93d26 1882
5e5fdcff 1883exit:
6ef3771c 1884 ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
5e5fdcff
XG
1885 return zero_page;
1886}
1887
1888static void
1889update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
1890{
1891 if (param->zero_page) {
1892 ram_counters.duplicate++;
1893 }
1894 ram_counters.transferred += bytes_xmit;
56e93d26
JQ
1895}
1896
ce25d337 1897static void flush_compressed_data(RAMState *rs)
56e93d26
JQ
1898{
1899 int idx, len, thread_count;
1900
1901 if (!migrate_use_compression()) {
1902 return;
1903 }
1904 thread_count = migrate_compress_threads();
a7a9a88f 1905
0d9f9a5c 1906 qemu_mutex_lock(&comp_done_lock);
56e93d26 1907 for (idx = 0; idx < thread_count; idx++) {
a7a9a88f 1908 while (!comp_param[idx].done) {
0d9f9a5c 1909 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
56e93d26 1910 }
a7a9a88f 1911 }
0d9f9a5c 1912 qemu_mutex_unlock(&comp_done_lock);
a7a9a88f
LL
1913
1914 for (idx = 0; idx < thread_count; idx++) {
1915 qemu_mutex_lock(&comp_param[idx].mutex);
90e56fb4 1916 if (!comp_param[idx].quit) {
ce25d337 1917 len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
5e5fdcff
XG
1918 /*
1919 * it's safe to fetch zero_page without holding comp_done_lock
1920 * as there is no further request submitted to the thread,
 1921 * i.e., the thread should be waiting for a request at this point.
1922 */
1923 update_compress_thread_counts(&comp_param[idx], len);
56e93d26 1924 }
a7a9a88f 1925 qemu_mutex_unlock(&comp_param[idx].mutex);
56e93d26
JQ
1926 }
1927}
1928
1929static inline void set_compress_params(CompressParam *param, RAMBlock *block,
1930 ram_addr_t offset)
1931{
1932 param->block = block;
1933 param->offset = offset;
1934}
1935
ce25d337
JQ
1936static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
1937 ram_addr_t offset)
56e93d26
JQ
1938{
1939 int idx, thread_count, bytes_xmit = -1, pages = -1;
1d58872a 1940 bool wait = migrate_compress_wait_thread();
56e93d26
JQ
1941
1942 thread_count = migrate_compress_threads();
0d9f9a5c 1943 qemu_mutex_lock(&comp_done_lock);
1d58872a
XG
1944retry:
1945 for (idx = 0; idx < thread_count; idx++) {
1946 if (comp_param[idx].done) {
1947 comp_param[idx].done = false;
1948 bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
1949 qemu_mutex_lock(&comp_param[idx].mutex);
1950 set_compress_params(&comp_param[idx], block, offset);
1951 qemu_cond_signal(&comp_param[idx].cond);
1952 qemu_mutex_unlock(&comp_param[idx].mutex);
1953 pages = 1;
5e5fdcff 1954 update_compress_thread_counts(&comp_param[idx], bytes_xmit);
56e93d26 1955 break;
56e93d26
JQ
1956 }
1957 }
1d58872a
XG
1958
1959 /*
1960 * wait for the free thread if the user specifies 'compress-wait-thread',
 1961 * otherwise we will post the page out in the main thread as a normal page.
1962 */
1963 if (pages < 0 && wait) {
1964 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
1965 goto retry;
1966 }
0d9f9a5c 1967 qemu_mutex_unlock(&comp_done_lock);
56e93d26
JQ
1968
1969 return pages;
1970}
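/*
 * Illustrative sketch (not part of the original file): when no worker
 * is free and 'compress-wait-thread' is disabled, the function above
 * returns -1 and the caller is expected to post the page out
 * uncompressed instead; see save_compress_page() below. The function
 * name is hypothetical.
 */
static G_GNUC_UNUSED bool example_try_compress(RAMState *rs, RAMBlock *block,
                                               ram_addr_t offset)
{
    /* true: queued to a worker; false: caller must use another path */
    return compress_page_with_multi_thread(rs, block, offset) > 0;
}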
1971
3d0684b2
JQ
1972/**
1973 * find_dirty_block: find the next dirty page and update any state
1974 * associated with the search process.
b9e60928 1975 *
3d0684b2 1976 * Returns true if a page is found
b9e60928 1977 *
6f37bb8b 1978 * @rs: current RAM state
3d0684b2
JQ
1979 * @pss: data about the state of the current dirty page scan
1980 * @again: set to false if the search has scanned the whole of RAM
b9e60928 1981 */
f20e2865 1982static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
b9e60928 1983{
f20e2865 1984 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
6f37bb8b 1985 if (pss->complete_round && pss->block == rs->last_seen_block &&
a935e30f 1986 pss->page >= rs->last_page) {
b9e60928
DDAG
1987 /*
1988 * We've been once around the RAM and haven't found anything.
1989 * Give up.
1990 */
1991 *again = false;
1992 return false;
1993 }
a935e30f 1994 if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
b9e60928 1995 /* Didn't find anything in this RAM Block */
a935e30f 1996 pss->page = 0;
b9e60928
DDAG
1997 pss->block = QLIST_NEXT_RCU(pss->block, next);
1998 if (!pss->block) {
1999 /* Hit the end of the list */
2000 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
2001 /* Flag that we've looped */
2002 pss->complete_round = true;
6f37bb8b 2003 rs->ram_bulk_stage = false;
b9e60928
DDAG
2004 if (migrate_use_xbzrle()) {
2005 /* If xbzrle is on, stop using the data compression at this
2006 * point. In theory, xbzrle can do better than compression.
2007 */
ce25d337 2008 flush_compressed_data(rs);
b9e60928
DDAG
2009 }
2010 }
2011 /* Didn't find anything this time, but try again on the new block */
2012 *again = true;
2013 return false;
2014 } else {
2015 /* Can go around again, but... */
2016 *again = true;
2017 /* We've found something so probably don't need to */
2018 return true;
2019 }
2020}
2021
3d0684b2
JQ
2022/**
 2023 * unqueue_page: gets a page off the queue
2024 *
a82d593b 2025 * Helper for 'get_queued_page' - gets a page off the queue
a82d593b 2026 *
3d0684b2
JQ
2027 * Returns the block of the page (or NULL if none available)
2028 *
ec481c6c 2029 * @rs: current RAM state
3d0684b2 2030 * @offset: used to return the offset within the RAMBlock
a82d593b 2031 */
f20e2865 2032static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
a82d593b
DDAG
2033{
2034 RAMBlock *block = NULL;
2035
ae526e32
XG
2036 if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
2037 return NULL;
2038 }
2039
ec481c6c
JQ
2040 qemu_mutex_lock(&rs->src_page_req_mutex);
2041 if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2042 struct RAMSrcPageRequest *entry =
2043 QSIMPLEQ_FIRST(&rs->src_page_requests);
a82d593b
DDAG
2044 block = entry->rb;
2045 *offset = entry->offset;
a82d593b
DDAG
2046
2047 if (entry->len > TARGET_PAGE_SIZE) {
2048 entry->len -= TARGET_PAGE_SIZE;
2049 entry->offset += TARGET_PAGE_SIZE;
2050 } else {
2051 memory_region_unref(block->mr);
ec481c6c 2052 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
a82d593b 2053 g_free(entry);
e03a34f8 2054 migration_consume_urgent_request();
a82d593b
DDAG
2055 }
2056 }
ec481c6c 2057 qemu_mutex_unlock(&rs->src_page_req_mutex);
a82d593b
DDAG
2058
2059 return block;
2060}
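/*
 * Worked example (not part of the original file): a queued request for
 * a 2 MiB range with 4 KiB target pages is drained one target page per
 * call. Each call returns the same block, bumps entry->offset by 4 KiB
 * and shrinks entry->len by 4 KiB, so the entry is only unreffed and
 * freed on the 512th call.
 */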
2061
3d0684b2
JQ
2062/**
 2063 * get_queued_page: unqueue a page from the postcopy requests
2064 *
2065 * Skips pages that are already sent (!dirty)
a82d593b 2066 *
3d0684b2 2067 * Returns true if a queued page is found
a82d593b 2068 *
6f37bb8b 2069 * @rs: current RAM state
3d0684b2 2070 * @pss: data about the state of the current dirty page scan
a82d593b 2071 */
f20e2865 2072static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
a82d593b
DDAG
2073{
2074 RAMBlock *block;
2075 ram_addr_t offset;
2076 bool dirty;
2077
2078 do {
f20e2865 2079 block = unqueue_page(rs, &offset);
a82d593b
DDAG
2080 /*
2081 * We're sending this page, and since it's postcopy nothing else
2082 * will dirty it, and we must make sure it doesn't get sent again
2083 * even if this queue request was received after the background
2084 * search already sent it.
2085 */
2086 if (block) {
f20e2865
JQ
2087 unsigned long page;
2088
6b6712ef
JQ
2089 page = offset >> TARGET_PAGE_BITS;
2090 dirty = test_bit(page, block->bmap);
a82d593b 2091 if (!dirty) {
06b10688 2092 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
6b6712ef 2093 page, test_bit(page, block->unsentmap));
a82d593b 2094 } else {
f20e2865 2095 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
a82d593b
DDAG
2096 }
2097 }
2098
2099 } while (block && !dirty);
2100
2101 if (block) {
2102 /*
2103 * As soon as we start servicing pages out of order, then we have
2104 * to kill the bulk stage, since the bulk stage assumes
2105 * in (migration_bitmap_find_and_reset_dirty) that every page is
2106 * dirty, that's no longer true.
2107 */
6f37bb8b 2108 rs->ram_bulk_stage = false;
a82d593b
DDAG
2109
2110 /*
2111 * We want the background search to continue from the queued page
2112 * since the guest is likely to want other pages near to the page
2113 * it just requested.
2114 */
2115 pss->block = block;
a935e30f 2116 pss->page = offset >> TARGET_PAGE_BITS;
a82d593b
DDAG
2117 }
2118
2119 return !!block;
2120}
2121
6c595cde 2122/**
5e58f968
JQ
2123 * migration_page_queue_free: drop any remaining pages in the ram
2124 * request queue
6c595cde 2125 *
3d0684b2
JQ
2126 * It should be empty at the end anyway, but in error cases there may
 2127 * be some left. If any pages are left, we drop them.
2128 *
6c595cde 2129 */
83c13382 2130static void migration_page_queue_free(RAMState *rs)
6c595cde 2131{
ec481c6c 2132 struct RAMSrcPageRequest *mspr, *next_mspr;
6c595cde
DDAG
2133 /* This queue generally should be empty - but in the case of a failed
2134 * migration might have some droppings in.
2135 */
2136 rcu_read_lock();
ec481c6c 2137 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
6c595cde 2138 memory_region_unref(mspr->rb->mr);
ec481c6c 2139 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
6c595cde
DDAG
2140 g_free(mspr);
2141 }
2142 rcu_read_unlock();
2143}
2144
2145/**
3d0684b2
JQ
2146 * ram_save_queue_pages: queue the page for transmission
2147 *
2148 * A request from postcopy destination for example.
2149 *
2150 * Returns zero on success or negative on error
2151 *
3d0684b2
JQ
 2152 * @rbname: Name of the RAMBlock of the request. NULL means the
 2153 * same as the last one.
2154 * @start: starting address from the start of the RAMBlock
2155 * @len: length (in bytes) to send
6c595cde 2156 */
96506894 2157int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
6c595cde
DDAG
2158{
2159 RAMBlock *ramblock;
53518d94 2160 RAMState *rs = ram_state;
6c595cde 2161
9360447d 2162 ram_counters.postcopy_requests++;
6c595cde
DDAG
2163 rcu_read_lock();
2164 if (!rbname) {
2165 /* Reuse last RAMBlock */
68a098f3 2166 ramblock = rs->last_req_rb;
6c595cde
DDAG
2167
2168 if (!ramblock) {
2169 /*
2170 * Shouldn't happen, we can't reuse the last RAMBlock if
2171 * it's the 1st request.
2172 */
2173 error_report("ram_save_queue_pages no previous block");
2174 goto err;
2175 }
2176 } else {
2177 ramblock = qemu_ram_block_by_name(rbname);
2178
2179 if (!ramblock) {
2180 /* We shouldn't be asked for a non-existent RAMBlock */
2181 error_report("ram_save_queue_pages no block '%s'", rbname);
2182 goto err;
2183 }
68a098f3 2184 rs->last_req_rb = ramblock;
6c595cde
DDAG
2185 }
2186 trace_ram_save_queue_pages(ramblock->idstr, start, len);
 2187 if (start + len > ramblock->used_length) {
9458ad6b
JQ
2188 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
2189 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
6c595cde
DDAG
2190 __func__, start, len, ramblock->used_length);
2191 goto err;
2192 }
2193
ec481c6c
JQ
2194 struct RAMSrcPageRequest *new_entry =
2195 g_malloc0(sizeof(struct RAMSrcPageRequest));
6c595cde
DDAG
2196 new_entry->rb = ramblock;
2197 new_entry->offset = start;
2198 new_entry->len = len;
2199
2200 memory_region_ref(ramblock->mr);
ec481c6c
JQ
2201 qemu_mutex_lock(&rs->src_page_req_mutex);
2202 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
e03a34f8 2203 migration_make_urgent_request();
ec481c6c 2204 qemu_mutex_unlock(&rs->src_page_req_mutex);
6c595cde
DDAG
2205 rcu_read_unlock();
2206
2207 return 0;
2208
2209err:
2210 rcu_read_unlock();
2211 return -1;
2212}
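/*
 * Illustrative usage (not part of the original file): the postcopy
 * destination asks for the page containing a faulting address roughly
 * like this; "pc.ram" and the offset are made-up example values, and
 * the function name is hypothetical.
 */
static G_GNUC_UNUSED int example_request_page(void)
{
    ram_addr_t start = 0x7654000;   /* hypothetical page-aligned offset */

    return ram_save_queue_pages("pc.ram", start, TARGET_PAGE_SIZE);
}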
2213
d7400a34
XG
2214static bool save_page_use_compression(RAMState *rs)
2215{
2216 if (!migrate_use_compression()) {
2217 return false;
2218 }
2219
2220 /*
2221 * If xbzrle is on, stop using the data compression after first
2222 * round of migration even if compression is enabled. In theory,
2223 * xbzrle can do better than compression.
2224 */
2225 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
2226 return true;
2227 }
2228
2229 return false;
2230}
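/*
 * Illustrative decision table for save_page_use_compression() (not
 * part of the original file):
 *
 *   compression off                        -> false
 *   compression on, xbzrle off             -> true  (always compress)
 *   compression on, xbzrle on, bulk stage  -> true  (first round only)
 *   compression on, xbzrle on, after bulk  -> false (xbzrle takes over)
 */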
2231
5e5fdcff
XG
2232/*
2233 * try to compress the page before posting it out, return true if the page
2234 * has been properly handled by compression, otherwise needs other
2235 * paths to handle it
2236 */
2237static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
2238{
2239 if (!save_page_use_compression(rs)) {
2240 return false;
2241 }
2242
2243 /*
2244 * When starting the process of a new block, the first page of
2245 * the block should be sent out before other pages in the same
 2246 * block, and all the pages in the last block should have been sent
 2247 * out. Keeping this order is important, because the 'cont' flag
2248 * is used to avoid resending the block name.
2249 *
 2250 * We post the first page as a normal page as compression will take
2251 * much CPU resource.
2252 */
2253 if (block != rs->last_sent_block) {
2254 flush_compressed_data(rs);
2255 return false;
2256 }
2257
2258 if (compress_page_with_multi_thread(rs, block, offset) > 0) {
2259 return true;
2260 }
2261
2262 return false;
2263}
2264
a82d593b 2265/**
3d0684b2 2266 * ram_save_target_page: save one target page
a82d593b 2267 *
3d0684b2 2268 * Returns the number of pages written
a82d593b 2269 *
6f37bb8b 2270 * @rs: current RAM state
3d0684b2 2271 * @pss: data about the page we want to send
a82d593b 2272 * @last_stage: if we are at the completion stage
a82d593b 2273 */
a0a8aa14 2274static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
f20e2865 2275 bool last_stage)
a82d593b 2276{
a8ec91f9
XG
2277 RAMBlock *block = pss->block;
2278 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2279 int res;
2280
2281 if (control_save_page(rs, block, offset, &res)) {
2282 return res;
2283 }
2284
5e5fdcff
XG
2285 if (save_compress_page(rs, block, offset)) {
2286 return 1;
d7400a34
XG
2287 }
2288
2289 res = save_zero_page(rs, block, offset);
2290 if (res > 0) {
2291 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2292 * page would be stale
2293 */
2294 if (!save_page_use_compression(rs)) {
2295 XBZRLE_cache_lock();
2296 xbzrle_cache_zero_page(rs, block->offset + offset);
2297 XBZRLE_cache_unlock();
2298 }
2299 ram_release_pages(block->idstr, offset, res);
2300 return res;
2301 }
2302
da3f56cb 2303 /*
5e5fdcff
XG
2304 * do not use multifd for compression as the first page in the new
2305 * block should be posted out before sending the compressed page
da3f56cb 2306 */
5e5fdcff 2307 if (!save_page_use_compression(rs) && migrate_use_multifd()) {
b9ee2f7d 2308 return ram_save_multifd_page(rs, block, offset);
a82d593b
DDAG
2309 }
2310
1faa5665 2311 return ram_save_page(rs, pss, last_stage);
a82d593b
DDAG
2312}
2313
2314/**
3d0684b2 2315 * ram_save_host_page: save a whole host page
a82d593b 2316 *
3d0684b2
JQ
2317 * Starting at *offset send pages up to the end of the current host
2318 * page. It's valid for the initial offset to point into the middle of
2319 * a host page in which case the remainder of the hostpage is sent.
2320 * Only dirty target pages are sent. Note that the host page size may
2321 * be a huge page for this block.
1eb3fc0a
DDAG
2322 * The saving stops at the boundary of the used_length of the block
2323 * if the RAMBlock isn't a multiple of the host page size.
a82d593b 2324 *
3d0684b2
JQ
2325 * Returns the number of pages written or negative on error
2326 *
6f37bb8b 2327 * @rs: current RAM state
3d0684b2 2329 * @pss: data about the page we want to send
a82d593b 2330 * @last_stage: if we are at the completion stage
a82d593b 2331 */
a0a8aa14 2332static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
f20e2865 2333 bool last_stage)
a82d593b
DDAG
2334{
2335 int tmppages, pages = 0;
a935e30f
JQ
2336 size_t pagesize_bits =
2337 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
4c011c37 2338
b895de50
CLG
2339 if (!qemu_ram_is_migratable(pss->block)) {
2340 error_report("block %s should not be migrated !", pss->block->idstr);
2341 return 0;
2342 }
2343
a82d593b 2344 do {
1faa5665
XG
 2345 /* Check if the page is dirty, and if so, send it */
2346 if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2347 pss->page++;
2348 continue;
2349 }
2350
f20e2865 2351 tmppages = ram_save_target_page(rs, pss, last_stage);
a82d593b
DDAG
2352 if (tmppages < 0) {
2353 return tmppages;
2354 }
2355
2356 pages += tmppages;
1faa5665
XG
2357 if (pss->block->unsentmap) {
2358 clear_bit(pss->page, pss->block->unsentmap);
2359 }
2360
a935e30f 2361 pss->page++;
1eb3fc0a
DDAG
2362 } while ((pss->page & (pagesize_bits - 1)) &&
2363 offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
a82d593b
DDAG
2364
2365 /* The offset we leave with is the last one we looked at */
a935e30f 2366 pss->page--;
a82d593b
DDAG
2367 return pages;
2368}
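/*
 * Worked example (not part of the original file): with 4 KiB target
 * pages on a block backed by 2 MiB huge pages, pagesize_bits above is
 * 2 MiB / 4 KiB = 512, so the loop keeps sending dirty target pages
 * until (pss->page & 511) wraps to zero, i.e. a host page boundary,
 * or the block's used_length is reached.
 */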
6c595cde 2369
56e93d26 2370/**
3d0684b2 2371 * ram_find_and_save_block: finds a dirty page and sends it to f
56e93d26
JQ
2372 *
2373 * Called within an RCU critical section.
2374 *
e8f3735f
XG
2375 * Returns the number of pages written where zero means no dirty pages,
2376 * or negative on error
56e93d26 2377 *
6f37bb8b 2378 * @rs: current RAM state
56e93d26 2379 * @last_stage: if we are at the completion stage
a82d593b
DDAG
2380 *
2381 * On systems where host-page-size > target-page-size it will send all the
2382 * pages in a host page that are dirty.
56e93d26
JQ
2383 */
2384
ce25d337 2385static int ram_find_and_save_block(RAMState *rs, bool last_stage)
56e93d26 2386{
b8fb8cb7 2387 PageSearchStatus pss;
56e93d26 2388 int pages = 0;
b9e60928 2389 bool again, found;
56e93d26 2390
0827b9e9
AA
2391 /* No dirty page as there is zero RAM */
2392 if (!ram_bytes_total()) {
2393 return pages;
2394 }
2395
6f37bb8b 2396 pss.block = rs->last_seen_block;
a935e30f 2397 pss.page = rs->last_page;
b8fb8cb7
DDAG
2398 pss.complete_round = false;
2399
2400 if (!pss.block) {
2401 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
2402 }
56e93d26 2403
b9e60928 2404 do {
a82d593b 2405 again = true;
f20e2865 2406 found = get_queued_page(rs, &pss);
b9e60928 2407
a82d593b
DDAG
2408 if (!found) {
2409 /* priority queue empty, so just search for something dirty */
f20e2865 2410 found = find_dirty_block(rs, &pss, &again);
a82d593b 2411 }
f3f491fc 2412
a82d593b 2413 if (found) {
f20e2865 2414 pages = ram_save_host_page(rs, &pss, last_stage);
56e93d26 2415 }
b9e60928 2416 } while (!pages && again);
56e93d26 2417
6f37bb8b 2418 rs->last_seen_block = pss.block;
a935e30f 2419 rs->last_page = pss.page;
56e93d26
JQ
2420
2421 return pages;
2422}
2423
2424void acct_update_position(QEMUFile *f, size_t size, bool zero)
2425{
2426 uint64_t pages = size / TARGET_PAGE_SIZE;
f7ccd61b 2427
56e93d26 2428 if (zero) {
9360447d 2429 ram_counters.duplicate += pages;
56e93d26 2430 } else {
9360447d
JQ
2431 ram_counters.normal += pages;
2432 ram_counters.transferred += size;
56e93d26
JQ
2433 qemu_update_position(f, size);
2434 }
2435}
2436
56e93d26
JQ
2437uint64_t ram_bytes_total(void)
2438{
2439 RAMBlock *block;
2440 uint64_t total = 0;
2441
2442 rcu_read_lock();
b895de50 2443 RAMBLOCK_FOREACH_MIGRATABLE(block) {
56e93d26 2444 total += block->used_length;
99e15582 2445 }
56e93d26
JQ
2446 rcu_read_unlock();
2447 return total;
2448}
2449
f265e0e4 2450static void xbzrle_load_setup(void)
56e93d26 2451{
f265e0e4 2452 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
56e93d26
JQ
2453}
2454
f265e0e4
JQ
2455static void xbzrle_load_cleanup(void)
2456{
2457 g_free(XBZRLE.decoded_buf);
2458 XBZRLE.decoded_buf = NULL;
2459}
2460
7d7c96be
PX
2461static void ram_state_cleanup(RAMState **rsp)
2462{
b9ccaf6d
DDAG
2463 if (*rsp) {
2464 migration_page_queue_free(*rsp);
2465 qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2466 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2467 g_free(*rsp);
2468 *rsp = NULL;
2469 }
7d7c96be
PX
2470}
2471
84593a08
PX
2472static void xbzrle_cleanup(void)
2473{
2474 XBZRLE_cache_lock();
2475 if (XBZRLE.cache) {
2476 cache_fini(XBZRLE.cache);
2477 g_free(XBZRLE.encoded_buf);
2478 g_free(XBZRLE.current_buf);
2479 g_free(XBZRLE.zero_target_page);
2480 XBZRLE.cache = NULL;
2481 XBZRLE.encoded_buf = NULL;
2482 XBZRLE.current_buf = NULL;
2483 XBZRLE.zero_target_page = NULL;
2484 }
2485 XBZRLE_cache_unlock();
2486}
2487
f265e0e4 2488static void ram_save_cleanup(void *opaque)
56e93d26 2489{
53518d94 2490 RAMState **rsp = opaque;
6b6712ef 2491 RAMBlock *block;
eb859c53 2492
2ff64038
LZ
 2493 /* The caller holds the iothread lock or is in a bh, so there is
 2494 * no write race against this migration_bitmap
2495 */
6b6712ef
JQ
2496 memory_global_dirty_log_stop();
2497
b895de50 2498 RAMBLOCK_FOREACH_MIGRATABLE(block) {
6b6712ef
JQ
2499 g_free(block->bmap);
2500 block->bmap = NULL;
2501 g_free(block->unsentmap);
2502 block->unsentmap = NULL;
56e93d26
JQ
2503 }
2504
84593a08 2505 xbzrle_cleanup();
f0afa331 2506 compress_threads_save_cleanup();
7d7c96be 2507 ram_state_cleanup(rsp);
56e93d26
JQ
2508}
2509
6f37bb8b 2510static void ram_state_reset(RAMState *rs)
56e93d26 2511{
6f37bb8b
JQ
2512 rs->last_seen_block = NULL;
2513 rs->last_sent_block = NULL;
269ace29 2514 rs->last_page = 0;
6f37bb8b
JQ
2515 rs->last_version = ram_list.version;
2516 rs->ram_bulk_stage = true;
56e93d26
JQ
2517}
2518
2519#define MAX_WAIT 50 /* ms, half buffered_file limit */
2520
4f2e4252
DDAG
2521/*
2522 * 'expected' is the value you expect the bitmap mostly to be full
2523 * of; it won't bother printing lines that are all this value.
2524 * If 'todump' is null the migration bitmap is dumped.
2525 */
6b6712ef
JQ
2526void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
2527 unsigned long pages)
4f2e4252 2528{
4f2e4252
DDAG
2529 int64_t cur;
2530 int64_t linelen = 128;
2531 char linebuf[129];
2532
6b6712ef 2533 for (cur = 0; cur < pages; cur += linelen) {
4f2e4252
DDAG
2534 int64_t curb;
2535 bool found = false;
2536 /*
2537 * Last line; catch the case where the line length
2538 * is longer than remaining ram
2539 */
6b6712ef
JQ
2540 if (cur + linelen > pages) {
2541 linelen = pages - cur;
4f2e4252
DDAG
2542 }
2543 for (curb = 0; curb < linelen; curb++) {
2544 bool thisbit = test_bit(cur + curb, todump);
2545 linebuf[curb] = thisbit ? '1' : '.';
2546 found = found || (thisbit != expected);
2547 }
2548 if (found) {
2549 linebuf[curb] = '\0';
2550 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
2551 }
2552 }
2553}
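/*
 * Illustrative output (not part of the original file): with
 * expected == false a line is printed only when at least one bit in it
 * differs, e.g.
 *
 *     0x00000080 : ...........1111111...
 *
 * meaning pages 0x8b through 0x91 of the dumped bitmap are set.
 */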
2554
e0b266f0
DDAG
2555/* **** functions for postcopy ***** */
2556
ced1c616
PB
2557void ram_postcopy_migrated_memory_release(MigrationState *ms)
2558{
2559 struct RAMBlock *block;
ced1c616 2560
b895de50 2561 RAMBLOCK_FOREACH_MIGRATABLE(block) {
6b6712ef
JQ
2562 unsigned long *bitmap = block->bmap;
2563 unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2564 unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
ced1c616
PB
2565
2566 while (run_start < range) {
2567 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
aaa2064c 2568 ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
ced1c616
PB
2569 (run_end - run_start) << TARGET_PAGE_BITS);
2570 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2571 }
2572 }
2573}
2574
3d0684b2
JQ
2575/**
2576 * postcopy_send_discard_bm_ram: discard a RAMBlock
2577 *
2578 * Returns zero on success
2579 *
e0b266f0
DDAG
2580 * Callback from postcopy_each_ram_send_discard for each RAMBlock
2581 * Note: At this point the 'unsentmap' is the processed bitmap combined
2582 * with the dirtymap; so a '1' means it's either dirty or unsent.
3d0684b2
JQ
2583 *
2584 * @ms: current migration state
2585 * @pds: state for postcopy
 2586 * @block: RAMBlock to discard
e0b266f0
DDAG
2588 */
2589static int postcopy_send_discard_bm_ram(MigrationState *ms,
2590 PostcopyDiscardState *pds,
6b6712ef 2591 RAMBlock *block)
e0b266f0 2592{
6b6712ef 2593 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
e0b266f0 2594 unsigned long current;
6b6712ef 2595 unsigned long *unsentmap = block->unsentmap;
e0b266f0 2596
6b6712ef 2597 for (current = 0; current < end; ) {
e0b266f0
DDAG
2598 unsigned long one = find_next_bit(unsentmap, end, current);
2599
2600 if (one <= end) {
2601 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
2602 unsigned long discard_length;
2603
2604 if (zero >= end) {
2605 discard_length = end - one;
2606 } else {
2607 discard_length = zero - one;
2608 }
d688c62d
DDAG
2609 if (discard_length) {
2610 postcopy_discard_send_range(ms, pds, one, discard_length);
2611 }
e0b266f0
DDAG
2612 current = one + discard_length;
2613 } else {
2614 current = one;
2615 }
2616 }
2617
2618 return 0;
2619}
2620
3d0684b2
JQ
2621/**
2622 * postcopy_each_ram_send_discard: discard all RAMBlocks
2623 *
2624 * Returns 0 for success or negative for error
2625 *
e0b266f0
DDAG
2626 * Utility for the outgoing postcopy code.
2627 * Calls postcopy_send_discard_bm_ram for each RAMBlock
2628 * passing it bitmap indexes and name.
e0b266f0
DDAG
2629 * (qemu_ram_foreach_block ends up passing unscaled lengths
2630 * which would mean postcopy code would have to deal with target page)
3d0684b2
JQ
2631 *
2632 * @ms: current migration state
e0b266f0
DDAG
2633 */
2634static int postcopy_each_ram_send_discard(MigrationState *ms)
2635{
2636 struct RAMBlock *block;
2637 int ret;
2638
b895de50 2639 RAMBLOCK_FOREACH_MIGRATABLE(block) {
6b6712ef
JQ
2640 PostcopyDiscardState *pds =
2641 postcopy_discard_send_init(ms, block->idstr);
e0b266f0
DDAG
2642
2643 /*
2644 * Postcopy sends chunks of bitmap over the wire, but it
2645 * just needs indexes at this point, avoids it having
2646 * target page specific code.
2647 */
6b6712ef 2648 ret = postcopy_send_discard_bm_ram(ms, pds, block);
e0b266f0
DDAG
2649 postcopy_discard_send_finish(ms, pds);
2650 if (ret) {
2651 return ret;
2652 }
2653 }
2654
2655 return 0;
2656}
2657
3d0684b2
JQ
2658/**
 2659 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
2660 *
2661 * Helper for postcopy_chunk_hostpages; it's called twice to
2662 * canonicalize the two bitmaps, that are similar, but one is
2663 * inverted.
99e314eb 2664 *
3d0684b2
JQ
2665 * Postcopy requires that all target pages in a hostpage are dirty or
2666 * clean, not a mix. This function canonicalizes the bitmaps.
99e314eb 2667 *
3d0684b2
JQ
2668 * @ms: current migration state
2669 * @unsent_pass: if true we need to canonicalize partially unsent host pages
2670 * otherwise we need to canonicalize partially dirty host pages
2671 * @block: block that contains the page we want to canonicalize
2672 * @pds: state for postcopy
99e314eb
DDAG
2673 */
2674static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
2675 RAMBlock *block,
2676 PostcopyDiscardState *pds)
2677{
53518d94 2678 RAMState *rs = ram_state;
6b6712ef
JQ
2679 unsigned long *bitmap = block->bmap;
2680 unsigned long *unsentmap = block->unsentmap;
29c59172 2681 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
6b6712ef 2682 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
99e314eb
DDAG
2683 unsigned long run_start;
2684
29c59172
DDAG
2685 if (block->page_size == TARGET_PAGE_SIZE) {
2686 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2687 return;
2688 }
2689
99e314eb
DDAG
2690 if (unsent_pass) {
2691 /* Find a sent page */
6b6712ef 2692 run_start = find_next_zero_bit(unsentmap, pages, 0);
99e314eb
DDAG
2693 } else {
2694 /* Find a dirty page */
6b6712ef 2695 run_start = find_next_bit(bitmap, pages, 0);
99e314eb
DDAG
2696 }
2697
6b6712ef 2698 while (run_start < pages) {
99e314eb
DDAG
2699 bool do_fixup = false;
2700 unsigned long fixup_start_addr;
2701 unsigned long host_offset;
2702
2703 /*
2704 * If the start of this run of pages is in the middle of a host
2705 * page, then we need to fixup this host page.
2706 */
2707 host_offset = run_start % host_ratio;
2708 if (host_offset) {
2709 do_fixup = true;
2710 run_start -= host_offset;
2711 fixup_start_addr = run_start;
2712 /* For the next pass */
2713 run_start = run_start + host_ratio;
2714 } else {
2715 /* Find the end of this run */
2716 unsigned long run_end;
2717 if (unsent_pass) {
6b6712ef 2718 run_end = find_next_bit(unsentmap, pages, run_start + 1);
99e314eb 2719 } else {
6b6712ef 2720 run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
99e314eb
DDAG
2721 }
2722 /*
2723 * If the end isn't at the start of a host page, then the
2724 * run doesn't finish at the end of a host page
2725 * and we need to discard.
2726 */
2727 host_offset = run_end % host_ratio;
2728 if (host_offset) {
2729 do_fixup = true;
2730 fixup_start_addr = run_end - host_offset;
2731 /*
2732 * This host page has gone, the next loop iteration starts
2733 * from after the fixup
2734 */
2735 run_start = fixup_start_addr + host_ratio;
2736 } else {
2737 /*
2738 * No discards on this iteration, next loop starts from
2739 * next sent/dirty page
2740 */
2741 run_start = run_end + 1;
2742 }
2743 }
2744
2745 if (do_fixup) {
2746 unsigned long page;
2747
2748 /* Tell the destination to discard this page */
2749 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
2750 /* For the unsent_pass we:
2751 * discard partially sent pages
2752 * For the !unsent_pass (dirty) we:
2753 * discard partially dirty pages that were sent
2754 * (any partially sent pages were already discarded
2755 * by the previous unsent_pass)
2756 */
2757 postcopy_discard_send_range(ms, pds, fixup_start_addr,
2758 host_ratio);
2759 }
2760
2761 /* Clean up the bitmap */
2762 for (page = fixup_start_addr;
2763 page < fixup_start_addr + host_ratio; page++) {
2764 /* All pages in this host page are now not sent */
2765 set_bit(page, unsentmap);
2766
2767 /*
2768 * Remark them as dirty, updating the count for any pages
2769 * that weren't previously dirty.
2770 */
0d8ec885 2771 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
99e314eb
DDAG
2772 }
2773 }
2774
2775 if (unsent_pass) {
2776 /* Find the next sent page for the next iteration */
6b6712ef 2777 run_start = find_next_zero_bit(unsentmap, pages, run_start);
99e314eb
DDAG
2778 } else {
2779 /* Find the next dirty page for the next iteration */
6b6712ef 2780 run_start = find_next_bit(bitmap, pages, run_start);
99e314eb
DDAG
2781 }
2782 }
2783}
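/*
 * Worked example (not part of the original file): with 2 MiB host
 * pages and 4 KiB target pages, host_ratio is 512. If a dirty run
 * starts at target page 1500, host_offset is 1500 % 512 = 476, the run
 * start is pulled back to page 1024, and the whole host page spanning
 * target pages 1024..1535 is discarded and re-marked dirty/unsent.
 */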
2784
3d0684b2
JQ
2785/**
 2786 * postcopy_chunk_hostpages: discard any partially sent host page
2787 *
99e314eb
DDAG
2788 * Utility for the outgoing postcopy code.
2789 *
2790 * Discard any partially sent host-page size chunks, mark any partially
29c59172
DDAG
2791 * dirty host-page size chunks as all dirty. In this case the host-page
2792 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
99e314eb 2793 *
3d0684b2
JQ
2794 * Returns zero on success
2795 *
2796 * @ms: current migration state
6b6712ef 2797 * @block: block we want to work with
99e314eb 2798 */
6b6712ef 2799static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
99e314eb 2800{
6b6712ef
JQ
2801 PostcopyDiscardState *pds =
2802 postcopy_discard_send_init(ms, block->idstr);
99e314eb 2803
6b6712ef
JQ
2804 /* First pass: Discard all partially sent host pages */
2805 postcopy_chunk_hostpages_pass(ms, true, block, pds);
2806 /*
2807 * Second pass: Ensure that all partially dirty host pages are made
2808 * fully dirty.
2809 */
2810 postcopy_chunk_hostpages_pass(ms, false, block, pds);
99e314eb 2811
6b6712ef 2812 postcopy_discard_send_finish(ms, pds);
99e314eb
DDAG
2813 return 0;
2814}
2815
3d0684b2
JQ
2816/**
2817 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2818 *
2819 * Returns zero on success
2820 *
e0b266f0
DDAG
 2821 * Transmit the set of pages to be discarded after precopy to the target;
 2822 * these are pages that:
 2823 * a) Have been previously transmitted but are now dirty again
 2824 * b) Have never been transmitted; this ensures that
 2825 * any pages on the destination that have been mapped by background
 2826 * tasks get discarded (transparent huge pages are the specific concern)
2827 * Hopefully this is pretty sparse
3d0684b2
JQ
2828 *
2829 * @ms: current migration state
e0b266f0
DDAG
2830 */
2831int ram_postcopy_send_discard_bitmap(MigrationState *ms)
2832{
53518d94 2833 RAMState *rs = ram_state;
6b6712ef 2834 RAMBlock *block;
e0b266f0 2835 int ret;
e0b266f0
DDAG
2836
2837 rcu_read_lock();
2838
2839 /* This should be our last sync, the src is now paused */
eb859c53 2840 migration_bitmap_sync(rs);
e0b266f0 2841
6b6712ef
JQ
2842 /* Easiest way to make sure we don't resume in the middle of a host-page */
2843 rs->last_seen_block = NULL;
2844 rs->last_sent_block = NULL;
2845 rs->last_page = 0;
e0b266f0 2846
b895de50 2847 RAMBLOCK_FOREACH_MIGRATABLE(block) {
6b6712ef
JQ
2848 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2849 unsigned long *bitmap = block->bmap;
2850 unsigned long *unsentmap = block->unsentmap;
2851
2852 if (!unsentmap) {
2853 /* We don't have a safe way to resize the sentmap, so
2854 * if the bitmap was resized it will be NULL at this
2855 * point.
2856 */
2857 error_report("migration ram resized during precopy phase");
2858 rcu_read_unlock();
2859 return -EINVAL;
2860 }
2861 /* Deal with TPS != HPS and huge pages */
2862 ret = postcopy_chunk_hostpages(ms, block);
2863 if (ret) {
2864 rcu_read_unlock();
2865 return ret;
2866 }
e0b266f0 2867
6b6712ef
JQ
2868 /*
2869 * Update the unsentmap to be unsentmap = unsentmap | dirty
2870 */
2871 bitmap_or(unsentmap, unsentmap, bitmap, pages);
e0b266f0 2872#ifdef DEBUG_POSTCOPY
6b6712ef 2873 ram_debug_dump_bitmap(unsentmap, true, pages);
e0b266f0 2874#endif
6b6712ef
JQ
2875 }
2876 trace_ram_postcopy_send_discard_bitmap();
e0b266f0
DDAG
2877
2878 ret = postcopy_each_ram_send_discard(ms);
2879 rcu_read_unlock();
2880
2881 return ret;
2882}
2883
3d0684b2
JQ
2884/**
2885 * ram_discard_range: discard dirtied pages at the beginning of postcopy
e0b266f0 2886 *
3d0684b2 2887 * Returns zero on success
e0b266f0 2888 *
36449157
JQ
2889 * @rbname: name of the RAMBlock of the request. NULL means the
 2890 * same as the last one.
3d0684b2
JQ
2891 * @start: RAMBlock starting page
2892 * @length: RAMBlock size
e0b266f0 2893 */
aaa2064c 2894int ram_discard_range(const char *rbname, uint64_t start, size_t length)
e0b266f0
DDAG
2895{
2896 int ret = -1;
2897
36449157 2898 trace_ram_discard_range(rbname, start, length);
d3a5038c 2899
e0b266f0 2900 rcu_read_lock();
36449157 2901 RAMBlock *rb = qemu_ram_block_by_name(rbname);
e0b266f0
DDAG
2902
2903 if (!rb) {
36449157 2904 error_report("ram_discard_range: Failed to find block '%s'", rbname);
e0b266f0
DDAG
2905 goto err;
2906 }
2907
814bb08f
PX
2908 /*
2909 * On source VM, we don't need to update the received bitmap since
2910 * we don't even have one.
2911 */
2912 if (rb->receivedmap) {
2913 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
2914 length >> qemu_target_page_bits());
2915 }
2916
d3a5038c 2917 ret = ram_block_discard_range(rb, start, length);
e0b266f0
DDAG
2918
2919err:
2920 rcu_read_unlock();
2921
2922 return ret;
2923}
2924
84593a08
PX
2925/*
2926 * For every allocation, we will try not to crash the VM if the
2927 * allocation failed.
2928 */
2929static int xbzrle_init(void)
2930{
2931 Error *local_err = NULL;
2932
2933 if (!migrate_use_xbzrle()) {
2934 return 0;
2935 }
2936
2937 XBZRLE_cache_lock();
2938
2939 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
2940 if (!XBZRLE.zero_target_page) {
2941 error_report("%s: Error allocating zero page", __func__);
2942 goto err_out;
2943 }
2944
2945 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
2946 TARGET_PAGE_SIZE, &local_err);
2947 if (!XBZRLE.cache) {
2948 error_report_err(local_err);
2949 goto free_zero_page;
2950 }
2951
2952 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
2953 if (!XBZRLE.encoded_buf) {
2954 error_report("%s: Error allocating encoded_buf", __func__);
2955 goto free_cache;
2956 }
2957
2958 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
2959 if (!XBZRLE.current_buf) {
2960 error_report("%s: Error allocating current_buf", __func__);
2961 goto free_encoded_buf;
2962 }
2963
2964 /* We are all good */
2965 XBZRLE_cache_unlock();
2966 return 0;
2967
2968free_encoded_buf:
2969 g_free(XBZRLE.encoded_buf);
2970 XBZRLE.encoded_buf = NULL;
2971free_cache:
2972 cache_fini(XBZRLE.cache);
2973 XBZRLE.cache = NULL;
2974free_zero_page:
2975 g_free(XBZRLE.zero_target_page);
2976 XBZRLE.zero_target_page = NULL;
2977err_out:
2978 XBZRLE_cache_unlock();
2979 return -ENOMEM;
2980}
2981
53518d94 2982static int ram_state_init(RAMState **rsp)
56e93d26 2983{
7d00ee6a
PX
2984 *rsp = g_try_new0(RAMState, 1);
2985
2986 if (!*rsp) {
2987 error_report("%s: Init ramstate fail", __func__);
2988 return -1;
2989 }
53518d94
JQ
2990
2991 qemu_mutex_init(&(*rsp)->bitmap_mutex);
2992 qemu_mutex_init(&(*rsp)->src_page_req_mutex);
2993 QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
56e93d26 2994
7d00ee6a
PX
2995 /*
2996 * Count the total number of pages used by ram blocks not including any
2997 * gaps due to alignment or unplugs.
2998 */
2999 (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
3000
3001 ram_state_reset(*rsp);
3002
3003 return 0;
3004}
3005
d6eff5d7 3006static void ram_list_init_bitmaps(void)
7d00ee6a 3007{
d6eff5d7
PX
3008 RAMBlock *block;
3009 unsigned long pages;
56e93d26 3010
0827b9e9
AA
3011 /* Skip setting bitmap if there is no RAM */
3012 if (ram_bytes_total()) {
b895de50 3013 RAMBLOCK_FOREACH_MIGRATABLE(block) {
d6eff5d7 3014 pages = block->max_length >> TARGET_PAGE_BITS;
6b6712ef
JQ
3015 block->bmap = bitmap_new(pages);
3016 bitmap_set(block->bmap, 0, pages);
3017 if (migrate_postcopy_ram()) {
3018 block->unsentmap = bitmap_new(pages);
3019 bitmap_set(block->unsentmap, 0, pages);
3020 }
0827b9e9 3021 }
f3f491fc 3022 }
d6eff5d7
PX
3023}
3024
3025static void ram_init_bitmaps(RAMState *rs)
3026{
3027 /* For memory_global_dirty_log_start below. */
3028 qemu_mutex_lock_iothread();
3029 qemu_mutex_lock_ramlist();
3030 rcu_read_lock();
f3f491fc 3031
d6eff5d7 3032 ram_list_init_bitmaps();
56e93d26 3033 memory_global_dirty_log_start();
d6eff5d7
PX
3034 migration_bitmap_sync(rs);
3035
3036 rcu_read_unlock();
56e93d26 3037 qemu_mutex_unlock_ramlist();
49877834 3038 qemu_mutex_unlock_iothread();
d6eff5d7
PX
3039}
3040
3041static int ram_init_all(RAMState **rsp)
3042{
3043 if (ram_state_init(rsp)) {
3044 return -1;
3045 }
3046
3047 if (xbzrle_init()) {
3048 ram_state_cleanup(rsp);
3049 return -1;
3050 }
3051
3052 ram_init_bitmaps(*rsp);
a91246c9
HZ
3053
3054 return 0;
3055}
3056
08614f34
PX
3057static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
3058{
3059 RAMBlock *block;
3060 uint64_t pages = 0;
3061
3062 /*
3063 * Postcopy is not using xbzrle/compression, so no need for that.
 3064 * Also, since the source is already halted, we don't need to care
 3065 * about dirty page logging either.
3066 */
3067
ff0769a4 3068 RAMBLOCK_FOREACH_MIGRATABLE(block) {
08614f34
PX
3069 pages += bitmap_count_one(block->bmap,
3070 block->used_length >> TARGET_PAGE_BITS);
3071 }
3072
3073 /* This may not be aligned with current bitmaps. Recalculate. */
3074 rs->migration_dirty_pages = pages;
3075
3076 rs->last_seen_block = NULL;
3077 rs->last_sent_block = NULL;
3078 rs->last_page = 0;
3079 rs->last_version = ram_list.version;
3080 /*
3081 * Disable the bulk stage, otherwise we'll resend the whole RAM no
3082 * matter what we have sent.
3083 */
3084 rs->ram_bulk_stage = false;
3085
3086 /* Update RAMState cache of output QEMUFile */
3087 rs->f = out;
3088
3089 trace_ram_state_resume_prepare(pages);
3090}
3091
3d0684b2
JQ
3092/*
3093 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
a91246c9
HZ
3094 * long-running RCU critical section. When rcu-reclaims in the code
3095 * start to become numerous it will be necessary to reduce the
3096 * granularity of these critical sections.
3097 */
3098
3d0684b2
JQ
3099/**
3100 * ram_save_setup: Setup RAM for migration
3101 *
3102 * Returns zero to indicate success and negative for error
3103 *
3104 * @f: QEMUFile where to send the data
3105 * @opaque: RAMState pointer
3106 */
a91246c9
HZ
3107static int ram_save_setup(QEMUFile *f, void *opaque)
3108{
53518d94 3109 RAMState **rsp = opaque;
a91246c9
HZ
3110 RAMBlock *block;
3111
dcaf446e
XG
3112 if (compress_threads_save_setup()) {
3113 return -1;
3114 }
3115
a91246c9
HZ
3116 /* migration has already setup the bitmap, reuse it. */
3117 if (!migration_in_colo_state()) {
7d00ee6a 3118 if (ram_init_all(rsp) != 0) {
dcaf446e 3119 compress_threads_save_cleanup();
a91246c9 3120 return -1;
53518d94 3121 }
a91246c9 3122 }
53518d94 3123 (*rsp)->f = f;
a91246c9
HZ
3124
3125 rcu_read_lock();
56e93d26
JQ
3126
3127 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
3128
b895de50 3129 RAMBLOCK_FOREACH_MIGRATABLE(block) {
56e93d26
JQ
3130 qemu_put_byte(f, strlen(block->idstr));
3131 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
3132 qemu_put_be64(f, block->used_length);
ef08fb38
DDAG
3133 if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
3134 qemu_put_be64(f, block->page_size);
3135 }
56e93d26
JQ
3136 }
3137
3138 rcu_read_unlock();
3139
3140 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
3141 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
3142
6df264ac 3143 multifd_send_sync_main();
56e93d26 3144 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
35374cbd 3145 qemu_fflush(f);
56e93d26
JQ
3146
3147 return 0;
3148}
3149
3d0684b2
JQ
3150/**
3151 * ram_save_iterate: iterative stage for migration
3152 *
3153 * Returns zero to indicate success and negative for error
3154 *
3155 * @f: QEMUFile where to send the data
3156 * @opaque: RAMState pointer
3157 */
56e93d26
JQ
3158static int ram_save_iterate(QEMUFile *f, void *opaque)
3159{
53518d94
JQ
3160 RAMState **temp = opaque;
3161 RAMState *rs = *temp;
56e93d26
JQ
3162 int ret;
3163 int i;
3164 int64_t t0;
5c90308f 3165 int done = 0;
56e93d26 3166
b2557345
PL
3167 if (blk_mig_bulk_active()) {
3168 /* Avoid transferring ram during bulk phase of block migration as
3169 * the bulk phase will usually take a long time and transferring
3170 * ram updates during that time is pointless. */
3171 goto out;
3172 }
3173
56e93d26 3174 rcu_read_lock();
6f37bb8b
JQ
3175 if (ram_list.version != rs->last_version) {
3176 ram_state_reset(rs);
56e93d26
JQ
3177 }
3178
3179 /* Read version before ram_list.blocks */
3180 smp_rmb();
3181
3182 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
3183
3184 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
3185 i = 0;
e03a34f8
DDAG
3186 while ((ret = qemu_file_rate_limit(f)) == 0 ||
3187 !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
56e93d26
JQ
3188 int pages;
3189
e03a34f8
DDAG
3190 if (qemu_file_get_error(f)) {
3191 break;
3192 }
3193
ce25d337 3194 pages = ram_find_and_save_block(rs, false);
56e93d26
JQ
 3195 /* no more pages to send */
3196 if (pages == 0) {
5c90308f 3197 done = 1;
56e93d26
JQ
3198 break;
3199 }
e8f3735f
XG
3200
3201 if (pages < 0) {
3202 qemu_file_set_error(f, pages);
3203 break;
3204 }
3205
be8b02ed 3206 rs->target_page_count += pages;
070afca2 3207
56e93d26
JQ
3208 /* we want to check in the 1st loop, just in case it was the 1st time
3209 and we had to sync the dirty bitmap.
 3210 qemu_clock_get_ns() is a bit expensive, so we only check once every
 3211 few iterations
3212 */
3213 if ((i & 63) == 0) {
3214 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
3215 if (t1 > MAX_WAIT) {
55c4446b 3216 trace_ram_save_iterate_big_wait(t1, i);
56e93d26
JQ
3217 break;
3218 }
3219 }
3220 i++;
3221 }
ce25d337 3222 flush_compressed_data(rs);
56e93d26
JQ
3223 rcu_read_unlock();
3224
3225 /*
3226 * Must occur before EOS (or any QEMUFile operation)
3227 * because of RDMA protocol.
3228 */
3229 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
3230
6df264ac 3231 multifd_send_sync_main();
b2557345 3232out:
56e93d26 3233 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
35374cbd 3234 qemu_fflush(f);
9360447d 3235 ram_counters.transferred += 8;
56e93d26
JQ
3236
3237 ret = qemu_file_get_error(f);
3238 if (ret < 0) {
3239 return ret;
3240 }
3241
5c90308f 3242 return done;
56e93d26
JQ
3243}
3244
3d0684b2
JQ
3245/**
3246 * ram_save_complete: function called to send the remaining amount of ram
3247 *
e8f3735f 3248 * Returns zero to indicate success or negative on error
3d0684b2
JQ
3249 *
3250 * Called with iothread lock
3251 *
3252 * @f: QEMUFile where to send the data
3253 * @opaque: RAMState pointer
3254 */
56e93d26
JQ
3255static int ram_save_complete(QEMUFile *f, void *opaque)
3256{
53518d94
JQ
3257 RAMState **temp = opaque;
3258 RAMState *rs = *temp;
e8f3735f 3259 int ret = 0;
6f37bb8b 3260
56e93d26
JQ
3261 rcu_read_lock();
3262
5727309d 3263 if (!migration_in_postcopy()) {
8d820d6f 3264 migration_bitmap_sync(rs);
663e6c1d 3265 }
56e93d26
JQ
3266
3267 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
3268
3269 /* try transferring iterative blocks of memory */
3270
3271 /* flush all remaining blocks regardless of rate limiting */
3272 while (true) {
3273 int pages;
3274
ce25d337 3275 pages = ram_find_and_save_block(rs, !migration_in_colo_state());
56e93d26
JQ
 3276 /* no more blocks to send */
3277 if (pages == 0) {
3278 break;
3279 }
e8f3735f
XG
3280 if (pages < 0) {
3281 ret = pages;
3282 break;
3283 }
56e93d26
JQ
3284 }
3285
ce25d337 3286 flush_compressed_data(rs);
56e93d26 3287 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
56e93d26
JQ
3288
3289 rcu_read_unlock();
d09a6fde 3290
6df264ac 3291 multifd_send_sync_main();
56e93d26 3292 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
35374cbd 3293 qemu_fflush(f);
56e93d26 3294
e8f3735f 3295 return ret;
56e93d26
JQ
3296}
3297
c31b098f 3298static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
47995026
VSO
3299 uint64_t *res_precopy_only,
3300 uint64_t *res_compatible,
3301 uint64_t *res_postcopy_only)
56e93d26 3302{
53518d94
JQ
3303 RAMState **temp = opaque;
3304 RAMState *rs = *temp;
56e93d26
JQ
3305 uint64_t remaining_size;
3306
9edabd4d 3307 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
56e93d26 3308
5727309d 3309 if (!migration_in_postcopy() &&
663e6c1d 3310 remaining_size < max_size) {
56e93d26
JQ
3311 qemu_mutex_lock_iothread();
3312 rcu_read_lock();
8d820d6f 3313 migration_bitmap_sync(rs);
56e93d26
JQ
3314 rcu_read_unlock();
3315 qemu_mutex_unlock_iothread();
9edabd4d 3316 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
56e93d26 3317 }
c31b098f 3318
86e1167e
VSO
3319 if (migrate_postcopy_ram()) {
3320 /* We can do postcopy, and all the data is postcopiable */
47995026 3321 *res_compatible += remaining_size;
86e1167e 3322 } else {
47995026 3323 *res_precopy_only += remaining_size;
86e1167e 3324 }
56e93d26
JQ
3325}
3326
3327static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3328{
3329 unsigned int xh_len;
3330 int xh_flags;
063e760a 3331 uint8_t *loaded_data;
56e93d26 3332
56e93d26
JQ
3333 /* extract RLE header */
3334 xh_flags = qemu_get_byte(f);
3335 xh_len = qemu_get_be16(f);
3336
3337 if (xh_flags != ENCODING_FLAG_XBZRLE) {
3338 error_report("Failed to load XBZRLE page - wrong compression!");
3339 return -1;
3340 }
3341
3342 if (xh_len > TARGET_PAGE_SIZE) {
3343 error_report("Failed to load XBZRLE page - len overflow!");
3344 return -1;
3345 }
f265e0e4 3346 loaded_data = XBZRLE.decoded_buf;
56e93d26 3347 /* load data and decode */
f265e0e4 3348 /* it can change loaded_data to point to an internal buffer */
063e760a 3349 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
56e93d26
JQ
3350
3351 /* decode RLE */
063e760a 3352 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
56e93d26
JQ
3353 TARGET_PAGE_SIZE) == -1) {
3354 error_report("Failed to load XBZRLE page - decode error!");
3355 return -1;
3356 }
3357
3358 return 0;
3359}
3360
3d0684b2
JQ
3361/**
3362 * ram_block_from_stream: read a RAMBlock id from the migration stream
3363 *
3364 * Must be called from within a rcu critical section.
3365 *
56e93d26 3366 * Returns a pointer from within the RCU-protected ram_list.
a7180877 3367 *
3d0684b2
JQ
3368 * @f: QEMUFile where to read the data from
3369 * @flags: Page flags (mostly to see if it's a continuation of previous block)
a7180877 3370 */
3d0684b2 3371static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
56e93d26
JQ
3372{
3373 static RAMBlock *block = NULL;
3374 char id[256];
3375 uint8_t len;
3376
3377 if (flags & RAM_SAVE_FLAG_CONTINUE) {
4c4bad48 3378 if (!block) {
56e93d26
JQ
3379 error_report("Ack, bad migration stream!");
3380 return NULL;
3381 }
4c4bad48 3382 return block;
56e93d26
JQ
3383 }
3384
3385 len = qemu_get_byte(f);
3386 qemu_get_buffer(f, (uint8_t *)id, len);
3387 id[len] = 0;
3388
e3dd7493 3389 block = qemu_ram_block_by_name(id);
4c4bad48
HZ
3390 if (!block) {
3391 error_report("Can't find block %s", id);
3392 return NULL;
56e93d26
JQ
3393 }
3394
b895de50
CLG
3395 if (!qemu_ram_is_migratable(block)) {
3396 error_report("block %s should not be migrated !", id);
3397 return NULL;
3398 }
3399
4c4bad48
HZ
3400 return block;
3401}
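/*
 * Illustrative stream layout (not part of the original file): the
 * block id travels only once per run of pages from the same block;
 * later pages set RAM_SAVE_FLAG_CONTINUE and rely on the pointer
 * cached above ("pc.ram" is just an example block name):
 *
 *     addr|RAM_SAVE_FLAG_PAGE, len("pc.ram"), "pc.ram", <page data>
 *     addr|RAM_SAVE_FLAG_PAGE|RAM_SAVE_FLAG_CONTINUE,   <page data>
 */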
3402
3403static inline void *host_from_ram_block_offset(RAMBlock *block,
3404 ram_addr_t offset)
3405{
3406 if (!offset_in_ramblock(block, offset)) {
3407 return NULL;
3408 }
3409
3410 return block->host + offset;
56e93d26
JQ
3411}
3412
3d0684b2
JQ
3413/**
3414 * ram_handle_compressed: handle the zero page case
3415 *
56e93d26
JQ
3416 * If a page (or a whole RDMA chunk) has been
3417 * determined to be zero, then zap it.
3d0684b2
JQ
3418 *
3419 * @host: host address for the zero page
3420 * @ch: what the page is filled from. We only support zero
3421 * @size: size of the zero page
56e93d26
JQ
3422 */
3423void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
3424{
3425 if (ch != 0 || !is_zero_range(host, size)) {
3426 memset(host, ch, size);
3427 }
3428}
3429
797ca154
XG
3430/* return the size after decompression, or negative value on error */
3431static int
3432qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
3433 const uint8_t *source, size_t source_len)
3434{
3435 int err;
3436
3437 err = inflateReset(stream);
3438 if (err != Z_OK) {
3439 return -1;
3440 }
3441
3442 stream->avail_in = source_len;
3443 stream->next_in = (uint8_t *)source;
3444 stream->avail_out = dest_len;
3445 stream->next_out = dest;
3446
3447 err = inflate(stream, Z_NO_FLUSH);
3448 if (err != Z_STREAM_END) {
3449 return -1;
3450 }
3451
3452 return stream->total_out;
3453}
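/*
 * Illustrative sketch (not part of the original file): the z_stream is
 * inflateInit()'d once per decompress thread (see
 * compress_threads_load_setup() below) and only reset here, so the
 * same stream decodes page after page without reallocating zlib state.
 * The function name is hypothetical.
 */
static G_GNUC_UNUSED int example_inflate_two_pages(z_stream *stream,
                                                   const uint8_t *comp1,
                                                   size_t len1,
                                                   const uint8_t *comp2,
                                                   size_t len2,
                                                   uint8_t *host)
{
    if (qemu_uncompress_data(stream, host, TARGET_PAGE_SIZE,
                             comp1, len1) < 0) {
        return -1;
    }
    /* the inflateReset() inside readies the stream for the next page */
    return qemu_uncompress_data(stream, host, TARGET_PAGE_SIZE,
                                comp2, len2) < 0 ? -1 : 0;
}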
3454
56e93d26
JQ
3455static void *do_data_decompress(void *opaque)
3456{
3457 DecompressParam *param = opaque;
3458 unsigned long pagesize;
33d151f4 3459 uint8_t *des;
34ab9e97 3460 int len, ret;
56e93d26 3461
33d151f4 3462 qemu_mutex_lock(&param->mutex);
90e56fb4 3463 while (!param->quit) {
33d151f4
LL
3464 if (param->des) {
3465 des = param->des;
3466 len = param->len;
3467 param->des = 0;
3468 qemu_mutex_unlock(&param->mutex);
3469
56e93d26 3470 pagesize = TARGET_PAGE_SIZE;
34ab9e97
XG
3471
3472 ret = qemu_uncompress_data(&param->stream, des, pagesize,
3473 param->compbuf, len);
f548222c 3474 if (ret < 0 && migrate_get_current()->decompress_error_check) {
34ab9e97
XG
3475 error_report("decompress data failed");
3476 qemu_file_set_error(decomp_file, ret);
3477 }
73a8912b 3478
33d151f4
LL
3479 qemu_mutex_lock(&decomp_done_lock);
3480 param->done = true;
3481 qemu_cond_signal(&decomp_done_cond);
3482 qemu_mutex_unlock(&decomp_done_lock);
3483
3484 qemu_mutex_lock(&param->mutex);
3485 } else {
3486 qemu_cond_wait(&param->cond, &param->mutex);
3487 }
56e93d26 3488 }
33d151f4 3489 qemu_mutex_unlock(&param->mutex);
56e93d26
JQ
3490
3491 return NULL;
3492}
3493
34ab9e97 3494static int wait_for_decompress_done(void)
5533b2e9
LL
3495{
3496 int idx, thread_count;
3497
3498 if (!migrate_use_compression()) {
34ab9e97 3499 return 0;
5533b2e9
LL
3500 }
3501
3502 thread_count = migrate_decompress_threads();
3503 qemu_mutex_lock(&decomp_done_lock);
3504 for (idx = 0; idx < thread_count; idx++) {
3505 while (!decomp_param[idx].done) {
3506 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3507 }
3508 }
3509 qemu_mutex_unlock(&decomp_done_lock);
34ab9e97 3510 return qemu_file_get_error(decomp_file);
5533b2e9
LL
3511}
3512
f0afa331 3513static void compress_threads_load_cleanup(void)
56e93d26
JQ
3514{
3515 int i, thread_count;
3516
3416ab5b
JQ
3517 if (!migrate_use_compression()) {
3518 return;
3519 }
56e93d26
JQ
3520 thread_count = migrate_decompress_threads();
3521 for (i = 0; i < thread_count; i++) {
797ca154
XG
3522 /*
 3523 * we use it as an indicator of whether the thread is
 3524 * properly initialized or not
3525 */
3526 if (!decomp_param[i].compbuf) {
3527 break;
3528 }
3529
56e93d26 3530 qemu_mutex_lock(&decomp_param[i].mutex);
90e56fb4 3531 decomp_param[i].quit = true;
56e93d26
JQ
3532 qemu_cond_signal(&decomp_param[i].cond);
3533 qemu_mutex_unlock(&decomp_param[i].mutex);
3534 }
3535 for (i = 0; i < thread_count; i++) {
797ca154
XG
3536 if (!decomp_param[i].compbuf) {
3537 break;
3538 }
3539
56e93d26
JQ
3540 qemu_thread_join(decompress_threads + i);
3541 qemu_mutex_destroy(&decomp_param[i].mutex);
3542 qemu_cond_destroy(&decomp_param[i].cond);
797ca154 3543 inflateEnd(&decomp_param[i].stream);
56e93d26 3544 g_free(decomp_param[i].compbuf);
797ca154 3545 decomp_param[i].compbuf = NULL;
56e93d26
JQ
3546 }
3547 g_free(decompress_threads);
3548 g_free(decomp_param);
56e93d26
JQ
3549 decompress_threads = NULL;
3550 decomp_param = NULL;
34ab9e97 3551 decomp_file = NULL;
56e93d26
JQ
3552}
3553
static int compress_threads_load_setup(QEMUFile *f)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }

    thread_count = migrate_decompress_threads();
    decompress_threads = g_new0(QemuThread, thread_count);
    decomp_param = g_new0(DecompressParam, thread_count);
    qemu_mutex_init(&decomp_done_lock);
    qemu_cond_init(&decomp_done_cond);
    decomp_file = f;
    for (i = 0; i < thread_count; i++) {
        if (inflateInit(&decomp_param[i].stream) != Z_OK) {
            goto exit;
        }

        decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
        qemu_mutex_init(&decomp_param[i].mutex);
        qemu_cond_init(&decomp_param[i].cond);
        decomp_param[i].done = true;
        decomp_param[i].quit = false;
        qemu_thread_create(decompress_threads + i, "decompress",
                           do_data_decompress, decomp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
    return 0;
exit:
    compress_threads_load_cleanup();
    return -1;
}

static void decompress_data_with_multi_threads(QEMUFile *f,
                                               void *host, int len)
{
    int idx, thread_count;

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (decomp_param[idx].done) {
                decomp_param[idx].done = false;
                qemu_mutex_lock(&decomp_param[idx].mutex);
                qemu_get_buffer(f, decomp_param[idx].compbuf, len);
                decomp_param[idx].des = host;
                decomp_param[idx].len = len;
                qemu_cond_signal(&decomp_param[idx].cond);
                qemu_mutex_unlock(&decomp_param[idx].mutex);
                break;
            }
        }
        if (idx < thread_count) {
            break;
        } else {
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
        }
    }
    qemu_mutex_unlock(&decomp_done_lock);
}

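/*
 * Editor's summary of the decompress thread-pool handshake above: the
 * load thread claims an idle worker by clearing ->done, fills ->compbuf,
 * ->des and ->len under the worker's ->mutex and signals ->cond; the
 * worker inflates the page, then sets ->done and signals decomp_done_cond
 * under decomp_done_lock, which is what both
 * decompress_data_with_multi_threads() and wait_for_decompress_done()
 * block on.
 */
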
/**
 * ram_load_setup: Setup RAM for migration incoming side
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to receive the data
 * @opaque: RAMState pointer
 */
static int ram_load_setup(QEMUFile *f, void *opaque)
{
    if (compress_threads_load_setup(f)) {
        return -1;
    }

    xbzrle_load_setup();
    ramblock_recv_map_init();
    return 0;
}

static int ram_load_cleanup(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        if (ramblock_is_pmem(rb)) {
            pmem_persist(rb->host, rb->used_length);
        }
    }

    xbzrle_load_cleanup();
    compress_threads_load_cleanup();

    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        g_free(rb->receivedmap);
        rb->receivedmap = NULL;
    }
    return 0;
}

/**
 * ram_postcopy_incoming_init: allocate postcopy data structures
 *
 * Returns 0 for success and negative if there was an error
 *
 * @mis: current migration incoming state
 *
 * Allocate data structures etc needed by incoming migration with
 * postcopy-ram. postcopy-ram's similarly named
 * postcopy_ram_incoming_init does the work.
 */
int ram_postcopy_incoming_init(MigrationIncomingState *mis)
{
    return postcopy_ram_incoming_init(mis);
}

/**
 * ram_load_postcopy: load a page in postcopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in postcopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile to receive the data from
 */
static int ram_load_postcopy(QEMUFile *f)
{
    int flags = 0, ret = 0;
    bool place_needed = false;
    bool matches_target_page_size = false;
    MigrationIncomingState *mis = migration_incoming_get_current();
    /* Temporary page that is later 'placed' */
    void *postcopy_host_page = postcopy_get_tmp_page(mis);
    void *last_host = NULL;
    bool all_zero = false;

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *host = NULL;
        void *page_buffer = NULL;
        void *place_source = NULL;
        RAMBlock *block = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);

        /*
         * On a qemu file error we should stop here; "addr" may be
         * invalid after such an error.
         */
        ret = qemu_file_get_error(f);
        if (ret) {
            break;
        }

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        trace_ram_load_postcopy_loop((uint64_t)addr, flags);
        place_needed = false;
        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
            block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
            /*
             * Postcopy requires that we place whole host pages atomically;
             * these may be huge pages for RAMBlocks that are backed by
             * hugetlbfs.
             * To make it atomic, the data is read into a temporary page
             * that's moved into place later.
             * The migration protocol uses, possibly smaller, target pages;
             * however, the source ensures it always sends all the components
             * of a host page in order.
             */
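            /*
             * Worked example (editor's illustration, assuming a 4 KiB
             * TARGET_PAGE_SIZE and a 2 MiB hugetlbfs-backed block): each
             * incoming 4 KiB target page lands at offset
             * ((uintptr_t)host & 0x1fffff) inside postcopy_host_page, and
             * only after the 512th and last target page of that host page
             * has arrived is the whole 2 MiB page placed in one atomic
             * operation.
             */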
            page_buffer = postcopy_host_page +
                          ((uintptr_t)host & (block->page_size - 1));
            /* If all TPs are zero then we can optimise the place */
            if (!((uintptr_t)host & (block->page_size - 1))) {
                all_zero = true;
            } else {
                /* not the 1st TP within the HP */
                if (host != (last_host + TARGET_PAGE_SIZE)) {
                    error_report("Non-sequential target page %p/%p",
                                 host, last_host);
                    ret = -EINVAL;
                    break;
                }
            }

            /*
             * If it's the last part of a host page then we place the host
             * page
             */
            place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
                            (block->page_size - 1)) == 0;
            place_source = postcopy_host_page;
        }
        last_host = host;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            memset(page_buffer, ch, TARGET_PAGE_SIZE);
            if (ch) {
                all_zero = false;
            }
            break;

        case RAM_SAVE_FLAG_PAGE:
            all_zero = false;
            if (!matches_target_page_size) {
                /* For huge pages, we always use a temporary buffer */
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
            } else {
                /*
                 * For small pages that match the target page size, we
                 * avoid the qemu_file copy. Instead we directly use
                 * the buffer of QEMUFile to place the page. Note: we
                 * must not perform any other QEMUFile operation before
                 * the page is placed, so that the buffer stays valid.
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
                                         TARGET_PAGE_SIZE);
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            multifd_recv_sync_main();
            break;
        default:
            error_report("Unknown combination of migration flags: %#x"
                         " (postcopy mode)", flags);
            ret = -EINVAL;
            break;
        }

        /* Detect any possible file errors */
        if (!ret && qemu_file_get_error(f)) {
            ret = qemu_file_get_error(f);
        }

        if (!ret && place_needed) {
            /* This gets called at the last target page in the host page */
            void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;

            if (all_zero) {
                ret = postcopy_place_page_zero(mis, place_dest,
                                               block);
            } else {
                ret = postcopy_place_page(mis, place_dest,
                                          place_source, block);
            }
        }
    }

    return ret;
}
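
/*
 * Editor's gloss on place_dest above: place_needed only becomes true for
 * the final target page of a host page, so "host + TARGET_PAGE_SIZE -
 * block->page_size" points back at the start of the (possibly huge) host
 * page that is being placed atomically.
 */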

static bool postcopy_is_advised(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
}

static bool postcopy_is_running(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
}

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0, invalid_flags = 0;
    static uint64_t seq_iter;
    int len = 0;
    /*
     * If the system is running in postcopy mode, page inserts to host
     * memory must be atomic.
     */
    bool postcopy_running = postcopy_is_running();
    /* ADVISE comes earlier; it shows the source has the postcopy capability on */
    bool postcopy_advised = postcopy_is_advised();

    seq_iter++;

    if (version_id != 4) {
        ret = -EINVAL;
    }

    if (!migrate_use_compression()) {
        invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
    }
    /* This RCU critical section can be very long running.
     * When RCU reclaims in the code become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    rcu_read_lock();

    if (postcopy_running) {
        ret = ram_load_postcopy(f);
    }

    while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & invalid_flags) {
            if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
                error_report("Received an unexpected compressed page");
            }

            ret = -EINVAL;
            break;
        }

        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            ramblock_recv_bitmap_set(block, host);
            trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                block = qemu_ram_block_by_name(id);
                if (block && !qemu_ram_is_migratable(block)) {
                    error_report("block %s should not be migrated!", id);
                    ret = -EINVAL;
                } else if (block) {
                    if (length != block->used_length) {
                        Error *local_err = NULL;

                        ret = qemu_ram_resize(block, length,
                                              &local_err);
                        if (local_err) {
                            error_report_err(local_err);
                        }
                    }
                    /* For postcopy we need to check hugepage sizes match */
                    if (postcopy_advised &&
                        block->page_size != qemu_host_page_size) {
                        uint64_t remote_page_size = qemu_get_be64(f);
                        if (remote_page_size != block->page_size) {
                            error_report("Mismatched RAM page size %s "
                                         "(local) %zu != %" PRIu64,
                                         id, block->page_size,
                                         remote_page_size);
                            ret = -EINVAL;
                        }
                    }
                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                                          block->idstr);
                } else {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;

        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, host, len);
            break;

        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            multifd_recv_sync_main();
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    ret |= wait_for_decompress_done();
    rcu_read_unlock();
    trace_ram_load_complete(ret, seq_iter);
    return ret;
}
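
/*
 * Editor's sketch of the stream records consumed above (derived from the
 * qemu_get_* calls in ram_load(), not a normative spec): each record
 * starts with a big-endian 64-bit word whose low bits carry the
 * RAM_SAVE_FLAG_* values and whose page-aligned remainder is the address;
 * RAM_SAVE_FLAG_ZERO is followed by one fill byte, RAM_SAVE_FLAG_PAGE by
 * TARGET_PAGE_SIZE raw bytes, and RAM_SAVE_FLAG_COMPRESS_PAGE by a be32
 * length plus that many compressed bytes.
 */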

static bool ram_has_postcopy(void *opaque)
{
    RAMBlock *rb;
    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        if (ramblock_is_pmem(rb)) {
            info_report("Block: %s, host: %p is nvdimm memory, postcopy "
                        "is not supported yet!", rb->idstr, rb->host);
            return false;
        }
    }

    return migrate_postcopy_ram();
}

/* Sync all the dirty bitmaps with the destination VM. */
static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
{
    RAMBlock *block;
    QEMUFile *file = s->to_dst_file;
    int ramblock_count = 0;

    trace_ram_dirty_bitmap_sync_start();

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        qemu_savevm_send_recv_bitmap(file, block->idstr);
        trace_ram_dirty_bitmap_request(block->idstr);
        ramblock_count++;
    }

    trace_ram_dirty_bitmap_sync_wait();

    /* Wait until all the ramblocks' dirty bitmaps are synced */
    while (ramblock_count--) {
        qemu_sem_wait(&s->rp_state.rp_sem);
    }

    trace_ram_dirty_bitmap_sync_complete();

    return 0;
}

static void ram_dirty_bitmap_reload_notify(MigrationState *s)
{
    qemu_sem_post(&s->rp_state.rp_sem);
}

/*
 * Read the received bitmap, revert it as the initial dirty bitmap.
 * This is only used when the postcopy migration is paused but wants
 * to resume from a middle point.
 */
int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
{
    int ret = -EINVAL;
    QEMUFile *file = s->rp_state.from_dst_file;
    unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
    uint64_t local_size = DIV_ROUND_UP(nbits, 8);
    uint64_t size, end_mark;

    trace_ram_dirty_bitmap_reload_begin(block->idstr);

    if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: incorrect state %s", __func__,
                     MigrationStatus_str(s->state));
        return -EINVAL;
    }

    /*
     * Note: see comments in ramblock_recv_bitmap_send() on why we
     * need the endianness conversion, and the padding.
     */
    local_size = ROUND_UP(local_size, 8);

    /* Add paddings */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
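
    /*
     * Worked example (editor's illustration, assuming 4 KiB target
     * pages): a 1 GiB ramblock gives nbits = 262144 and local_size =
     * 32768 bytes, already a multiple of 8; the extra BITS_PER_LONG of
     * padding in the allocation guarantees the rounded-up read below
     * cannot overrun le_bitmap.
     */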

    size = qemu_get_be64(file);

    /* The size of the bitmap should match our ramblock */
    if (size != local_size) {
        error_report("%s: ramblock '%s' bitmap size mismatch "
                     "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
                     block->idstr, size, local_size);
        ret = -EINVAL;
        goto out;
    }

    size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
    end_mark = qemu_get_be64(file);

    ret = qemu_file_get_error(file);
    if (ret || size != local_size) {
        error_report("%s: read bitmap failed for ramblock '%s': %d"
                     " (size 0x%"PRIx64", got: 0x%"PRIx64")",
                     __func__, block->idstr, ret, local_size, size);
        ret = -EIO;
        goto out;
    }

    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
        error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIx64,
                     __func__, block->idstr, end_mark);
        ret = -EINVAL;
        goto out;
    }

    /*
     * Endianness conversion. We are in postcopy (though paused).
     * The dirty bitmap won't change. We can directly modify it.
     */
    bitmap_from_le(block->bmap, le_bitmap, nbits);

    /*
     * What we received is the "received bitmap". Revert it as the
     * initial dirty bitmap for this ramblock.
     */
    bitmap_complement(block->bmap, block->bmap, nbits);

    trace_ram_dirty_bitmap_reload_complete(block->idstr);

    /*
     * We have successfully synced the bitmap for the current ramblock.
     * If this is the last one to sync, we need to notify the main send
     * thread.
     */
    ram_dirty_bitmap_reload_notify(s);

    ret = 0;
out:
    g_free(le_bitmap);
    return ret;
}

static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;
    int ret;

    ret = ram_dirty_bitmap_sync_all(s, rs);
    if (ret) {
        return ret;
    }

    ram_state_resume_prepare(rs, s->to_dst_file);

    return 0;
}

static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
}