/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include <zlib.h>
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/main-loop.h"
#include "qemu/pmem.h"
#include "xbzrle.h"
#include "ram.h"
#include "migration.h"
#include "socket.h"
#include "migration/register.h"
#include "migration/misc.h"
#include "qemu-file.h"
#include "postcopy-ram.h"
#include "page_cache.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "trace.h"
#include "exec/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "block.h"
#include "sysemu/sysemu.h"
#include "qemu/uuid.h"
#include "savevm.h"
#include "qemu/iov.h"

/***********************************************************/
/* ram save/restore */

/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
 * worked for pages that were filled with the same char.  We switched
 * it to only search for the zero value, and renamed it to avoid
 * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
 */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100

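/*
 * A short sketch of how these flags are used (derived from save_page_header()
 * and do_compress_ram_page() further down in this file): the flags are OR'ed
 * into the low bits of the page offset that gets written to the stream, e.g.
 *
 *     qemu_put_be64(f, offset | RAM_SAVE_FLAG_ZERO);
 *
 * and the real offset is later recovered by masking with TARGET_PAGE_MASK,
 * so the flag values have to stay below the target page size.
 */
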
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}

XBZRLECacheStats xbzrle_counters;

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_lock(&XBZRLE.lock);
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_unlock(&XBZRLE.lock);
}

/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from qmp_migrate_set_cache_size in the main
 * thread, possibly while a migration is in progress. A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock.
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp with the reason if the check failed
 */
int xbzrle_cache_resize(int64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}

/* Should be holding either ram_list.mutex, or the RCU lock. */
#define RAMBLOCK_FOREACH_MIGRATABLE(block)             \
    INTERNAL_RAMBLOCK_FOREACH(block)                   \
        if (!qemu_ram_is_migratable(block)) {} else

#undef RAMBLOCK_FOREACH

static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}

int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}

bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}

void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}

void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}

#define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)

/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns >0 if success with sent bytes, or <0 if error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;
    uint64_t size;

    if (!block) {
        error_report("%s: invalid block name: %s", __func__, block_name);
        return -1;
    }

    nbits = block->used_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment). So extend it a bit beforehand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap. This way it
     * is interpreted correctly even when source and destination VMs
     * do not use the same endianness. (Note: big endian won't work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = DIV_ROUND_UP(nbits, 8);

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines. We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit machines.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
    qemu_fflush(file);

    g_free(le_bitmap);

    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    return size + sizeof(size);
}

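/*
 * For reference, the stream layout produced above (a sketch restating the
 * calls in ramblock_recv_bitmap_send(), not an additional spec):
 *
 *   be64:       size of the bitmap in bytes, rounded up to 8
 *   size bytes: the received-page bitmap, little endian
 *   be64:       RAMBLOCK_RECV_BITMAP_ENDING marker
 */
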
/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};

/* State of RAM for migration */
struct RAMState {
    /* QEMUFile used for this migration */
    QEMUFile *f;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* We are in the first round */
    bool ram_bulk_stage;
    /* How many times we have found too many dirty pages in a period */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* number of iterations at the beginning of period */
    uint64_t iterations_prev;
    /* Iterations since start */
    uint64_t iterations;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /* protects modification of the bitmap */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests;
};
typedef struct RAMState RAMState;

static RAMState *ram_state;

uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}

MigrationStats ram_counters;

/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock    *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool         complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;

struct CompressParam {
    bool done;
    bool quit;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;

    /* internally used fields */
    z_stream stream;
    uint8_t *originbuf;
};
typedef struct CompressParam CompressParam;

struct DecompressParam {
    bool done;
    bool quit;
    QemuMutex mutex;
    QemuCond cond;
    void *des;
    uint8_t *compbuf;
    int len;
    z_stream stream;
};
typedef struct DecompressParam DecompressParam;

static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static QEMUFile *decomp_file;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;

static void do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
                                 ram_addr_t offset, uint8_t *source_buf);

static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;
    RAMBlock *block;
    ram_addr_t offset;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->block) {
            block = param->block;
            offset = param->offset;
            param->block = NULL;
            qemu_mutex_unlock(&param->mutex);

            do_compress_ram_page(param->file, &param->stream, block, offset,
                                 param->originbuf);

            qemu_mutex_lock(&comp_done_lock);
            param->done = true;
            qemu_cond_signal(&comp_done_cond);
            qemu_mutex_unlock(&comp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}

static inline void terminate_compression_threads(void)
{
    int idx, thread_count;

    thread_count = migrate_compress_threads();

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        comp_param[idx].quit = true;
        qemu_cond_signal(&comp_param[idx].cond);
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}

static void compress_threads_save_cleanup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        /*
         * we use it as an indicator which shows if the thread is
         * properly init'd or not
         */
        if (!comp_param[i].file) {
            break;
        }
        qemu_thread_join(compress_threads + i);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
        deflateEnd(&comp_param[i].stream);
        g_free(comp_param[i].originbuf);
        qemu_fclose(comp_param[i].file);
        comp_param[i].file = NULL;
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}

static int compress_threads_save_setup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!comp_param[i].originbuf) {
            goto exit;
        }

        if (deflateInit(&comp_param[i].stream,
                        migrate_compress_level()) != Z_OK) {
            g_free(comp_param[i].originbuf);
            goto exit;
        }

        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
    return 0;

exit:
    compress_threads_save_cleanup();
    return -1;
}

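/*
 * How the compression worker threads are driven (a sketch based on
 * do_data_compress() above; the producer side is outside this excerpt):
 * a page is handed to an idle worker by setting param->block/param->offset
 * under param->mutex and signalling param->cond; the worker compresses the
 * page into its private dummy QEMUFile, then sets param->done and signals
 * comp_done_cond so the migration thread knows the slot can be reused.
 */
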
/* Multiple fd's */

#define MULTIFD_MAGIC 0x11223344U
#define MULTIFD_VERSION 1

#define MULTIFD_FLAG_SYNC (1 << 0)

typedef struct {
    uint32_t magic;
    uint32_t version;
    unsigned char uuid[16]; /* QemuUUID */
    uint8_t id;
} __attribute__((packed)) MultiFDInit_t;

typedef struct {
    uint32_t magic;
    uint32_t version;
    uint32_t flags;
    uint32_t size;
    uint32_t used;
    uint64_t packet_num;
    char ramblock[256];
    uint64_t offset[];
} __attribute__((packed)) MultiFDPacket_t;

typedef struct {
    /* number of used pages */
    uint32_t used;
    /* number of allocated pages */
    uint32_t allocated;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* offset of each page */
    ram_addr_t *offset;
    /* pointer to each page */
    struct iovec *iov;
    RAMBlock *block;
} MultiFDPages_t;

typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* sem where to wait for more work */
    QemuSemaphore sem;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* should this thread finish */
    bool quit;
    /* thread has work to do */
    int pending_job;
    /* array of pages to be sent */
    MultiFDPages_t *pages;
    /* packet allocated len */
    uint32_t packet_len;
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    uint32_t flags;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* thread local variables */
    /* packets sent through this channel */
    uint64_t num_packets;
    /* pages sent through this channel */
    uint64_t num_pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
} MultiFDSendParams;

typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* array of pages to receive */
    MultiFDPages_t *pages;
    /* packet allocated len */
    uint32_t packet_len;
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    uint32_t flags;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* thread local variables */
    /* packets received through this channel */
    uint64_t num_packets;
    /* pages received through this channel */
    uint64_t num_pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
} MultiFDRecvParams;

static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    msg.magic = cpu_to_be32(MULTIFD_MAGIC);
    msg.version = cpu_to_be32(MULTIFD_VERSION);
    msg.id = p->id;
    memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));

    ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }
    return 0;
}

static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }

    be32_to_cpus(&msg.magic);
    be32_to_cpus(&msg.version);

    if (msg.magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet magic %x "
                   "expected %x", msg.magic, MULTIFD_MAGIC);
        return -1;
    }

    if (msg.version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet version %d "
                   "expected %d", msg.version, MULTIFD_VERSION);
        return -1;
    }

    if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
        char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
        char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);

        error_setg(errp, "multifd: received uuid '%s' and expected "
                   "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
        g_free(uuid);
        g_free(msg_uuid);
        return -1;
    }

    if (msg.id > migrate_multifd_channels()) {
        error_setg(errp, "multifd: received channel id %d "
                   "expected at most %d", msg.id,
                   migrate_multifd_channels());
        return -1;
    }

    return msg.id;
}

static MultiFDPages_t *multifd_pages_init(size_t size)
{
    MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);

    pages->allocated = size;
    pages->iov = g_new0(struct iovec, size);
    pages->offset = g_new0(ram_addr_t, size);

    return pages;
}

static void multifd_pages_clear(MultiFDPages_t *pages)
{
    pages->used = 0;
    pages->allocated = 0;
    pages->packet_num = 0;
    pages->block = NULL;
    g_free(pages->iov);
    pages->iov = NULL;
    g_free(pages->offset);
    pages->offset = NULL;
    g_free(pages);
}

static void multifd_send_fill_packet(MultiFDSendParams *p)
{
    MultiFDPacket_t *packet = p->packet;
    int i;

    packet->magic = cpu_to_be32(MULTIFD_MAGIC);
    packet->version = cpu_to_be32(MULTIFD_VERSION);
    packet->flags = cpu_to_be32(p->flags);
    packet->size = cpu_to_be32(migrate_multifd_page_count());
    packet->used = cpu_to_be32(p->pages->used);
    packet->packet_num = cpu_to_be64(p->packet_num);

    if (p->pages->block) {
        strncpy(packet->ramblock, p->pages->block->idstr, 256);
    }

    for (i = 0; i < p->pages->used; i++) {
        packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
    }
}

static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
{
    MultiFDPacket_t *packet = p->packet;
    RAMBlock *block;
    int i;

    be32_to_cpus(&packet->magic);
    if (packet->magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet "
                   "magic %x and expected magic %x",
                   packet->magic, MULTIFD_MAGIC);
        return -1;
    }

    be32_to_cpus(&packet->version);
    if (packet->version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet "
                   "version %d and expected version %d",
                   packet->version, MULTIFD_VERSION);
        return -1;
    }

    p->flags = be32_to_cpu(packet->flags);

    be32_to_cpus(&packet->size);
    if (packet->size > migrate_multifd_page_count()) {
        error_setg(errp, "multifd: received packet "
                   "with size %d and expected maximum size %d",
                   packet->size, migrate_multifd_page_count());
        return -1;
    }

    p->pages->used = be32_to_cpu(packet->used);
    if (p->pages->used > packet->size) {
        error_setg(errp, "multifd: received packet "
                   "with %d used pages and expected maximum of %d",
                   p->pages->used, packet->size);
        return -1;
    }

    p->packet_num = be64_to_cpu(packet->packet_num);

    if (p->pages->used) {
        /* make sure that ramblock is 0 terminated */
        packet->ramblock[255] = 0;
        block = qemu_ram_block_by_name(packet->ramblock);
        if (!block) {
            error_setg(errp, "multifd: unknown ram block %s",
                       packet->ramblock);
            return -1;
        }
    }

    for (i = 0; i < p->pages->used; i++) {
        ram_addr_t offset = be64_to_cpu(packet->offset[i]);

        if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
            error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
                       " (max " RAM_ADDR_FMT ")",
                       offset, block->max_length);
            return -1;
        }
        p->pages->iov[i].iov_base = block->host + offset;
        p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
    }

    return 0;
}

struct {
    MultiFDSendParams *params;
    /* number of created threads */
    int count;
    /* array of pages to be sent */
    MultiFDPages_t *pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* send channels ready */
    QemuSemaphore channels_ready;
} *multifd_send_state;

/*
 * How do we use multifd_send_state->pages and channel->pages?
 *
 * We create a pages array for each channel, plus a main one. Each time
 * that we need to send a batch of pages we interchange the one in
 * multifd_send_state with the one of the channel that is sending it.
 * There are two reasons for that:
 *    - to avoid doing so many mallocs during migration
 *    - to make it easier to know what to free at the end of migration
 *
 * This way we always know who is the owner of each "pages" struct,
 * and we don't need any locking. It belongs to the migration thread
 * or to the channel thread. Switching is safe because the migration
 * thread is using the channel mutex when changing it, and the channel
 * has to have finished with its own, otherwise pending_job can't be
 * false.
 */

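/*
 * In code terms the exchange described above boils down to the following
 * lines of multifd_send_pages() (sketch only, see the function itself):
 *
 *     p->packet_num = multifd_send_state->packet_num++;
 *     multifd_send_state->pages = p->pages;    take the channel's emptied array
 *     p->pages = pages;                        hand the filled one to the channel
 */
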
static void multifd_send_pages(void)
{
    int i;
    static int next_channel;
    MultiFDSendParams *p = NULL; /* make happy gcc */
    MultiFDPages_t *pages = multifd_send_state->pages;
    uint64_t transferred;

    qemu_sem_wait(&multifd_send_state->channels_ready);
    for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
        p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        if (!p->pending_job) {
            p->pending_job++;
            next_channel = (i + 1) % migrate_multifd_channels();
            break;
        }
        qemu_mutex_unlock(&p->mutex);
    }
    p->pages->used = 0;

    p->packet_num = multifd_send_state->packet_num++;
    p->pages->block = NULL;
    multifd_send_state->pages = p->pages;
    p->pages = pages;
    transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
    ram_counters.multifd_bytes += transferred;
    ram_counters.transferred += transferred;
    qemu_mutex_unlock(&p->mutex);
    qemu_sem_post(&p->sem);
}

static void multifd_queue_page(RAMBlock *block, ram_addr_t offset)
{
    MultiFDPages_t *pages = multifd_send_state->pages;

    if (!pages->block) {
        pages->block = block;
    }

    if (pages->block == block) {
        pages->offset[pages->used] = offset;
        pages->iov[pages->used].iov_base = block->host + offset;
        pages->iov[pages->used].iov_len = TARGET_PAGE_SIZE;
        pages->used++;

        if (pages->used < pages->allocated) {
            return;
        }
    }

    multifd_send_pages();

    if (pages->block != block) {
        multifd_queue_page(block, offset);
    }
}

static void multifd_send_terminate_threads(Error *err)
{
    int i;

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
            s->state == MIGRATION_STATUS_DEVICE ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        p->quit = true;
        qemu_sem_post(&p->sem);
        qemu_mutex_unlock(&p->mutex);
    }
}

int multifd_save_cleanup(Error **errp)
{
    int i;
    int ret = 0;

    if (!migrate_use_multifd()) {
        return 0;
    }
    multifd_send_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        if (p->running) {
            qemu_thread_join(&p->thread);
        }
        socket_send_channel_destroy(p->c);
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem);
        qemu_sem_destroy(&p->sem_sync);
        g_free(p->name);
        p->name = NULL;
        multifd_pages_clear(p->pages);
        p->pages = NULL;
        p->packet_len = 0;
        g_free(p->packet);
        p->packet = NULL;
    }
    qemu_sem_destroy(&multifd_send_state->channels_ready);
    qemu_sem_destroy(&multifd_send_state->sem_sync);
    g_free(multifd_send_state->params);
    multifd_send_state->params = NULL;
    multifd_pages_clear(multifd_send_state->pages);
    multifd_send_state->pages = NULL;
    g_free(multifd_send_state);
    multifd_send_state = NULL;
    return ret;
}

static void multifd_send_sync_main(void)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    if (multifd_send_state->pages->used) {
        multifd_send_pages();
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_signal(p->id);

        qemu_mutex_lock(&p->mutex);

        p->packet_num = multifd_send_state->packet_num++;
        p->flags |= MULTIFD_FLAG_SYNC;
        p->pending_job++;
        qemu_mutex_unlock(&p->mutex);
        qemu_sem_post(&p->sem);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_wait(p->id);
        qemu_sem_wait(&multifd_send_state->sem_sync);
    }
    trace_multifd_send_sync_main(multifd_send_state->packet_num);
}

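/*
 * To summarize the synchronization dance implemented above and in
 * multifd_recv_sync_main()/the channel threads (description only): the main
 * sender thread queues one extra job per channel carrying MULTIFD_FLAG_SYNC
 * and waits on sem_sync until every channel has put that packet on the wire;
 * on the destination, each receive thread that sees the flag posts sem_sync
 * and then blocks on its own p->sem_sync until the main thread has collected
 * the highest packet_num from all channels.
 */
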
static void *multifd_send_thread(void *opaque)
{
    MultiFDSendParams *p = opaque;
    Error *local_err = NULL;
    int ret;

    trace_multifd_send_thread_start(p->id);
    rcu_register_thread();

    if (multifd_send_initial_packet(p, &local_err) < 0) {
        goto out;
    }
    /* initial packet */
    p->num_packets = 1;

    while (true) {
        qemu_sem_wait(&p->sem);
        qemu_mutex_lock(&p->mutex);

        if (p->pending_job) {
            uint32_t used = p->pages->used;
            uint64_t packet_num = p->packet_num;
            uint32_t flags = p->flags;

            multifd_send_fill_packet(p);
            p->flags = 0;
            p->num_packets++;
            p->num_pages += used;
            p->pages->used = 0;
            qemu_mutex_unlock(&p->mutex);

            trace_multifd_send(p->id, packet_num, used, flags);

            ret = qio_channel_write_all(p->c, (void *)p->packet,
                                        p->packet_len, &local_err);
            if (ret != 0) {
                break;
            }

            ret = qio_channel_writev_all(p->c, p->pages->iov, used, &local_err);
            if (ret != 0) {
                break;
            }

            qemu_mutex_lock(&p->mutex);
            p->pending_job--;
            qemu_mutex_unlock(&p->mutex);

            if (flags & MULTIFD_FLAG_SYNC) {
                qemu_sem_post(&multifd_send_state->sem_sync);
            }
            qemu_sem_post(&multifd_send_state->channels_ready);
        } else if (p->quit) {
            qemu_mutex_unlock(&p->mutex);
            break;
        } else {
            qemu_mutex_unlock(&p->mutex);
            /* sometimes there are spurious wakeups */
        }
    }

out:
    if (local_err) {
        multifd_send_terminate_threads(local_err);
    }

    qemu_mutex_lock(&p->mutex);
    p->running = false;
    qemu_mutex_unlock(&p->mutex);

    rcu_unregister_thread();
    trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);

    return NULL;
}

static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
{
    MultiFDSendParams *p = opaque;
    QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
    Error *local_err = NULL;

    if (qio_task_propagate_error(task, &local_err)) {
        if (multifd_save_cleanup(&local_err) != 0) {
            migrate_set_error(migrate_get_current(), local_err);
        }
    } else {
        p->c = QIO_CHANNEL(sioc);
        qio_channel_set_delay(p->c, false);
        p->running = true;
        qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
                           QEMU_THREAD_JOINABLE);

        atomic_inc(&multifd_send_state->count);
    }
}

int multifd_save_setup(void)
{
    int thread_count;
    uint32_t page_count = migrate_multifd_page_count();
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }
    thread_count = migrate_multifd_channels();
    multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
    multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
    atomic_set(&multifd_send_state->count, 0);
    multifd_send_state->pages = multifd_pages_init(page_count);
    qemu_sem_init(&multifd_send_state->sem_sync, 0);
    qemu_sem_init(&multifd_send_state->channels_ready, 0);

    for (i = 0; i < thread_count; i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem, 0);
        qemu_sem_init(&p->sem_sync, 0);
        p->quit = false;
        p->pending_job = 0;
        p->id = i;
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(ram_addr_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->name = g_strdup_printf("multifdsend_%d", i);
        socket_send_channel_create(multifd_new_send_channel_async, p);
    }
    return 0;
}

struct {
    MultiFDRecvParams *params;
    /* number of created threads */
    int count;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
    /* global number of generated multifd packets */
    uint64_t packet_num;
} *multifd_recv_state;

static void multifd_recv_terminate_threads(Error *err)
{
    int i;

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_lock(&p->mutex);
        /* We could arrive here for two reasons:
           - normal quit, i.e. everything went fine, just finished
           - error quit: We close the channels so the channel threads
             finish the qio_channel_read_all_eof() */
        qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        qemu_mutex_unlock(&p->mutex);
    }
}

int multifd_load_cleanup(Error **errp)
{
    int i;
    int ret = 0;

    if (!migrate_use_multifd()) {
        return 0;
    }
    multifd_recv_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        if (p->running) {
            qemu_thread_join(&p->thread);
        }
        object_unref(OBJECT(p->c));
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem_sync);
        g_free(p->name);
        p->name = NULL;
        multifd_pages_clear(p->pages);
        p->pages = NULL;
        p->packet_len = 0;
        g_free(p->packet);
        p->packet = NULL;
    }
    qemu_sem_destroy(&multifd_recv_state->sem_sync);
    g_free(multifd_recv_state->params);
    multifd_recv_state->params = NULL;
    g_free(multifd_recv_state);
    multifd_recv_state = NULL;

    return ret;
}

static void multifd_recv_sync_main(void)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        trace_multifd_recv_sync_main_wait(p->id);
        qemu_sem_wait(&multifd_recv_state->sem_sync);
        qemu_mutex_lock(&p->mutex);
        if (multifd_recv_state->packet_num < p->packet_num) {
            multifd_recv_state->packet_num = p->packet_num;
        }
        qemu_mutex_unlock(&p->mutex);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        trace_multifd_recv_sync_main_signal(p->id);
        qemu_sem_post(&p->sem_sync);
    }
    trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
}

static void *multifd_recv_thread(void *opaque)
{
    MultiFDRecvParams *p = opaque;
    Error *local_err = NULL;
    int ret;

    trace_multifd_recv_thread_start(p->id);
    rcu_register_thread();

    while (true) {
        uint32_t used;
        uint32_t flags;

        ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
                                       p->packet_len, &local_err);
        if (ret == 0) {   /* EOF */
            break;
        }
        if (ret == -1) {   /* Error */
            break;
        }

        qemu_mutex_lock(&p->mutex);
        ret = multifd_recv_unfill_packet(p, &local_err);
        if (ret) {
            qemu_mutex_unlock(&p->mutex);
            break;
        }

        used = p->pages->used;
        flags = p->flags;
        trace_multifd_recv(p->id, p->packet_num, used, flags);
        p->num_packets++;
        p->num_pages += used;
        qemu_mutex_unlock(&p->mutex);

        ret = qio_channel_readv_all(p->c, p->pages->iov, used, &local_err);
        if (ret != 0) {
            break;
        }

        if (flags & MULTIFD_FLAG_SYNC) {
            qemu_sem_post(&multifd_recv_state->sem_sync);
            qemu_sem_wait(&p->sem_sync);
        }
    }

    if (local_err) {
        multifd_recv_terminate_threads(local_err);
    }
    qemu_mutex_lock(&p->mutex);
    p->running = false;
    qemu_mutex_unlock(&p->mutex);

    rcu_unregister_thread();
    trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);

    return NULL;
}

int multifd_load_setup(void)
{
    int thread_count;
    uint32_t page_count = migrate_multifd_page_count();
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }
    thread_count = migrate_multifd_channels();
    multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
    multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
    atomic_set(&multifd_recv_state->count, 0);
    qemu_sem_init(&multifd_recv_state->sem_sync, 0);

    for (i = 0; i < thread_count; i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem_sync, 0);
        p->id = i;
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(ram_addr_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->name = g_strdup_printf("multifdrecv_%d", i);
    }
    return 0;
}

bool multifd_recv_all_channels_created(void)
{
    int thread_count = migrate_multifd_channels();

    if (!migrate_use_multifd()) {
        return true;
    }

    return thread_count == atomic_read(&multifd_recv_state->count);
}

/* Return true if multifd is ready for the migration, otherwise false */
bool multifd_recv_new_channel(QIOChannel *ioc)
{
    MultiFDRecvParams *p;
    Error *local_err = NULL;
    int id;

    id = multifd_recv_initial_packet(ioc, &local_err);
    if (id < 0) {
        multifd_recv_terminate_threads(local_err);
        return false;
    }

    p = &multifd_recv_state->params[id];
    if (p->c != NULL) {
        error_setg(&local_err, "multifd: received id '%d' already setup",
                   id);
        multifd_recv_terminate_threads(local_err);
        return false;
    }
    p->c = ioc;
    object_ref(OBJECT(ioc));
    /* initial packet */
    p->num_packets = 1;

    p->running = true;
    qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
                       QEMU_THREAD_JOINABLE);
    atomic_inc(&multifd_recv_state->count);
    return multifd_recv_state->count == migrate_multifd_channels();
}

/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
                               ram_addr_t offset)
{
    size_t size, len;

    if (block == rs->last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        rs->last_sent_block = block;
    }
    return size;
}

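/*
 * The header emitted above therefore looks like this on the wire (a quick
 * restatement of the code, kept here only as a reference):
 *
 *   be64: page offset within the RAMBlock, OR'ed with RAM_SAVE_FLAG_* bits
 *   if RAM_SAVE_FLAG_CONTINUE is not set:
 *       u8:      length of the RAMBlock idstr
 *       n bytes: the idstr itself
 */
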
/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes. If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(void)
{
    MigrationState *s = migrate_get_current();
    uint64_t pct_initial = s->parameters.cpu_throttle_initial;
    uint64_t pct_icrement = s->parameters.cpu_throttle_increment;
    int pct_max = s->parameters.max_cpu_throttle;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_icrement,
                             pct_max));
    }
}

/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @rs: current RAM state
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
{
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
                 ram_counters.dirty_sync_count);
}

#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 */
static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr,
                         ram_counters.dirty_sync_count)) {
        xbzrle_counters.cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             ram_counters.dirty_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        xbzrle_counters.overflow++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(rs, rs->f, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(rs->f, encoded_len);
    qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    xbzrle_counters.pages++;
    xbzrle_counters.bytes += bytes_xbzrle;
    ram_counters.transferred += bytes_xbzrle;

    return 1;
}

/**
 * migration_bitmap_find_dirty: find the next dirty page from start
 *
 * Called with rcu_read_lock() to protect migration_bitmap
 *
 * Returns the byte offset within memory region of the start of a dirty page
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 */
static inline
unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                          unsigned long start)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long next;

    if (!qemu_ram_is_migratable(rb)) {
        return size;
    }

    if (rs->ram_bulk_stage && start > 0) {
        next = start + 1;
    } else {
        next = find_next_bit(bitmap, size, start);
    }

    return next;
}

static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                RAMBlock *rb,
                                                unsigned long page)
{
    bool ret;

    ret = test_and_clear_bit(page, rb->bmap);

    if (ret) {
        rs->migration_dirty_pages--;
    }
    return ret;
}

static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
                                        ram_addr_t start, ram_addr_t length)
{
    rs->migration_dirty_pages +=
        cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
                                              &rs->num_dirty_pages_period);
}

/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size. If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    RAMBlock *block;
    uint64_t summary = 0;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        summary |= block->page_size;
    }

    return summary;
}

static void migration_update_rates(RAMState *rs, int64_t end_time)
{
    uint64_t iter_count = rs->iterations - rs->iterations_prev;

    /* calculate period counters */
    ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
                / (end_time - rs->time_last_bitmap_sync);

    if (!iter_count) {
        return;
    }

    if (migrate_use_xbzrle()) {
        xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
            rs->xbzrle_cache_miss_prev) / iter_count;
        rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
    }
}

static void migration_bitmap_sync(RAMState *rs)
{
    RAMBlock *block;
    int64_t end_time;
    uint64_t bytes_xfer_now;

    ram_counters.dirty_sync_count++;

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync();

    qemu_mutex_lock(&rs->bitmap_mutex);
    rcu_read_lock();
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        migration_bitmap_sync_range(rs, block, 0, block->used_length);
    }
    ram_counters.remaining = ram_bytes_remaining();
    rcu_read_unlock();
    qemu_mutex_unlock(&rs->bitmap_mutex);

    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        bytes_xfer_now = ram_counters.transferred;

        /* During block migration the auto-converge logic incorrectly detects
         * that ram migration makes no progress. Avoid this by disabling the
         * throttling logic during the bulk phase of block migration. */
        if (migrate_auto_converge() && !blk_mig_bulk_active()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes is 50% more than the approx.
               amount of bytes that just got transferred since the last time we
               were in this routine. If that happens twice, start or increase
               throttling */

            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
                   (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
                (++rs->dirty_rate_high_cnt >= 2)) {
                    trace_migration_throttle();
                    rs->dirty_rate_high_cnt = 0;
                    mig_throttle_guest_down();
            }
        }

        migration_update_rates(rs, end_time);

        rs->iterations_prev = rs->iterations;

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
        rs->bytes_xfer_prev = bytes_xfer_now;
    }
    if (migrate_use_events()) {
        qapi_event_send_migration_pass(ram_counters.dirty_sync_count, NULL);
    }
}

/**
 * save_zero_page_to_file: send the zero page to the file
 *
 * Returns the size of data written to the file, 0 means the page is not
 * a zero page
 *
 * @rs: current RAM state
 * @file: the file where the data is saved
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
                                  RAMBlock *block, ram_addr_t offset)
{
    uint8_t *p = block->host + offset;
    int len = 0;

    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
        qemu_put_byte(file, 0);
        len += 1;
    }
    return len;
}

/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
{
    int len = save_zero_page_to_file(rs, rs->f, block, offset);

    if (len) {
        ram_counters.duplicate++;
        ram_counters.transferred += len;
        return 1;
    }
    return -1;
}

static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
{
    if (!migrate_release_ram() || !migration_in_postcopy()) {
        return;
    }

    ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
}

/*
 * @pages: the number of pages written by the control path,
 *         < 0 - error
 *         > 0 - number of pages written
 *
 * Return true if the page has been saved, otherwise false is returned.
 */
static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
                              int *pages)
{
    uint64_t bytes_xmit = 0;
    int ret;

    *pages = -1;
    ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
                                &bytes_xmit);
    if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
        return false;
    }

    if (bytes_xmit) {
        ram_counters.transferred += bytes_xmit;
        *pages = 1;
    }

    if (ret == RAM_SAVE_CONTROL_DELAYED) {
        return true;
    }

    if (bytes_xmit > 0) {
        ram_counters.normal++;
    } else if (bytes_xmit == 0) {
        ram_counters.duplicate++;
    }

    return true;
}

65dacaa0
XG
1767/*
1768 * directly send the page to the stream
1769 *
1770 * Returns the number of pages written.
1771 *
1772 * @rs: current RAM state
1773 * @block: block that contains the page we want to send
1774 * @offset: offset inside the block for the page
1775 * @buf: the page to be sent
 1776 * @async: send the page asynchronously
1777 */
1778static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1779 uint8_t *buf, bool async)
1780{
1781 ram_counters.transferred += save_page_header(rs, rs->f, block,
1782 offset | RAM_SAVE_FLAG_PAGE);
1783 if (async) {
1784 qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
1785 migrate_release_ram() &
1786 migration_in_postcopy());
1787 } else {
1788 qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
1789 }
1790 ram_counters.transferred += TARGET_PAGE_SIZE;
1791 ram_counters.normal++;
1792 return 1;
1793}
1794
56e93d26 1795/**
3d0684b2 1796 * ram_save_page: send the given page to the stream
56e93d26 1797 *
3d0684b2 1798 * Returns the number of pages written.
3fd3c4b3
DDAG
1799 * < 0 - error
1800 * >=0 - Number of pages written - this might legally be 0
1801 * if xbzrle noticed the page was the same.
56e93d26 1802 *
6f37bb8b 1803 * @rs: current RAM state
56e93d26
JQ
1804 * @block: block that contains the page we want to send
1805 * @offset: offset inside the block for the page
1806 * @last_stage: if we are at the completion stage
56e93d26 1807 */
a0a8aa14 1808static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
56e93d26
JQ
1809{
1810 int pages = -1;
56e93d26 1811 uint8_t *p;
56e93d26 1812 bool send_async = true;
a08f6890 1813 RAMBlock *block = pss->block;
a935e30f 1814 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
059ff0fb 1815 ram_addr_t current_addr = block->offset + offset;
56e93d26 1816
2f68e399 1817 p = block->host + offset;
1db9d8e5 1818 trace_ram_save_page(block->idstr, (uint64_t)offset, p);
56e93d26 1819
56e93d26 1820 XBZRLE_cache_lock();
d7400a34
XG
1821 if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
1822 migrate_use_xbzrle()) {
059ff0fb
XG
1823 pages = save_xbzrle_page(rs, &p, current_addr, block,
1824 offset, last_stage);
1825 if (!last_stage) {
1826 /* Can't send this cached data async, since the cache page
1827 * might get updated before it gets to the wire
56e93d26 1828 */
059ff0fb 1829 send_async = false;
56e93d26
JQ
1830 }
1831 }
1832
1833 /* XBZRLE overflow or normal page */
1834 if (pages == -1) {
65dacaa0 1835 pages = save_normal_page(rs, block, offset, p, send_async);
56e93d26
JQ
1836 }
1837
1838 XBZRLE_cache_unlock();
1839
1840 return pages;
1841}
1842
b9ee2f7d
JQ
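/*
 * ram_save_multifd_page: queue a page to be sent by the multifd threads
 *
 * Returns the number of pages written (always 1; the page is only queued here)
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */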
1843static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
1844 ram_addr_t offset)
1845{
b9ee2f7d 1846 multifd_queue_page(block, offset);
b9ee2f7d
JQ
1847 ram_counters.normal++;
1848
1849 return 1;
1850}
1851
6ef3771c
XG
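/*
 * do_compress_ram_page: compress one page and write it to the per-thread
 * QEMUFile @f. Runs in the compression threads; on failure the error is
 * propagated to the migration stream with qemu_file_set_error().
 */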
1852static void do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
1853 ram_addr_t offset, uint8_t *source_buf)
56e93d26 1854{
53518d94 1855 RAMState *rs = ram_state;
a7a9a88f 1856 uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
6ef3771c 1857 int ret;
56e93d26 1858
6ef3771c 1859 save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
34ab9e97
XG
1860
1861 /*
 1862 * copy it to an internal buffer to avoid it being modified by the VM,
 1863 * so that we can catch any error during compression and
1864 * decompression
1865 */
1866 memcpy(source_buf, p, TARGET_PAGE_SIZE);
6ef3771c
XG
1867 ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
1868 if (ret < 0) {
1869 qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
b3be2896 1870 error_report("compressed data failed!");
6ef3771c 1871 return;
b3be2896 1872 }
56e93d26 1873
6ef3771c 1874 ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
56e93d26
JQ
1875}
1876
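/*
 * flush_compressed_data: wait for the compression threads to finish their
 * current pages and move their buffered output into the migration stream.
 */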
ce25d337 1877static void flush_compressed_data(RAMState *rs)
56e93d26
JQ
1878{
1879 int idx, len, thread_count;
1880
1881 if (!migrate_use_compression()) {
1882 return;
1883 }
1884 thread_count = migrate_compress_threads();
a7a9a88f 1885
0d9f9a5c 1886 qemu_mutex_lock(&comp_done_lock);
56e93d26 1887 for (idx = 0; idx < thread_count; idx++) {
a7a9a88f 1888 while (!comp_param[idx].done) {
0d9f9a5c 1889 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
56e93d26 1890 }
a7a9a88f 1891 }
0d9f9a5c 1892 qemu_mutex_unlock(&comp_done_lock);
a7a9a88f
LL
1893
1894 for (idx = 0; idx < thread_count; idx++) {
1895 qemu_mutex_lock(&comp_param[idx].mutex);
90e56fb4 1896 if (!comp_param[idx].quit) {
ce25d337 1897 len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
9360447d 1898 ram_counters.transferred += len;
56e93d26 1899 }
a7a9a88f 1900 qemu_mutex_unlock(&comp_param[idx].mutex);
56e93d26
JQ
1901 }
1902}
1903
1904static inline void set_compress_params(CompressParam *param, RAMBlock *block,
1905 ram_addr_t offset)
1906{
1907 param->block = block;
1908 param->offset = offset;
1909}
1910
ce25d337
JQ
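/*
 * compress_page_with_multi_thread: hand one page to an idle compression
 * thread, flushing that thread's previously buffered output first.
 *
 * Returns 1 if the page was queued for compression, or -1 if no thread is
 * idle and 'compress-wait-thread' is off, in which case the caller sends
 * the page uncompressed.
 */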
1911static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
1912 ram_addr_t offset)
56e93d26
JQ
1913{
1914 int idx, thread_count, bytes_xmit = -1, pages = -1;
1d58872a 1915 bool wait = migrate_compress_wait_thread();
56e93d26
JQ
1916
1917 thread_count = migrate_compress_threads();
0d9f9a5c 1918 qemu_mutex_lock(&comp_done_lock);
1d58872a
XG
1919retry:
1920 for (idx = 0; idx < thread_count; idx++) {
1921 if (comp_param[idx].done) {
1922 comp_param[idx].done = false;
1923 bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
1924 qemu_mutex_lock(&comp_param[idx].mutex);
1925 set_compress_params(&comp_param[idx], block, offset);
1926 qemu_cond_signal(&comp_param[idx].cond);
1927 qemu_mutex_unlock(&comp_param[idx].mutex);
1928 pages = 1;
1d58872a 1929 ram_counters.transferred += bytes_xmit;
56e93d26 1930 break;
56e93d26
JQ
1931 }
1932 }
1d58872a
XG
1933
1934 /*
 1935 * wait for a free thread if the user specifies 'compress-wait-thread',
 1936 * otherwise we will post the page out in the main thread as a normal page.
1937 */
1938 if (pages < 0 && wait) {
1939 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
1940 goto retry;
1941 }
0d9f9a5c 1942 qemu_mutex_unlock(&comp_done_lock);
56e93d26
JQ
1943
1944 return pages;
1945}
1946
3d0684b2
JQ
1947/**
1948 * find_dirty_block: find the next dirty page and update any state
1949 * associated with the search process.
b9e60928 1950 *
3d0684b2 1951 * Returns whether a page is found
b9e60928 1952 *
6f37bb8b 1953 * @rs: current RAM state
3d0684b2
JQ
1954 * @pss: data about the state of the current dirty page scan
1955 * @again: set to false if the search has scanned the whole of RAM
b9e60928 1956 */
f20e2865 1957static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
b9e60928 1958{
f20e2865 1959 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
6f37bb8b 1960 if (pss->complete_round && pss->block == rs->last_seen_block &&
a935e30f 1961 pss->page >= rs->last_page) {
b9e60928
DDAG
1962 /*
1963 * We've been once around the RAM and haven't found anything.
1964 * Give up.
1965 */
1966 *again = false;
1967 return false;
1968 }
a935e30f 1969 if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
b9e60928 1970 /* Didn't find anything in this RAM Block */
a935e30f 1971 pss->page = 0;
b9e60928
DDAG
1972 pss->block = QLIST_NEXT_RCU(pss->block, next);
1973 if (!pss->block) {
1974 /* Hit the end of the list */
1975 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1976 /* Flag that we've looped */
1977 pss->complete_round = true;
6f37bb8b 1978 rs->ram_bulk_stage = false;
b9e60928
DDAG
1979 if (migrate_use_xbzrle()) {
1980 /* If xbzrle is on, stop using the data compression at this
1981 * point. In theory, xbzrle can do better than compression.
1982 */
ce25d337 1983 flush_compressed_data(rs);
b9e60928
DDAG
1984 }
1985 }
1986 /* Didn't find anything this time, but try again on the new block */
1987 *again = true;
1988 return false;
1989 } else {
1990 /* Can go around again, but... */
1991 *again = true;
1992 /* We've found something so probably don't need to */
1993 return true;
1994 }
1995}
1996
3d0684b2
JQ
1997/**
 1998 * unqueue_page: gets a page off the queue
1999 *
a82d593b 2000 * Helper for 'get_queued_page' - gets a page off the queue
a82d593b 2001 *
3d0684b2
JQ
2002 * Returns the block of the page (or NULL if none available)
2003 *
ec481c6c 2004 * @rs: current RAM state
3d0684b2 2005 * @offset: used to return the offset within the RAMBlock
a82d593b 2006 */
f20e2865 2007static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
a82d593b
DDAG
2008{
2009 RAMBlock *block = NULL;
2010
ec481c6c
JQ
2011 qemu_mutex_lock(&rs->src_page_req_mutex);
2012 if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2013 struct RAMSrcPageRequest *entry =
2014 QSIMPLEQ_FIRST(&rs->src_page_requests);
a82d593b
DDAG
2015 block = entry->rb;
2016 *offset = entry->offset;
a82d593b
DDAG
2017
2018 if (entry->len > TARGET_PAGE_SIZE) {
2019 entry->len -= TARGET_PAGE_SIZE;
2020 entry->offset += TARGET_PAGE_SIZE;
2021 } else {
2022 memory_region_unref(block->mr);
ec481c6c 2023 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
a82d593b 2024 g_free(entry);
e03a34f8 2025 migration_consume_urgent_request();
a82d593b
DDAG
2026 }
2027 }
ec481c6c 2028 qemu_mutex_unlock(&rs->src_page_req_mutex);
a82d593b
DDAG
2029
2030 return block;
2031}
2032
3d0684b2
JQ
2033/**
 2034 * get_queued_page: unqueue a page from the postcopy requests
2035 *
2036 * Skips pages that are already sent (!dirty)
a82d593b 2037 *
3d0684b2 2038 * Returns whether a queued page is found
a82d593b 2039 *
6f37bb8b 2040 * @rs: current RAM state
3d0684b2 2041 * @pss: data about the state of the current dirty page scan
a82d593b 2042 */
f20e2865 2043static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
a82d593b
DDAG
2044{
2045 RAMBlock *block;
2046 ram_addr_t offset;
2047 bool dirty;
2048
2049 do {
f20e2865 2050 block = unqueue_page(rs, &offset);
a82d593b
DDAG
2051 /*
2052 * We're sending this page, and since it's postcopy nothing else
2053 * will dirty it, and we must make sure it doesn't get sent again
2054 * even if this queue request was received after the background
2055 * search already sent it.
2056 */
2057 if (block) {
f20e2865
JQ
2058 unsigned long page;
2059
6b6712ef
JQ
2060 page = offset >> TARGET_PAGE_BITS;
2061 dirty = test_bit(page, block->bmap);
a82d593b 2062 if (!dirty) {
06b10688 2063 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
6b6712ef 2064 page, test_bit(page, block->unsentmap));
a82d593b 2065 } else {
f20e2865 2066 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
a82d593b
DDAG
2067 }
2068 }
2069
2070 } while (block && !dirty);
2071
2072 if (block) {
2073 /*
 2074 * As soon as we start servicing pages out of order, we have
 2075 * to kill the bulk stage, since the bulk stage assumes
 2076 * (in migration_bitmap_find_and_reset_dirty) that every page is
 2077 * dirty, which is no longer true.
2078 */
6f37bb8b 2079 rs->ram_bulk_stage = false;
a82d593b
DDAG
2080
2081 /*
2082 * We want the background search to continue from the queued page
2083 * since the guest is likely to want other pages near to the page
2084 * it just requested.
2085 */
2086 pss->block = block;
a935e30f 2087 pss->page = offset >> TARGET_PAGE_BITS;
a82d593b
DDAG
2088 }
2089
2090 return !!block;
2091}
2092
6c595cde 2093/**
5e58f968
JQ
2094 * migration_page_queue_free: drop any remaining pages in the ram
2095 * request queue
6c595cde 2096 *
3d0684b2
JQ
2097 * It should be empty at the end anyway, but in error cases there may
 2098 * be some left. In case any page is left, we drop it.
2099 *
6c595cde 2100 */
83c13382 2101static void migration_page_queue_free(RAMState *rs)
6c595cde 2102{
ec481c6c 2103 struct RAMSrcPageRequest *mspr, *next_mspr;
6c595cde
DDAG
2104 /* This queue generally should be empty - but in the case of a failed
 2105 * migration it might have some leftover entries.
2106 */
2107 rcu_read_lock();
ec481c6c 2108 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
6c595cde 2109 memory_region_unref(mspr->rb->mr);
ec481c6c 2110 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
6c595cde
DDAG
2111 g_free(mspr);
2112 }
2113 rcu_read_unlock();
2114}
2115
2116/**
3d0684b2
JQ
2117 * ram_save_queue_pages: queue the page for transmission
2118 *
2119 * A request from postcopy destination for example.
2120 *
2121 * Returns zero on success or negative on error
2122 *
3d0684b2
JQ
 2123 * @rbname: Name of the RAMBlock of the request. NULL means the
 2124 * same as the last one.
2125 * @start: starting address from the start of the RAMBlock
2126 * @len: length (in bytes) to send
6c595cde 2127 */
96506894 2128int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
6c595cde
DDAG
2129{
2130 RAMBlock *ramblock;
53518d94 2131 RAMState *rs = ram_state;
6c595cde 2132
9360447d 2133 ram_counters.postcopy_requests++;
6c595cde
DDAG
2134 rcu_read_lock();
2135 if (!rbname) {
2136 /* Reuse last RAMBlock */
68a098f3 2137 ramblock = rs->last_req_rb;
6c595cde
DDAG
2138
2139 if (!ramblock) {
2140 /*
2141 * Shouldn't happen, we can't reuse the last RAMBlock if
2142 * it's the 1st request.
2143 */
2144 error_report("ram_save_queue_pages no previous block");
2145 goto err;
2146 }
2147 } else {
2148 ramblock = qemu_ram_block_by_name(rbname);
2149
2150 if (!ramblock) {
2151 /* We shouldn't be asked for a non-existent RAMBlock */
2152 error_report("ram_save_queue_pages no block '%s'", rbname);
2153 goto err;
2154 }
68a098f3 2155 rs->last_req_rb = ramblock;
6c595cde
DDAG
2156 }
2157 trace_ram_save_queue_pages(ramblock->idstr, start, len);
2158 if (start+len > ramblock->used_length) {
9458ad6b
JQ
2159 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
2160 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
6c595cde
DDAG
2161 __func__, start, len, ramblock->used_length);
2162 goto err;
2163 }
2164
ec481c6c
JQ
2165 struct RAMSrcPageRequest *new_entry =
2166 g_malloc0(sizeof(struct RAMSrcPageRequest));
6c595cde
DDAG
2167 new_entry->rb = ramblock;
2168 new_entry->offset = start;
2169 new_entry->len = len;
2170
2171 memory_region_ref(ramblock->mr);
ec481c6c
JQ
2172 qemu_mutex_lock(&rs->src_page_req_mutex);
2173 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
e03a34f8 2174 migration_make_urgent_request();
ec481c6c 2175 qemu_mutex_unlock(&rs->src_page_req_mutex);
6c595cde
DDAG
2176 rcu_read_unlock();
2177
2178 return 0;
2179
2180err:
2181 rcu_read_unlock();
2182 return -1;
2183}
2184
d7400a34
XG
2185static bool save_page_use_compression(RAMState *rs)
2186{
2187 if (!migrate_use_compression()) {
2188 return false;
2189 }
2190
2191 /*
 2192 * If xbzrle is on, stop using the data compression after the first
2193 * round of migration even if compression is enabled. In theory,
2194 * xbzrle can do better than compression.
2195 */
2196 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
2197 return true;
2198 }
2199
2200 return false;
2201}
2202
a82d593b 2203/**
3d0684b2 2204 * ram_save_target_page: save one target page
a82d593b 2205 *
3d0684b2 2206 * Returns the number of pages written
a82d593b 2207 *
6f37bb8b 2208 * @rs: current RAM state
3d0684b2 2209 * @pss: data about the page we want to send
a82d593b 2210 * @last_stage: if we are at the completion stage
a82d593b 2211 */
a0a8aa14 2212static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
f20e2865 2213 bool last_stage)
a82d593b 2214{
a8ec91f9
XG
2215 RAMBlock *block = pss->block;
2216 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2217 int res;
2218
2219 if (control_save_page(rs, block, offset, &res)) {
2220 return res;
2221 }
2222
1faa5665 2223 /*
d7400a34
XG
2224 * When starting the process of a new block, the first page of
2225 * the block should be sent out before other pages in the same
 2226 * block, and all the pages in the last block should have been sent
 2227 * out. Keeping this order is important, because the 'cont' flag
2228 * is used to avoid resending the block name.
1faa5665 2229 */
d7400a34
XG
2230 if (block != rs->last_sent_block && save_page_use_compression(rs)) {
2231 flush_compressed_data(rs);
2232 }
2233
2234 res = save_zero_page(rs, block, offset);
2235 if (res > 0) {
2236 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2237 * page would be stale
2238 */
2239 if (!save_page_use_compression(rs)) {
2240 XBZRLE_cache_lock();
2241 xbzrle_cache_zero_page(rs, block->offset + offset);
2242 XBZRLE_cache_unlock();
2243 }
2244 ram_release_pages(block->idstr, offset, res);
2245 return res;
2246 }
2247
da3f56cb
XG
2248 /*
2249 * Make sure the first page is sent out before other pages.
2250 *
 2251 * we post it as a normal page as compression will take much
2252 * CPU resource.
2253 */
2254 if (block == rs->last_sent_block && save_page_use_compression(rs)) {
1d58872a
XG
2255 res = compress_page_with_multi_thread(rs, block, offset);
2256 if (res > 0) {
2257 return res;
2258 }
b9ee2f7d
JQ
2259 } else if (migrate_use_multifd()) {
2260 return ram_save_multifd_page(rs, block, offset);
a82d593b
DDAG
2261 }
2262
1faa5665 2263 return ram_save_page(rs, pss, last_stage);
a82d593b
DDAG
2264}
2265
2266/**
3d0684b2 2267 * ram_save_host_page: save a whole host page
a82d593b 2268 *
3d0684b2
JQ
2269 * Starting at *offset send pages up to the end of the current host
2270 * page. It's valid for the initial offset to point into the middle of
2271 * a host page in which case the remainder of the hostpage is sent.
2272 * Only dirty target pages are sent. Note that the host page size may
2273 * be a huge page for this block.
1eb3fc0a
DDAG
2274 * The saving stops at the boundary of the used_length of the block
2275 * if the RAMBlock isn't a multiple of the host page size.
a82d593b 2276 *
3d0684b2
JQ
2277 * Returns the number of pages written or negative on error
2278 *
6f37bb8b 2279 * @rs: current RAM state
3d0684b2 2280 * @ms: current migration state
3d0684b2 2281 * @pss: data about the page we want to send
a82d593b 2282 * @last_stage: if we are at the completion stage
a82d593b 2283 */
a0a8aa14 2284static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
f20e2865 2285 bool last_stage)
a82d593b
DDAG
2286{
2287 int tmppages, pages = 0;
a935e30f
JQ
2288 size_t pagesize_bits =
2289 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
4c011c37 2290
b895de50
CLG
2291 if (!qemu_ram_is_migratable(pss->block)) {
2292 error_report("block %s should not be migrated !", pss->block->idstr);
2293 return 0;
2294 }
2295
a82d593b 2296 do {
1faa5665
XG
 2297 /* Check if the page is dirty and, if it is, send it */
2298 if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2299 pss->page++;
2300 continue;
2301 }
2302
f20e2865 2303 tmppages = ram_save_target_page(rs, pss, last_stage);
a82d593b
DDAG
2304 if (tmppages < 0) {
2305 return tmppages;
2306 }
2307
2308 pages += tmppages;
1faa5665
XG
2309 if (pss->block->unsentmap) {
2310 clear_bit(pss->page, pss->block->unsentmap);
2311 }
2312
a935e30f 2313 pss->page++;
1eb3fc0a
DDAG
2314 } while ((pss->page & (pagesize_bits - 1)) &&
2315 offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
a82d593b
DDAG
2316
2317 /* The offset we leave with is the last one we looked at */
a935e30f 2318 pss->page--;
a82d593b
DDAG
2319 return pages;
2320}
6c595cde 2321
56e93d26 2322/**
3d0684b2 2323 * ram_find_and_save_block: finds a dirty page and sends it to f
56e93d26
JQ
2324 *
2325 * Called within an RCU critical section.
2326 *
3d0684b2 2327 * Returns the number of pages written where zero means no dirty pages
56e93d26 2328 *
6f37bb8b 2329 * @rs: current RAM state
56e93d26 2330 * @last_stage: if we are at the completion stage
a82d593b
DDAG
2331 *
2332 * On systems where host-page-size > target-page-size it will send all the
2333 * pages in a host page that are dirty.
56e93d26
JQ
2334 */
2335
ce25d337 2336static int ram_find_and_save_block(RAMState *rs, bool last_stage)
56e93d26 2337{
b8fb8cb7 2338 PageSearchStatus pss;
56e93d26 2339 int pages = 0;
b9e60928 2340 bool again, found;
56e93d26 2341
0827b9e9
AA
2342 /* No dirty page as there is zero RAM */
2343 if (!ram_bytes_total()) {
2344 return pages;
2345 }
2346
6f37bb8b 2347 pss.block = rs->last_seen_block;
a935e30f 2348 pss.page = rs->last_page;
b8fb8cb7
DDAG
2349 pss.complete_round = false;
2350
2351 if (!pss.block) {
2352 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
2353 }
56e93d26 2354
b9e60928 2355 do {
a82d593b 2356 again = true;
f20e2865 2357 found = get_queued_page(rs, &pss);
b9e60928 2358
a82d593b
DDAG
2359 if (!found) {
2360 /* priority queue empty, so just search for something dirty */
f20e2865 2361 found = find_dirty_block(rs, &pss, &again);
a82d593b 2362 }
f3f491fc 2363
a82d593b 2364 if (found) {
f20e2865 2365 pages = ram_save_host_page(rs, &pss, last_stage);
56e93d26 2366 }
b9e60928 2367 } while (!pages && again);
56e93d26 2368
6f37bb8b 2369 rs->last_seen_block = pss.block;
a935e30f 2370 rs->last_page = pss.page;
56e93d26
JQ
2371
2372 return pages;
2373}
2374
2375void acct_update_position(QEMUFile *f, size_t size, bool zero)
2376{
2377 uint64_t pages = size / TARGET_PAGE_SIZE;
f7ccd61b 2378
56e93d26 2379 if (zero) {
9360447d 2380 ram_counters.duplicate += pages;
56e93d26 2381 } else {
9360447d
JQ
2382 ram_counters.normal += pages;
2383 ram_counters.transferred += size;
56e93d26
JQ
2384 qemu_update_position(f, size);
2385 }
2386}
2387
56e93d26
JQ
2388uint64_t ram_bytes_total(void)
2389{
2390 RAMBlock *block;
2391 uint64_t total = 0;
2392
2393 rcu_read_lock();
b895de50 2394 RAMBLOCK_FOREACH_MIGRATABLE(block) {
56e93d26 2395 total += block->used_length;
99e15582 2396 }
56e93d26
JQ
2397 rcu_read_unlock();
2398 return total;
2399}
2400
f265e0e4 2401static void xbzrle_load_setup(void)
56e93d26 2402{
f265e0e4 2403 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
56e93d26
JQ
2404}
2405
f265e0e4
JQ
2406static void xbzrle_load_cleanup(void)
2407{
2408 g_free(XBZRLE.decoded_buf);
2409 XBZRLE.decoded_buf = NULL;
2410}
2411
7d7c96be
PX
2412static void ram_state_cleanup(RAMState **rsp)
2413{
b9ccaf6d
DDAG
2414 if (*rsp) {
2415 migration_page_queue_free(*rsp);
2416 qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2417 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2418 g_free(*rsp);
2419 *rsp = NULL;
2420 }
7d7c96be
PX
2421}
2422
84593a08
PX
2423static void xbzrle_cleanup(void)
2424{
2425 XBZRLE_cache_lock();
2426 if (XBZRLE.cache) {
2427 cache_fini(XBZRLE.cache);
2428 g_free(XBZRLE.encoded_buf);
2429 g_free(XBZRLE.current_buf);
2430 g_free(XBZRLE.zero_target_page);
2431 XBZRLE.cache = NULL;
2432 XBZRLE.encoded_buf = NULL;
2433 XBZRLE.current_buf = NULL;
2434 XBZRLE.zero_target_page = NULL;
2435 }
2436 XBZRLE_cache_unlock();
2437}
2438
f265e0e4 2439static void ram_save_cleanup(void *opaque)
56e93d26 2440{
53518d94 2441 RAMState **rsp = opaque;
6b6712ef 2442 RAMBlock *block;
eb859c53 2443
2ff64038
LZ
 2444 /* the caller must hold the iothread lock or be in a bh, so there is
 2445 * no write race against this migration_bitmap
2446 */
6b6712ef
JQ
2447 memory_global_dirty_log_stop();
2448
b895de50 2449 RAMBLOCK_FOREACH_MIGRATABLE(block) {
6b6712ef
JQ
2450 g_free(block->bmap);
2451 block->bmap = NULL;
2452 g_free(block->unsentmap);
2453 block->unsentmap = NULL;
56e93d26
JQ
2454 }
2455
84593a08 2456 xbzrle_cleanup();
f0afa331 2457 compress_threads_save_cleanup();
7d7c96be 2458 ram_state_cleanup(rsp);
56e93d26
JQ
2459}
2460
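/*
 * ram_state_reset: restart the dirty page search from the beginning of RAM
 * and re-enter the bulk stage, remembering the current ram_list version.
 */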
6f37bb8b 2461static void ram_state_reset(RAMState *rs)
56e93d26 2462{
6f37bb8b
JQ
2463 rs->last_seen_block = NULL;
2464 rs->last_sent_block = NULL;
269ace29 2465 rs->last_page = 0;
6f37bb8b
JQ
2466 rs->last_version = ram_list.version;
2467 rs->ram_bulk_stage = true;
56e93d26
JQ
2468}
2469
2470#define MAX_WAIT 50 /* ms, half buffered_file limit */
2471
4f2e4252
DDAG
2472/*
2473 * 'expected' is the value you expect the bitmap mostly to be full
2474 * of; it won't bother printing lines that are all this value.
2475 * If 'todump' is null the migration bitmap is dumped.
2476 */
6b6712ef
JQ
2477void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
2478 unsigned long pages)
4f2e4252 2479{
4f2e4252
DDAG
2480 int64_t cur;
2481 int64_t linelen = 128;
2482 char linebuf[129];
2483
6b6712ef 2484 for (cur = 0; cur < pages; cur += linelen) {
4f2e4252
DDAG
2485 int64_t curb;
2486 bool found = false;
2487 /*
2488 * Last line; catch the case where the line length
2489 * is longer than remaining ram
2490 */
6b6712ef
JQ
2491 if (cur + linelen > pages) {
2492 linelen = pages - cur;
4f2e4252
DDAG
2493 }
2494 for (curb = 0; curb < linelen; curb++) {
2495 bool thisbit = test_bit(cur + curb, todump);
2496 linebuf[curb] = thisbit ? '1' : '.';
2497 found = found || (thisbit != expected);
2498 }
2499 if (found) {
2500 linebuf[curb] = '\0';
2501 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
2502 }
2503 }
2504}
2505
e0b266f0
DDAG
2506/* **** functions for postcopy ***** */
2507
ced1c616
PB
2508void ram_postcopy_migrated_memory_release(MigrationState *ms)
2509{
2510 struct RAMBlock *block;
ced1c616 2511
b895de50 2512 RAMBLOCK_FOREACH_MIGRATABLE(block) {
6b6712ef
JQ
2513 unsigned long *bitmap = block->bmap;
2514 unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2515 unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
ced1c616
PB
2516
2517 while (run_start < range) {
2518 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
aaa2064c 2519 ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
ced1c616
PB
2520 (run_end - run_start) << TARGET_PAGE_BITS);
2521 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2522 }
2523 }
2524}
2525
3d0684b2
JQ
2526/**
2527 * postcopy_send_discard_bm_ram: discard a RAMBlock
2528 *
2529 * Returns zero on success
2530 *
e0b266f0
DDAG
2531 * Callback from postcopy_each_ram_send_discard for each RAMBlock
2532 * Note: At this point the 'unsentmap' is the processed bitmap combined
2533 * with the dirtymap; so a '1' means it's either dirty or unsent.
3d0684b2
JQ
2534 *
2535 * @ms: current migration state
2536 * @pds: state for postcopy
2537 * @start: RAMBlock starting page
2538 * @length: RAMBlock size
e0b266f0
DDAG
2539 */
2540static int postcopy_send_discard_bm_ram(MigrationState *ms,
2541 PostcopyDiscardState *pds,
6b6712ef 2542 RAMBlock *block)
e0b266f0 2543{
6b6712ef 2544 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
e0b266f0 2545 unsigned long current;
6b6712ef 2546 unsigned long *unsentmap = block->unsentmap;
e0b266f0 2547
6b6712ef 2548 for (current = 0; current < end; ) {
e0b266f0
DDAG
2549 unsigned long one = find_next_bit(unsentmap, end, current);
2550
2551 if (one <= end) {
2552 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
2553 unsigned long discard_length;
2554
2555 if (zero >= end) {
2556 discard_length = end - one;
2557 } else {
2558 discard_length = zero - one;
2559 }
d688c62d
DDAG
2560 if (discard_length) {
2561 postcopy_discard_send_range(ms, pds, one, discard_length);
2562 }
e0b266f0
DDAG
2563 current = one + discard_length;
2564 } else {
2565 current = one;
2566 }
2567 }
2568
2569 return 0;
2570}
2571
3d0684b2
JQ
2572/**
2573 * postcopy_each_ram_send_discard: discard all RAMBlocks
2574 *
2575 * Returns 0 for success or negative for error
2576 *
e0b266f0
DDAG
2577 * Utility for the outgoing postcopy code.
2578 * Calls postcopy_send_discard_bm_ram for each RAMBlock
2579 * passing it bitmap indexes and name.
e0b266f0
DDAG
2580 * (qemu_ram_foreach_block ends up passing unscaled lengths
2581 * which would mean postcopy code would have to deal with target page)
3d0684b2
JQ
2582 *
2583 * @ms: current migration state
e0b266f0
DDAG
2584 */
2585static int postcopy_each_ram_send_discard(MigrationState *ms)
2586{
2587 struct RAMBlock *block;
2588 int ret;
2589
b895de50 2590 RAMBLOCK_FOREACH_MIGRATABLE(block) {
6b6712ef
JQ
2591 PostcopyDiscardState *pds =
2592 postcopy_discard_send_init(ms, block->idstr);
e0b266f0
DDAG
2593
2594 /*
2595 * Postcopy sends chunks of bitmap over the wire, but it
 2596 * just needs indexes at this point, which avoids it having
2597 * target page specific code.
2598 */
6b6712ef 2599 ret = postcopy_send_discard_bm_ram(ms, pds, block);
e0b266f0
DDAG
2600 postcopy_discard_send_finish(ms, pds);
2601 if (ret) {
2602 return ret;
2603 }
2604 }
2605
2606 return 0;
2607}
2608
3d0684b2
JQ
2609/**
 2610 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
2611 *
2612 * Helper for postcopy_chunk_hostpages; it's called twice to
 2613 * canonicalize the two bitmaps, which are similar, but one is
2614 * inverted.
99e314eb 2615 *
3d0684b2
JQ
2616 * Postcopy requires that all target pages in a hostpage are dirty or
2617 * clean, not a mix. This function canonicalizes the bitmaps.
99e314eb 2618 *
3d0684b2
JQ
2619 * @ms: current migration state
2620 * @unsent_pass: if true we need to canonicalize partially unsent host pages
2621 * otherwise we need to canonicalize partially dirty host pages
2622 * @block: block that contains the page we want to canonicalize
2623 * @pds: state for postcopy
99e314eb
DDAG
2624 */
2625static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
2626 RAMBlock *block,
2627 PostcopyDiscardState *pds)
2628{
53518d94 2629 RAMState *rs = ram_state;
6b6712ef
JQ
2630 unsigned long *bitmap = block->bmap;
2631 unsigned long *unsentmap = block->unsentmap;
29c59172 2632 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
6b6712ef 2633 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
99e314eb
DDAG
2634 unsigned long run_start;
2635
29c59172
DDAG
2636 if (block->page_size == TARGET_PAGE_SIZE) {
2637 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2638 return;
2639 }
2640
99e314eb
DDAG
2641 if (unsent_pass) {
2642 /* Find a sent page */
6b6712ef 2643 run_start = find_next_zero_bit(unsentmap, pages, 0);
99e314eb
DDAG
2644 } else {
2645 /* Find a dirty page */
6b6712ef 2646 run_start = find_next_bit(bitmap, pages, 0);
99e314eb
DDAG
2647 }
2648
6b6712ef 2649 while (run_start < pages) {
99e314eb
DDAG
2650 bool do_fixup = false;
2651 unsigned long fixup_start_addr;
2652 unsigned long host_offset;
2653
2654 /*
2655 * If the start of this run of pages is in the middle of a host
2656 * page, then we need to fixup this host page.
2657 */
2658 host_offset = run_start % host_ratio;
2659 if (host_offset) {
2660 do_fixup = true;
2661 run_start -= host_offset;
2662 fixup_start_addr = run_start;
2663 /* For the next pass */
2664 run_start = run_start + host_ratio;
2665 } else {
2666 /* Find the end of this run */
2667 unsigned long run_end;
2668 if (unsent_pass) {
6b6712ef 2669 run_end = find_next_bit(unsentmap, pages, run_start + 1);
99e314eb 2670 } else {
6b6712ef 2671 run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
99e314eb
DDAG
2672 }
2673 /*
2674 * If the end isn't at the start of a host page, then the
2675 * run doesn't finish at the end of a host page
2676 * and we need to discard.
2677 */
2678 host_offset = run_end % host_ratio;
2679 if (host_offset) {
2680 do_fixup = true;
2681 fixup_start_addr = run_end - host_offset;
2682 /*
2683 * This host page has gone, the next loop iteration starts
2684 * from after the fixup
2685 */
2686 run_start = fixup_start_addr + host_ratio;
2687 } else {
2688 /*
2689 * No discards on this iteration, next loop starts from
2690 * next sent/dirty page
2691 */
2692 run_start = run_end + 1;
2693 }
2694 }
2695
2696 if (do_fixup) {
2697 unsigned long page;
2698
2699 /* Tell the destination to discard this page */
2700 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
2701 /* For the unsent_pass we:
2702 * discard partially sent pages
2703 * For the !unsent_pass (dirty) we:
2704 * discard partially dirty pages that were sent
2705 * (any partially sent pages were already discarded
2706 * by the previous unsent_pass)
2707 */
2708 postcopy_discard_send_range(ms, pds, fixup_start_addr,
2709 host_ratio);
2710 }
2711
2712 /* Clean up the bitmap */
2713 for (page = fixup_start_addr;
2714 page < fixup_start_addr + host_ratio; page++) {
2715 /* All pages in this host page are now not sent */
2716 set_bit(page, unsentmap);
2717
2718 /*
2719 * Remark them as dirty, updating the count for any pages
2720 * that weren't previously dirty.
2721 */
0d8ec885 2722 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
99e314eb
DDAG
2723 }
2724 }
2725
2726 if (unsent_pass) {
2727 /* Find the next sent page for the next iteration */
6b6712ef 2728 run_start = find_next_zero_bit(unsentmap, pages, run_start);
99e314eb
DDAG
2729 } else {
2730 /* Find the next dirty page for the next iteration */
6b6712ef 2731 run_start = find_next_bit(bitmap, pages, run_start);
99e314eb
DDAG
2732 }
2733 }
2734}
2735
3d0684b2
JQ
2736/**
 2737 * postcopy_chunk_hostpages: discard any partially sent host page
2738 *
99e314eb
DDAG
2739 * Utility for the outgoing postcopy code.
2740 *
2741 * Discard any partially sent host-page size chunks, mark any partially
29c59172
DDAG
2742 * dirty host-page size chunks as all dirty. In this case the host-page
2743 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
99e314eb 2744 *
3d0684b2
JQ
2745 * Returns zero on success
2746 *
2747 * @ms: current migration state
6b6712ef 2748 * @block: block we want to work with
99e314eb 2749 */
6b6712ef 2750static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
99e314eb 2751{
6b6712ef
JQ
2752 PostcopyDiscardState *pds =
2753 postcopy_discard_send_init(ms, block->idstr);
99e314eb 2754
6b6712ef
JQ
2755 /* First pass: Discard all partially sent host pages */
2756 postcopy_chunk_hostpages_pass(ms, true, block, pds);
2757 /*
2758 * Second pass: Ensure that all partially dirty host pages are made
2759 * fully dirty.
2760 */
2761 postcopy_chunk_hostpages_pass(ms, false, block, pds);
99e314eb 2762
6b6712ef 2763 postcopy_discard_send_finish(ms, pds);
99e314eb
DDAG
2764 return 0;
2765}
2766
3d0684b2
JQ
2767/**
2768 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2769 *
2770 * Returns zero on success
2771 *
e0b266f0
DDAG
 2772 * Transmit the set of pages to be discarded after precopy to the target.
 2773 * These are pages that:
2774 * a) Have been previously transmitted but are now dirty again
2775 * b) Pages that have never been transmitted, this ensures that
2776 * any pages on the destination that have been mapped by background
2777 * tasks get discarded (transparent huge pages is the specific concern)
2778 * Hopefully this is pretty sparse
3d0684b2
JQ
2779 *
2780 * @ms: current migration state
e0b266f0
DDAG
2781 */
2782int ram_postcopy_send_discard_bitmap(MigrationState *ms)
2783{
53518d94 2784 RAMState *rs = ram_state;
6b6712ef 2785 RAMBlock *block;
e0b266f0 2786 int ret;
e0b266f0
DDAG
2787
2788 rcu_read_lock();
2789
2790 /* This should be our last sync, the src is now paused */
eb859c53 2791 migration_bitmap_sync(rs);
e0b266f0 2792
6b6712ef
JQ
2793 /* Easiest way to make sure we don't resume in the middle of a host-page */
2794 rs->last_seen_block = NULL;
2795 rs->last_sent_block = NULL;
2796 rs->last_page = 0;
e0b266f0 2797
b895de50 2798 RAMBLOCK_FOREACH_MIGRATABLE(block) {
6b6712ef
JQ
2799 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2800 unsigned long *bitmap = block->bmap;
2801 unsigned long *unsentmap = block->unsentmap;
2802
2803 if (!unsentmap) {
2804 /* We don't have a safe way to resize the sentmap, so
2805 * if the bitmap was resized it will be NULL at this
2806 * point.
2807 */
2808 error_report("migration ram resized during precopy phase");
2809 rcu_read_unlock();
2810 return -EINVAL;
2811 }
2812 /* Deal with TPS != HPS and huge pages */
2813 ret = postcopy_chunk_hostpages(ms, block);
2814 if (ret) {
2815 rcu_read_unlock();
2816 return ret;
2817 }
e0b266f0 2818
6b6712ef
JQ
2819 /*
2820 * Update the unsentmap to be unsentmap = unsentmap | dirty
2821 */
2822 bitmap_or(unsentmap, unsentmap, bitmap, pages);
e0b266f0 2823#ifdef DEBUG_POSTCOPY
6b6712ef 2824 ram_debug_dump_bitmap(unsentmap, true, pages);
e0b266f0 2825#endif
6b6712ef
JQ
2826 }
2827 trace_ram_postcopy_send_discard_bitmap();
e0b266f0
DDAG
2828
2829 ret = postcopy_each_ram_send_discard(ms);
2830 rcu_read_unlock();
2831
2832 return ret;
2833}
2834
3d0684b2
JQ
2835/**
2836 * ram_discard_range: discard dirtied pages at the beginning of postcopy
e0b266f0 2837 *
3d0684b2 2838 * Returns zero on success
e0b266f0 2839 *
36449157
JQ
2840 * @rbname: name of the RAMBlock of the request. NULL means the
 2841 * same as the last one.
3d0684b2
JQ
2842 * @start: RAMBlock starting page
2843 * @length: RAMBlock size
e0b266f0 2844 */
aaa2064c 2845int ram_discard_range(const char *rbname, uint64_t start, size_t length)
e0b266f0
DDAG
2846{
2847 int ret = -1;
2848
36449157 2849 trace_ram_discard_range(rbname, start, length);
d3a5038c 2850
e0b266f0 2851 rcu_read_lock();
36449157 2852 RAMBlock *rb = qemu_ram_block_by_name(rbname);
e0b266f0
DDAG
2853
2854 if (!rb) {
36449157 2855 error_report("ram_discard_range: Failed to find block '%s'", rbname);
e0b266f0
DDAG
2856 goto err;
2857 }
2858
814bb08f
PX
2859 /*
2860 * On source VM, we don't need to update the received bitmap since
2861 * we don't even have one.
2862 */
2863 if (rb->receivedmap) {
2864 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
2865 length >> qemu_target_page_bits());
2866 }
2867
d3a5038c 2868 ret = ram_block_discard_range(rb, start, length);
e0b266f0
DDAG
2869
2870err:
2871 rcu_read_unlock();
2872
2873 return ret;
2874}
2875
84593a08
PX
2876/*
2877 * For every allocation, we will try not to crash the VM if the
 2878 * allocation fails.
2879 */
2880static int xbzrle_init(void)
2881{
2882 Error *local_err = NULL;
2883
2884 if (!migrate_use_xbzrle()) {
2885 return 0;
2886 }
2887
2888 XBZRLE_cache_lock();
2889
2890 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
2891 if (!XBZRLE.zero_target_page) {
2892 error_report("%s: Error allocating zero page", __func__);
2893 goto err_out;
2894 }
2895
2896 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
2897 TARGET_PAGE_SIZE, &local_err);
2898 if (!XBZRLE.cache) {
2899 error_report_err(local_err);
2900 goto free_zero_page;
2901 }
2902
2903 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
2904 if (!XBZRLE.encoded_buf) {
2905 error_report("%s: Error allocating encoded_buf", __func__);
2906 goto free_cache;
2907 }
2908
2909 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
2910 if (!XBZRLE.current_buf) {
2911 error_report("%s: Error allocating current_buf", __func__);
2912 goto free_encoded_buf;
2913 }
2914
2915 /* We are all good */
2916 XBZRLE_cache_unlock();
2917 return 0;
2918
2919free_encoded_buf:
2920 g_free(XBZRLE.encoded_buf);
2921 XBZRLE.encoded_buf = NULL;
2922free_cache:
2923 cache_fini(XBZRLE.cache);
2924 XBZRLE.cache = NULL;
2925free_zero_page:
2926 g_free(XBZRLE.zero_target_page);
2927 XBZRLE.zero_target_page = NULL;
2928err_out:
2929 XBZRLE_cache_unlock();
2930 return -ENOMEM;
2931}
2932
53518d94 2933static int ram_state_init(RAMState **rsp)
56e93d26 2934{
7d00ee6a
PX
2935 *rsp = g_try_new0(RAMState, 1);
2936
2937 if (!*rsp) {
2938 error_report("%s: Init ramstate fail", __func__);
2939 return -1;
2940 }
53518d94
JQ
2941
2942 qemu_mutex_init(&(*rsp)->bitmap_mutex);
2943 qemu_mutex_init(&(*rsp)->src_page_req_mutex);
2944 QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
56e93d26 2945
7d00ee6a
PX
2946 /*
2947 * Count the total number of pages used by ram blocks not including any
2948 * gaps due to alignment or unplugs.
2949 */
2950 (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
2951
2952 ram_state_reset(*rsp);
2953
2954 return 0;
2955}
2956
d6eff5d7 2957static void ram_list_init_bitmaps(void)
7d00ee6a 2958{
d6eff5d7
PX
2959 RAMBlock *block;
2960 unsigned long pages;
56e93d26 2961
0827b9e9
AA
2962 /* Skip setting bitmap if there is no RAM */
2963 if (ram_bytes_total()) {
b895de50 2964 RAMBLOCK_FOREACH_MIGRATABLE(block) {
d6eff5d7 2965 pages = block->max_length >> TARGET_PAGE_BITS;
6b6712ef
JQ
2966 block->bmap = bitmap_new(pages);
2967 bitmap_set(block->bmap, 0, pages);
2968 if (migrate_postcopy_ram()) {
2969 block->unsentmap = bitmap_new(pages);
2970 bitmap_set(block->unsentmap, 0, pages);
2971 }
0827b9e9 2972 }
f3f491fc 2973 }
d6eff5d7
PX
2974}
2975
2976static void ram_init_bitmaps(RAMState *rs)
2977{
2978 /* For memory_global_dirty_log_start below. */
2979 qemu_mutex_lock_iothread();
2980 qemu_mutex_lock_ramlist();
2981 rcu_read_lock();
f3f491fc 2982
d6eff5d7 2983 ram_list_init_bitmaps();
56e93d26 2984 memory_global_dirty_log_start();
d6eff5d7
PX
2985 migration_bitmap_sync(rs);
2986
2987 rcu_read_unlock();
56e93d26 2988 qemu_mutex_unlock_ramlist();
49877834 2989 qemu_mutex_unlock_iothread();
d6eff5d7
PX
2990}
2991
2992static int ram_init_all(RAMState **rsp)
2993{
2994 if (ram_state_init(rsp)) {
2995 return -1;
2996 }
2997
2998 if (xbzrle_init()) {
2999 ram_state_cleanup(rsp);
3000 return -1;
3001 }
3002
3003 ram_init_bitmaps(*rsp);
a91246c9
HZ
3004
3005 return 0;
3006}
3007
08614f34
PX
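/*
 * ram_state_resume_prepare: reinitialize RAMState when a migration is
 * resumed: recount dirty pages from the per-block bitmaps, restart the
 * page search and cache the new output QEMUFile in rs->f.
 */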
3008static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
3009{
3010 RAMBlock *block;
3011 uint64_t pages = 0;
3012
3013 /*
3014 * Postcopy is not using xbzrle/compression, so no need for that.
 3015 * Also, since the source is already halted, we don't need to care
 3016 * about dirty page logging either.
3017 */
3018
ff0769a4 3019 RAMBLOCK_FOREACH_MIGRATABLE(block) {
08614f34
PX
3020 pages += bitmap_count_one(block->bmap,
3021 block->used_length >> TARGET_PAGE_BITS);
3022 }
3023
3024 /* This may not be aligned with current bitmaps. Recalculate. */
3025 rs->migration_dirty_pages = pages;
3026
3027 rs->last_seen_block = NULL;
3028 rs->last_sent_block = NULL;
3029 rs->last_page = 0;
3030 rs->last_version = ram_list.version;
3031 /*
3032 * Disable the bulk stage, otherwise we'll resend the whole RAM no
3033 * matter what we have sent.
3034 */
3035 rs->ram_bulk_stage = false;
3036
3037 /* Update RAMState cache of output QEMUFile */
3038 rs->f = out;
3039
3040 trace_ram_state_resume_prepare(pages);
3041}
3042
3d0684b2
JQ
3043/*
3044 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
a91246c9
HZ
 3045 * a long-running RCU critical section. When rcu-reclaims in the code
3046 * start to become numerous it will be necessary to reduce the
3047 * granularity of these critical sections.
3048 */
3049
3d0684b2
JQ
3050/**
3051 * ram_save_setup: Setup RAM for migration
3052 *
3053 * Returns zero to indicate success and negative for error
3054 *
3055 * @f: QEMUFile where to send the data
3056 * @opaque: RAMState pointer
3057 */
a91246c9
HZ
3058static int ram_save_setup(QEMUFile *f, void *opaque)
3059{
53518d94 3060 RAMState **rsp = opaque;
a91246c9
HZ
3061 RAMBlock *block;
3062
dcaf446e
XG
3063 if (compress_threads_save_setup()) {
3064 return -1;
3065 }
3066
a91246c9
HZ
3067 /* migration has already setup the bitmap, reuse it. */
3068 if (!migration_in_colo_state()) {
7d00ee6a 3069 if (ram_init_all(rsp) != 0) {
dcaf446e 3070 compress_threads_save_cleanup();
a91246c9 3071 return -1;
53518d94 3072 }
a91246c9 3073 }
53518d94 3074 (*rsp)->f = f;
a91246c9
HZ
3075
3076 rcu_read_lock();
56e93d26
JQ
3077
3078 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
3079
b895de50 3080 RAMBLOCK_FOREACH_MIGRATABLE(block) {
56e93d26
JQ
3081 qemu_put_byte(f, strlen(block->idstr));
3082 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
3083 qemu_put_be64(f, block->used_length);
ef08fb38
DDAG
3084 if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
3085 qemu_put_be64(f, block->page_size);
3086 }
56e93d26
JQ
3087 }
3088
3089 rcu_read_unlock();
3090
3091 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
3092 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
3093
6df264ac 3094 multifd_send_sync_main();
56e93d26 3095 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
35374cbd 3096 qemu_fflush(f);
56e93d26
JQ
3097
3098 return 0;
3099}
3100
3d0684b2
JQ
3101/**
3102 * ram_save_iterate: iterative stage for migration
3103 *
3104 * Returns zero to indicate success and negative for error
3105 *
3106 * @f: QEMUFile where to send the data
3107 * @opaque: RAMState pointer
3108 */
56e93d26
JQ
3109static int ram_save_iterate(QEMUFile *f, void *opaque)
3110{
53518d94
JQ
3111 RAMState **temp = opaque;
3112 RAMState *rs = *temp;
56e93d26
JQ
3113 int ret;
3114 int i;
3115 int64_t t0;
5c90308f 3116 int done = 0;
56e93d26 3117
b2557345
PL
3118 if (blk_mig_bulk_active()) {
3119 /* Avoid transferring ram during bulk phase of block migration as
3120 * the bulk phase will usually take a long time and transferring
3121 * ram updates during that time is pointless. */
3122 goto out;
3123 }
3124
56e93d26 3125 rcu_read_lock();
6f37bb8b
JQ
3126 if (ram_list.version != rs->last_version) {
3127 ram_state_reset(rs);
56e93d26
JQ
3128 }
3129
3130 /* Read version before ram_list.blocks */
3131 smp_rmb();
3132
3133 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
3134
3135 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
3136 i = 0;
e03a34f8
DDAG
3137 while ((ret = qemu_file_rate_limit(f)) == 0 ||
3138 !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
56e93d26
JQ
3139 int pages;
3140
e03a34f8
DDAG
3141 if (qemu_file_get_error(f)) {
3142 break;
3143 }
3144
ce25d337 3145 pages = ram_find_and_save_block(rs, false);
56e93d26
JQ
 3146 /* no more pages to send */
3147 if (pages == 0) {
5c90308f 3148 done = 1;
56e93d26
JQ
3149 break;
3150 }
23b28c3c 3151 rs->iterations++;
070afca2 3152
56e93d26
JQ
3153 /* we want to check in the 1st loop, just in case it was the 1st time
3154 and we had to sync the dirty bitmap.
 3155 qemu_get_clock_ns() is a bit expensive, so we only check every few
3156 iterations
3157 */
3158 if ((i & 63) == 0) {
3159 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
3160 if (t1 > MAX_WAIT) {
55c4446b 3161 trace_ram_save_iterate_big_wait(t1, i);
56e93d26
JQ
3162 break;
3163 }
3164 }
3165 i++;
3166 }
ce25d337 3167 flush_compressed_data(rs);
56e93d26
JQ
3168 rcu_read_unlock();
3169
3170 /*
3171 * Must occur before EOS (or any QEMUFile operation)
3172 * because of RDMA protocol.
3173 */
3174 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
3175
6df264ac 3176 multifd_send_sync_main();
b2557345 3177out:
56e93d26 3178 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
35374cbd 3179 qemu_fflush(f);
9360447d 3180 ram_counters.transferred += 8;
56e93d26
JQ
3181
3182 ret = qemu_file_get_error(f);
3183 if (ret < 0) {
3184 return ret;
3185 }
3186
5c90308f 3187 return done;
56e93d26
JQ
3188}
3189
3d0684b2
JQ
3190/**
3191 * ram_save_complete: function called to send the remaining amount of ram
3192 *
3193 * Returns zero to indicate success
3194 *
3195 * Called with iothread lock
3196 *
3197 * @f: QEMUFile where to send the data
3198 * @opaque: RAMState pointer
3199 */
56e93d26
JQ
3200static int ram_save_complete(QEMUFile *f, void *opaque)
3201{
53518d94
JQ
3202 RAMState **temp = opaque;
3203 RAMState *rs = *temp;
6f37bb8b 3204
56e93d26
JQ
3205 rcu_read_lock();
3206
5727309d 3207 if (!migration_in_postcopy()) {
8d820d6f 3208 migration_bitmap_sync(rs);
663e6c1d 3209 }
56e93d26
JQ
3210
3211 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
3212
3213 /* try transferring iterative blocks of memory */
3214
3215 /* flush all remaining blocks regardless of rate limiting */
3216 while (true) {
3217 int pages;
3218
ce25d337 3219 pages = ram_find_and_save_block(rs, !migration_in_colo_state());
56e93d26
JQ
 3220 /* no more blocks to send */
3221 if (pages == 0) {
3222 break;
3223 }
3224 }
3225
ce25d337 3226 flush_compressed_data(rs);
56e93d26 3227 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
56e93d26
JQ
3228
3229 rcu_read_unlock();
d09a6fde 3230
6df264ac 3231 multifd_send_sync_main();
56e93d26 3232 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
35374cbd 3233 qemu_fflush(f);
56e93d26
JQ
3234
3235 return 0;
3236}
3237
c31b098f 3238static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
47995026
VSO
3239 uint64_t *res_precopy_only,
3240 uint64_t *res_compatible,
3241 uint64_t *res_postcopy_only)
56e93d26 3242{
53518d94
JQ
3243 RAMState **temp = opaque;
3244 RAMState *rs = *temp;
56e93d26
JQ
3245 uint64_t remaining_size;
3246
9edabd4d 3247 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
56e93d26 3248
5727309d 3249 if (!migration_in_postcopy() &&
663e6c1d 3250 remaining_size < max_size) {
56e93d26
JQ
3251 qemu_mutex_lock_iothread();
3252 rcu_read_lock();
8d820d6f 3253 migration_bitmap_sync(rs);
56e93d26
JQ
3254 rcu_read_unlock();
3255 qemu_mutex_unlock_iothread();
9edabd4d 3256 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
56e93d26 3257 }
c31b098f 3258
86e1167e
VSO
3259 if (migrate_postcopy_ram()) {
3260 /* We can do postcopy, and all the data is postcopiable */
47995026 3261 *res_compatible += remaining_size;
86e1167e 3262 } else {
47995026 3263 *res_precopy_only += remaining_size;
86e1167e 3264 }
56e93d26
JQ
3265}
3266
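/*
 * load_xbzrle: read an XBZRLE-compressed page from the stream and decode
 * it into the host page at @host.
 *
 * Returns 0 for success or -1 on error
 */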
3267static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3268{
3269 unsigned int xh_len;
3270 int xh_flags;
063e760a 3271 uint8_t *loaded_data;
56e93d26 3272
56e93d26
JQ
3273 /* extract RLE header */
3274 xh_flags = qemu_get_byte(f);
3275 xh_len = qemu_get_be16(f);
3276
3277 if (xh_flags != ENCODING_FLAG_XBZRLE) {
3278 error_report("Failed to load XBZRLE page - wrong compression!");
3279 return -1;
3280 }
3281
3282 if (xh_len > TARGET_PAGE_SIZE) {
3283 error_report("Failed to load XBZRLE page - len overflow!");
3284 return -1;
3285 }
f265e0e4 3286 loaded_data = XBZRLE.decoded_buf;
56e93d26 3287 /* load data and decode */
f265e0e4 3288 /* it can change loaded_data to point to an internal buffer */
063e760a 3289 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
56e93d26
JQ
3290
3291 /* decode RLE */
063e760a 3292 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
56e93d26
JQ
3293 TARGET_PAGE_SIZE) == -1) {
3294 error_report("Failed to load XBZRLE page - decode error!");
3295 return -1;
3296 }
3297
3298 return 0;
3299}
3300
3d0684b2
JQ
3301/**
3302 * ram_block_from_stream: read a RAMBlock id from the migration stream
3303 *
3304 * Must be called from within a rcu critical section.
3305 *
56e93d26 3306 * Returns a pointer from within the RCU-protected ram_list.
a7180877 3307 *
3d0684b2
JQ
3308 * @f: QEMUFile where to read the data from
3309 * @flags: Page flags (mostly to see if it's a continuation of previous block)
a7180877 3310 */
3d0684b2 3311static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
56e93d26
JQ
3312{
3313 static RAMBlock *block = NULL;
3314 char id[256];
3315 uint8_t len;
3316
3317 if (flags & RAM_SAVE_FLAG_CONTINUE) {
4c4bad48 3318 if (!block) {
56e93d26
JQ
3319 error_report("Ack, bad migration stream!");
3320 return NULL;
3321 }
4c4bad48 3322 return block;
56e93d26
JQ
3323 }
3324
3325 len = qemu_get_byte(f);
3326 qemu_get_buffer(f, (uint8_t *)id, len);
3327 id[len] = 0;
3328
e3dd7493 3329 block = qemu_ram_block_by_name(id);
4c4bad48
HZ
3330 if (!block) {
3331 error_report("Can't find block %s", id);
3332 return NULL;
56e93d26
JQ
3333 }
3334
b895de50
CLG
3335 if (!qemu_ram_is_migratable(block)) {
3336 error_report("block %s should not be migrated !", id);
3337 return NULL;
3338 }
3339
4c4bad48
HZ
3340 return block;
3341}
3342
3343static inline void *host_from_ram_block_offset(RAMBlock *block,
3344 ram_addr_t offset)
3345{
3346 if (!offset_in_ramblock(block, offset)) {
3347 return NULL;
3348 }
3349
3350 return block->host + offset;
56e93d26
JQ
3351}
3352
3d0684b2
JQ
3353/**
3354 * ram_handle_compressed: handle the zero page case
3355 *
56e93d26
JQ
3356 * If a page (or a whole RDMA chunk) has been
3357 * determined to be zero, then zap it.
3d0684b2
JQ
3358 *
3359 * @host: host address for the zero page
3360 * @ch: what the page is filled from. We only support zero
3361 * @size: size of the zero page
56e93d26
JQ
3362 */
3363void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
3364{
3365 if (ch != 0 || !is_zero_range(host, size)) {
3366 memset(host, ch, size);
3367 }
3368}
3369
797ca154
XG
3370/* return the size after decompression, or negative value on error */
3371static int
3372qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
3373 const uint8_t *source, size_t source_len)
3374{
3375 int err;
3376
3377 err = inflateReset(stream);
3378 if (err != Z_OK) {
3379 return -1;
3380 }
3381
3382 stream->avail_in = source_len;
3383 stream->next_in = (uint8_t *)source;
3384 stream->avail_out = dest_len;
3385 stream->next_out = dest;
3386
3387 err = inflate(stream, Z_NO_FLUSH);
3388 if (err != Z_STREAM_END) {
3389 return -1;
3390 }
3391
3392 return stream->total_out;
3393}
3394
56e93d26
JQ
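/*
 * do_data_decompress: body of the decompression threads. Waits for a
 * compressed page to be handed over, inflates it into the destination
 * host page and signals completion; decompression errors are recorded
 * on decomp_file (when error checking is enabled).
 */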
3395static void *do_data_decompress(void *opaque)
3396{
3397 DecompressParam *param = opaque;
3398 unsigned long pagesize;
33d151f4 3399 uint8_t *des;
34ab9e97 3400 int len, ret;
56e93d26 3401
33d151f4 3402 qemu_mutex_lock(&param->mutex);
90e56fb4 3403 while (!param->quit) {
33d151f4
LL
3404 if (param->des) {
3405 des = param->des;
3406 len = param->len;
3407 param->des = 0;
3408 qemu_mutex_unlock(&param->mutex);
3409
56e93d26 3410 pagesize = TARGET_PAGE_SIZE;
34ab9e97
XG
3411
3412 ret = qemu_uncompress_data(&param->stream, des, pagesize,
3413 param->compbuf, len);
f548222c 3414 if (ret < 0 && migrate_get_current()->decompress_error_check) {
34ab9e97
XG
3415 error_report("decompress data failed");
3416 qemu_file_set_error(decomp_file, ret);
3417 }
73a8912b 3418
33d151f4
LL
3419 qemu_mutex_lock(&decomp_done_lock);
3420 param->done = true;
3421 qemu_cond_signal(&decomp_done_cond);
3422 qemu_mutex_unlock(&decomp_done_lock);
3423
3424 qemu_mutex_lock(&param->mutex);
3425 } else {
3426 qemu_cond_wait(&param->cond, &param->mutex);
3427 }
56e93d26 3428 }
33d151f4 3429 qemu_mutex_unlock(&param->mutex);
56e93d26
JQ
3430
3431 return NULL;
3432}
3433
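/*
 * wait_for_decompress_done: wait until all decompression threads are idle.
 *
 * Returns any error recorded on the decompression QEMUFile, or 0.
 */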
34ab9e97 3434static int wait_for_decompress_done(void)
5533b2e9
LL
3435{
3436 int idx, thread_count;
3437
3438 if (!migrate_use_compression()) {
34ab9e97 3439 return 0;
5533b2e9
LL
3440 }
3441
3442 thread_count = migrate_decompress_threads();
3443 qemu_mutex_lock(&decomp_done_lock);
3444 for (idx = 0; idx < thread_count; idx++) {
3445 while (!decomp_param[idx].done) {
3446 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3447 }
3448 }
3449 qemu_mutex_unlock(&decomp_done_lock);
34ab9e97 3450 return qemu_file_get_error(decomp_file);
5533b2e9
LL
3451}
3452
f0afa331 3453static void compress_threads_load_cleanup(void)
56e93d26
JQ
3454{
3455 int i, thread_count;
3456
3416ab5b
JQ
3457 if (!migrate_use_compression()) {
3458 return;
3459 }
56e93d26
JQ
3460 thread_count = migrate_decompress_threads();
3461 for (i = 0; i < thread_count; i++) {
797ca154
XG
3462 /*
 3463 * we use it as an indicator which shows whether the thread is
 3464 * properly initialized or not
3465 */
3466 if (!decomp_param[i].compbuf) {
3467 break;
3468 }
3469
56e93d26 3470 qemu_mutex_lock(&decomp_param[i].mutex);
90e56fb4 3471 decomp_param[i].quit = true;
56e93d26
JQ
3472 qemu_cond_signal(&decomp_param[i].cond);
3473 qemu_mutex_unlock(&decomp_param[i].mutex);
3474 }
3475 for (i = 0; i < thread_count; i++) {
797ca154
XG
3476 if (!decomp_param[i].compbuf) {
3477 break;
3478 }
3479
56e93d26
JQ
3480 qemu_thread_join(decompress_threads + i);
3481 qemu_mutex_destroy(&decomp_param[i].mutex);
3482 qemu_cond_destroy(&decomp_param[i].cond);
797ca154 3483 inflateEnd(&decomp_param[i].stream);
56e93d26 3484 g_free(decomp_param[i].compbuf);
797ca154 3485 decomp_param[i].compbuf = NULL;
56e93d26
JQ
3486 }
3487 g_free(decompress_threads);
3488 g_free(decomp_param);
56e93d26
JQ
3489 decompress_threads = NULL;
3490 decomp_param = NULL;
34ab9e97 3491 decomp_file = NULL;
56e93d26
JQ
3492}
3493
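/*
 * compress_threads_load_setup: allocate and start the decompression
 * threads on the incoming side.
 *
 * Returns 0 for success and -1 in case of error (any partially
 * initialized state is cleaned up again).
 */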
34ab9e97 3494static int compress_threads_load_setup(QEMUFile *f)
797ca154
XG
3495{
3496 int i, thread_count;
3497
3498 if (!migrate_use_compression()) {
3499 return 0;
3500 }
3501
3502 thread_count = migrate_decompress_threads();
3503 decompress_threads = g_new0(QemuThread, thread_count);
3504 decomp_param = g_new0(DecompressParam, thread_count);
3505 qemu_mutex_init(&decomp_done_lock);
3506 qemu_cond_init(&decomp_done_cond);
34ab9e97 3507 decomp_file = f;
797ca154
XG
3508 for (i = 0; i < thread_count; i++) {
3509 if (inflateInit(&decomp_param[i].stream) != Z_OK) {
3510 goto exit;
3511 }
3512
3513 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
3514 qemu_mutex_init(&decomp_param[i].mutex);
3515 qemu_cond_init(&decomp_param[i].cond);
3516 decomp_param[i].done = true;
3517 decomp_param[i].quit = false;
3518 qemu_thread_create(decompress_threads + i, "decompress",
3519 do_data_decompress, decomp_param + i,
3520 QEMU_THREAD_JOINABLE);
3521 }
3522 return 0;
3523exit:
3524 compress_threads_load_cleanup();
3525 return -1;
3526}
3527
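/*
 * decompress_data_with_multi_threads: read @len bytes of compressed data
 * from @f and hand them to the first idle decompression thread, waiting
 * on decomp_done_cond if all threads are busy.
 */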
c1bc6626 3528static void decompress_data_with_multi_threads(QEMUFile *f,
56e93d26
JQ
3529 void *host, int len)
3530{
3531 int idx, thread_count;
3532
3533 thread_count = migrate_decompress_threads();
73a8912b 3534 qemu_mutex_lock(&decomp_done_lock);
56e93d26
JQ
3535 while (true) {
3536 for (idx = 0; idx < thread_count; idx++) {
73a8912b 3537 if (decomp_param[idx].done) {
33d151f4
LL
3538 decomp_param[idx].done = false;
3539 qemu_mutex_lock(&decomp_param[idx].mutex);
c1bc6626 3540 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
56e93d26
JQ
3541 decomp_param[idx].des = host;
3542 decomp_param[idx].len = len;
33d151f4
LL
3543 qemu_cond_signal(&decomp_param[idx].cond);
3544 qemu_mutex_unlock(&decomp_param[idx].mutex);
56e93d26
JQ
3545 break;
3546 }
3547 }
3548 if (idx < thread_count) {
3549 break;
73a8912b
LL
3550 } else {
3551 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
56e93d26
JQ
3552 }
3553 }
73a8912b 3554 qemu_mutex_unlock(&decomp_done_lock);
56e93d26
JQ
3555}
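/*
 * The dispatch above relies on a "done flag + condition variable" handshake:
 * an idle worker is marked done, the dispatcher claims it, fills its
 * compbuf/des/len and signals its private cond, and the worker signals
 * decomp_done_cond when it finishes.  A standalone sketch of that pattern
 * (not part of ram.c), assuming plain pthreads instead of QEMU's
 * qemu_mutex/qemu_cond wrappers:
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done_cond = PTHREAD_COND_INITIALIZER;
static bool worker_done = true;

static void sketch_worker_finish(void)
{
    pthread_mutex_lock(&done_lock);
    worker_done = true;                 /* worker is idle again */
    pthread_cond_signal(&done_cond);    /* wake the dispatcher */
    pthread_mutex_unlock(&done_lock);
}

static void sketch_claim_idle_worker(void)
{
    pthread_mutex_lock(&done_lock);
    while (!worker_done) {              /* re-check after every wakeup */
        pthread_cond_wait(&done_cond, &done_lock);
    }
    worker_done = false;                /* hand the worker new input */
    pthread_mutex_unlock(&done_lock);
}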
3556
f265e0e4
JQ
3557/**
3558 * ram_load_setup: Setup RAM for migration incoming side
3559 *
3560 * Returns zero to indicate success and negative for error
3561 *
3562 * @f: QEMUFile where to receive the data
3563 * @opaque: RAMState pointer
3564 */
3565static int ram_load_setup(QEMUFile *f, void *opaque)
3566{
34ab9e97 3567 if (compress_threads_load_setup(f)) {
797ca154
XG
3568 return -1;
3569 }
3570
f265e0e4 3571 xbzrle_load_setup();
f9494614 3572 ramblock_recv_map_init();
f265e0e4
JQ
3573 return 0;
3574}
3575
3576static int ram_load_cleanup(void *opaque)
3577{
f9494614 3578 RAMBlock *rb;
56eb90af
JH
3579
3580 RAMBLOCK_FOREACH_MIGRATABLE(rb) {
3581 if (ramblock_is_pmem(rb)) {
3582 pmem_persist(rb->host, rb->used_length);
3583 }
3584 }
3585
f265e0e4 3586 xbzrle_load_cleanup();
f0afa331 3587 compress_threads_load_cleanup();
f9494614 3588
b895de50 3589 RAMBLOCK_FOREACH_MIGRATABLE(rb) {
f9494614
AP
3590 g_free(rb->receivedmap);
3591 rb->receivedmap = NULL;
3592 }
f265e0e4
JQ
3593 return 0;
3594}
3595
3d0684b2
JQ
3596/**
3597 * ram_postcopy_incoming_init: allocate postcopy data structures
3598 *
3599 * Returns 0 for success and negative if there was one error
3600 *
3601 * @mis: current migration incoming state
3602 *
3603 * Allocate data structures etc needed by incoming migration with
3604 * postcopy-ram. postcopy-ram's similarly named
3605 * postcopy_ram_incoming_init does the work.
1caddf8a
DDAG
3606 */
3607int ram_postcopy_incoming_init(MigrationIncomingState *mis)
3608{
c136180c 3609 return postcopy_ram_incoming_init(mis);
1caddf8a
DDAG
3610}
3611
3d0684b2
JQ
3612/**
3613 * ram_load_postcopy: load a page in postcopy case
3614 *
3615 * Returns 0 for success or -errno in case of error
3616 *
a7180877
DDAG
3617 * Called in postcopy mode by ram_load().
3618 * rcu_read_lock is taken prior to this being called.
3d0684b2
JQ
3619 *
3620 * @f: QEMUFile where to receive the data
a7180877
DDAG
3621 */
3622static int ram_load_postcopy(QEMUFile *f)
3623{
3624 int flags = 0, ret = 0;
3625 bool place_needed = false;
1aa83678 3626 bool matches_target_page_size = false;
a7180877
DDAG
3627 MigrationIncomingState *mis = migration_incoming_get_current();
3628 /* Temporary page that is later 'placed' */
3629 void *postcopy_host_page = postcopy_get_tmp_page(mis);
c53b7ddc 3630 void *last_host = NULL;
a3b6ff6d 3631 bool all_zero = false;
a7180877
DDAG
3632
3633 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
3634 ram_addr_t addr;
3635 void *host = NULL;
3636 void *page_buffer = NULL;
3637 void *place_source = NULL;
df9ff5e1 3638 RAMBlock *block = NULL;
a7180877 3639 uint8_t ch;
a7180877
DDAG
3640
3641 addr = qemu_get_be64(f);
7a9ddfbf
PX
3642
3643 /*
3644 * If qemu file error, we should stop here, and then "addr"
3645 * may be invalid
3646 */
3647 ret = qemu_file_get_error(f);
3648 if (ret) {
3649 break;
3650 }
3651
a7180877
DDAG
3652 flags = addr & ~TARGET_PAGE_MASK;
3653 addr &= TARGET_PAGE_MASK;
3654
3655 trace_ram_load_postcopy_loop((uint64_t)addr, flags);
3656 place_needed = false;
bb890ed5 3657 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
df9ff5e1 3658 block = ram_block_from_stream(f, flags);
4c4bad48
HZ
3659
3660 host = host_from_ram_block_offset(block, addr);
a7180877
DDAG
3661 if (!host) {
3662 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3663 ret = -EINVAL;
3664 break;
3665 }
1aa83678 3666 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
a7180877 3667 /*
28abd200
DDAG
3668 * Postcopy requires that we place whole host pages atomically;
3669 * these may be huge pages for RAMBlocks that are backed by
3670 * hugetlbfs.
a7180877
DDAG
3671 * To make it atomic, the data is read into a temporary page
3672 * that's moved into place later.
3673 * The migration protocol uses, possibly smaller, target pages;
3674 * however, the source ensures it always sends all the components
3675 * of a host page in order.
3676 */
3677 page_buffer = postcopy_host_page +
28abd200 3678 ((uintptr_t)host & (block->page_size - 1));
a7180877 3679 /* If all target pages are zero then we can optimise the placement */
28abd200 3680 if (!((uintptr_t)host & (block->page_size - 1))) {
a7180877 3681 all_zero = true;
c53b7ddc
DDAG
3682 } else {
3683 /* not the first target page within the host page */
3684 if (host != (last_host + TARGET_PAGE_SIZE)) {
9af9e0fe 3685 error_report("Non-sequential target page %p/%p",
c53b7ddc
DDAG
3686 host, last_host);
3687 ret = -EINVAL;
3688 break;
3689 }
a7180877
DDAG
3690 }
3691
c53b7ddc 3692
a7180877
DDAG
3693 /*
3694 * If it's the last part of a host page then we place the host
3695 * page
3696 */
3697 place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
28abd200 3698 (block->page_size - 1)) == 0;
a7180877
DDAG
3699 place_source = postcopy_host_page;
3700 }
c53b7ddc 3701 last_host = host;
a7180877
DDAG
3702
3703 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
bb890ed5 3704 case RAM_SAVE_FLAG_ZERO:
a7180877
DDAG
3705 ch = qemu_get_byte(f);
3706 memset(page_buffer, ch, TARGET_PAGE_SIZE);
3707 if (ch) {
3708 all_zero = false;
3709 }
3710 break;
3711
3712 case RAM_SAVE_FLAG_PAGE:
3713 all_zero = false;
1aa83678
PX
3714 if (!matches_target_page_size) {
3715 /* For huge pages, we always use a temporary buffer */
a7180877
DDAG
3716 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
3717 } else {
1aa83678
PX
3718 /*
3719 * For small pages that match the target page size, we
3720 * avoid the qemu_file copy. Instead we directly use
3721 * the buffer of QEMUFile to place the page. Note: we
3722 * cannot do any QEMUFile operation before using that
3723 * buffer to make sure the buffer is valid when
3724 * placing the page.
a7180877
DDAG
3725 */
3726 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
3727 TARGET_PAGE_SIZE);
3728 }
3729 break;
3730 case RAM_SAVE_FLAG_EOS:
3731 /* normal exit */
6df264ac 3732 multifd_recv_sync_main();
a7180877
DDAG
3733 break;
3734 default:
3735 error_report("Unknown combination of migration flags: %#x"
3736 " (postcopy mode)", flags);
3737 ret = -EINVAL;
7a9ddfbf
PX
3738 break;
3739 }
3740
3741 /* Detect for any possible file errors */
3742 if (!ret && qemu_file_get_error(f)) {
3743 ret = qemu_file_get_error(f);
a7180877
DDAG
3744 }
3745
7a9ddfbf 3746 if (!ret && place_needed) {
a7180877 3747 /* This gets called at the last target page in the host page */
df9ff5e1
DDAG
3748 void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;
3749
a7180877 3750 if (all_zero) {
df9ff5e1 3751 ret = postcopy_place_page_zero(mis, place_dest,
8be4620b 3752 block);
a7180877 3753 } else {
df9ff5e1 3754 ret = postcopy_place_page(mis, place_dest,
8be4620b 3755 place_source, block);
a7180877
DDAG
3756 }
3757 }
a7180877
DDAG
3758 }
3759
3760 return ret;
3761}
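/*
 * A standalone sketch (not part of ram.c) of the host-page assembly
 * arithmetic used above: each incoming target page lands at its offset
 * inside the temporary host page, and the host page is "placed" only when
 * its last target page has arrived.  Names and parameters are illustrative
 * assumptions.
 */
#include <stdbool.h>
#include <stdint.h>

static void *sketch_page_buffer(void *tmp_host_page, uintptr_t host_addr,
                                uintptr_t host_page_size)
{
    /* offset of this target page within its (possibly huge) host page */
    return (char *)tmp_host_page + (host_addr & (host_page_size - 1));
}

static bool sketch_place_needed(uintptr_t host_addr,
                                uintptr_t target_page_size,
                                uintptr_t host_page_size)
{
    /* true when this is the last target page of the host page */
    return ((host_addr + target_page_size) & (host_page_size - 1)) == 0;
}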
3762
acab30b8
DHB
3763static bool postcopy_is_advised(void)
3764{
3765 PostcopyState ps = postcopy_state_get();
3766 return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
3767}
3768
3769static bool postcopy_is_running(void)
3770{
3771 PostcopyState ps = postcopy_state_get();
3772 return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
3773}
3774
56e93d26
JQ
3775static int ram_load(QEMUFile *f, void *opaque, int version_id)
3776{
edc60127 3777 int flags = 0, ret = 0, invalid_flags = 0;
56e93d26
JQ
3778 static uint64_t seq_iter;
3779 int len = 0;
a7180877
DDAG
3780 /*
3781 * If system is running in postcopy mode, page inserts to host memory must
3782 * be atomic
3783 */
acab30b8 3784 bool postcopy_running = postcopy_is_running();
ef08fb38 3785 /* ADVISE comes earlier; it shows the source has the postcopy capability enabled */
acab30b8 3786 bool postcopy_advised = postcopy_is_advised();
56e93d26
JQ
3787
3788 seq_iter++;
3789
3790 if (version_id != 4) {
3791 ret = -EINVAL;
3792 }
3793
edc60127
JQ
3794 if (!migrate_use_compression()) {
3795 invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
3796 }
56e93d26
JQ
3797 /* This RCU critical section can be very long running.
3798 * When RCU reclamations in this code become numerous,
3799 * it will be necessary to reduce the granularity of this
3800 * critical section.
3801 */
3802 rcu_read_lock();
a7180877
DDAG
3803
3804 if (postcopy_running) {
3805 ret = ram_load_postcopy(f);
3806 }
3807
3808 while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
56e93d26 3809 ram_addr_t addr, total_ram_bytes;
a776aa15 3810 void *host = NULL;
56e93d26
JQ
3811 uint8_t ch;
3812
3813 addr = qemu_get_be64(f);
3814 flags = addr & ~TARGET_PAGE_MASK;
3815 addr &= TARGET_PAGE_MASK;
3816
edc60127
JQ
3817 if (flags & invalid_flags) {
3818 if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
3819 error_report("Received an unexpected compressed page");
3820 }
3821
3822 ret = -EINVAL;
3823 break;
3824 }
3825
bb890ed5 3826 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
a776aa15 3827 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
4c4bad48
HZ
3828 RAMBlock *block = ram_block_from_stream(f, flags);
3829
3830 host = host_from_ram_block_offset(block, addr);
a776aa15
DDAG
3831 if (!host) {
3832 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3833 ret = -EINVAL;
3834 break;
3835 }
f9494614 3836 ramblock_recv_bitmap_set(block, host);
1db9d8e5 3837 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
a776aa15
DDAG
3838 }
3839
56e93d26
JQ
3840 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
3841 case RAM_SAVE_FLAG_MEM_SIZE:
3842 /* Synchronize RAM block list */
3843 total_ram_bytes = addr;
3844 while (!ret && total_ram_bytes) {
3845 RAMBlock *block;
56e93d26
JQ
3846 char id[256];
3847 ram_addr_t length;
3848
3849 len = qemu_get_byte(f);
3850 qemu_get_buffer(f, (uint8_t *)id, len);
3851 id[len] = 0;
3852 length = qemu_get_be64(f);
3853
e3dd7493 3854 block = qemu_ram_block_by_name(id);
b895de50
CLG
3855 if (block && !qemu_ram_is_migratable(block)) {
3856 error_report("block %s should not be migrated !", id);
3857 ret = -EINVAL;
3858 } else if (block) {
e3dd7493
DDAG
3859 if (length != block->used_length) {
3860 Error *local_err = NULL;
56e93d26 3861
fa53a0e5 3862 ret = qemu_ram_resize(block, length,
e3dd7493
DDAG
3863 &local_err);
3864 if (local_err) {
3865 error_report_err(local_err);
56e93d26 3866 }
56e93d26 3867 }
ef08fb38
DDAG
3868 /* For postcopy we need to check hugepage sizes match */
3869 if (postcopy_advised &&
3870 block->page_size != qemu_host_page_size) {
3871 uint64_t remote_page_size = qemu_get_be64(f);
3872 if (remote_page_size != block->page_size) {
3873 error_report("Mismatched RAM page size %s "
3874 "(local) %zd != %" PRId64,
3875 id, block->page_size,
3876 remote_page_size);
3877 ret = -EINVAL;
3878 }
3879 }
e3dd7493
DDAG
3880 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
3881 block->idstr);
3882 } else {
56e93d26
JQ
3883 error_report("Unknown ramblock \"%s\", cannot "
3884 "accept migration", id);
3885 ret = -EINVAL;
3886 }
3887
3888 total_ram_bytes -= length;
3889 }
3890 break;
a776aa15 3891
bb890ed5 3892 case RAM_SAVE_FLAG_ZERO:
56e93d26
JQ
3893 ch = qemu_get_byte(f);
3894 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
3895 break;
a776aa15 3896
56e93d26 3897 case RAM_SAVE_FLAG_PAGE:
56e93d26
JQ
3898 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
3899 break;
56e93d26 3900
a776aa15 3901 case RAM_SAVE_FLAG_COMPRESS_PAGE:
56e93d26
JQ
3902 len = qemu_get_be32(f);
3903 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
3904 error_report("Invalid compressed data length: %d", len);
3905 ret = -EINVAL;
3906 break;
3907 }
c1bc6626 3908 decompress_data_with_multi_threads(f, host, len);
56e93d26 3909 break;
a776aa15 3910
56e93d26 3911 case RAM_SAVE_FLAG_XBZRLE:
56e93d26
JQ
3912 if (load_xbzrle(f, addr, host) < 0) {
3913 error_report("Failed to decompress XBZRLE page at "
3914 RAM_ADDR_FMT, addr);
3915 ret = -EINVAL;
3916 break;
3917 }
3918 break;
3919 case RAM_SAVE_FLAG_EOS:
3920 /* normal exit */
6df264ac 3921 multifd_recv_sync_main();
56e93d26
JQ
3922 break;
3923 default:
3924 if (flags & RAM_SAVE_FLAG_HOOK) {
632e3a5c 3925 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
56e93d26
JQ
3926 } else {
3927 error_report("Unknown combination of migration flags: %#x",
3928 flags);
3929 ret = -EINVAL;
3930 }
3931 }
3932 if (!ret) {
3933 ret = qemu_file_get_error(f);
3934 }
3935 }
3936
34ab9e97 3937 ret |= wait_for_decompress_done();
56e93d26 3938 rcu_read_unlock();
55c4446b 3939 trace_ram_load_complete(ret, seq_iter);
56e93d26
JQ
3940 return ret;
3941}
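/*
 * A standalone sketch (not part of ram.c) of how the stream packs the
 * RAM_SAVE_FLAG_* bits into the low, page-offset bits of the 64-bit address
 * word read above; the 4 KiB page size is an illustrative assumption.
 */
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096ULL
#define SKETCH_PAGE_MASK (~(SKETCH_PAGE_SIZE - 1))

static void sketch_unpack_addr(uint64_t word, uint64_t *addr,
                               unsigned int *flags)
{
    *flags = word & ~SKETCH_PAGE_MASK;  /* low bits carry the flags   */
    *addr  = word & SKETCH_PAGE_MASK;   /* page-aligned guest address */
}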
3942
c6467627
VSO
3943static bool ram_has_postcopy(void *opaque)
3944{
469dd51b
JH
3945 RAMBlock *rb;
3946 RAMBLOCK_FOREACH_MIGRATABLE(rb) {
3947 if (ramblock_is_pmem(rb)) {
3948 info_report("Block: %s, host: %p is an nvdimm memory, postcopy "
3949 "is not supported now!", rb->idstr, rb->host);
3950 return false;
3951 }
3952 }
3953
c6467627
VSO
3954 return migrate_postcopy_ram();
3955}
3956
edd090c7
PX
3957/* Sync all the dirty bitmap with destination VM. */
3958static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
3959{
3960 RAMBlock *block;
3961 QEMUFile *file = s->to_dst_file;
3962 int ramblock_count = 0;
3963
3964 trace_ram_dirty_bitmap_sync_start();
3965
ff0769a4 3966 RAMBLOCK_FOREACH_MIGRATABLE(block) {
edd090c7
PX
3967 qemu_savevm_send_recv_bitmap(file, block->idstr);
3968 trace_ram_dirty_bitmap_request(block->idstr);
3969 ramblock_count++;
3970 }
3971
3972 trace_ram_dirty_bitmap_sync_wait();
3973
3974 /* Wait until all the ramblocks' dirty bitmaps are synced */
3975 while (ramblock_count--) {
3976 qemu_sem_wait(&s->rp_state.rp_sem);
3977 }
3978
3979 trace_ram_dirty_bitmap_sync_complete();
3980
3981 return 0;
3982}
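/*
 * A standalone sketch (not part of ram.c) of the request/acknowledge pattern
 * above: one bitmap request is sent per migratable ramblock, then the sender
 * waits for exactly that many acknowledgements.  Shown with a plain POSIX
 * semaphore instead of QEMU's rp_state.rp_sem; names are illustrative.
 */
#include <semaphore.h>

static void sketch_wait_for_acks(sem_t *acks, int requests_sent)
{
    while (requests_sent--) {
        sem_wait(acks);     /* each reply posts the semaphore once */
    }
}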
3983
3984static void ram_dirty_bitmap_reload_notify(MigrationState *s)
3985{
3986 qemu_sem_post(&s->rp_state.rp_sem);
3987}
3988
a335debb
PX
3989/*
3990 * Read the received bitmap, revert it as the initial dirty bitmap.
3991 * This is only used when the postcopy migration is paused but wants
3992 * to resume from a middle point.
3993 */
3994int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
3995{
3996 int ret = -EINVAL;
3997 QEMUFile *file = s->rp_state.from_dst_file;
3998 unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
a725ef9f 3999 uint64_t local_size = DIV_ROUND_UP(nbits, 8);
a335debb
PX
4000 uint64_t size, end_mark;
4001
4002 trace_ram_dirty_bitmap_reload_begin(block->idstr);
4003
4004 if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
4005 error_report("%s: incorrect state %s", __func__,
4006 MigrationStatus_str(s->state));
4007 return -EINVAL;
4008 }
4009
4010 /*
4011 * Note: see comments in ramblock_recv_bitmap_send() on why we
4012 * need the endianness conversion and the padding.
4013 */
4014 local_size = ROUND_UP(local_size, 8);
4015
4016 /* Add paddings */
4017 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
4018
4019 size = qemu_get_be64(file);
4020
4021 /* The size of the bitmap should match with our ramblock */
4022 if (size != local_size) {
4023 error_report("%s: ramblock '%s' bitmap size mismatch "
4024 "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
4025 block->idstr, size, local_size);
4026 ret = -EINVAL;
4027 goto out;
4028 }
4029
4030 size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
4031 end_mark = qemu_get_be64(file);
4032
4033 ret = qemu_file_get_error(file);
4034 if (ret || size != local_size) {
4035 error_report("%s: read bitmap failed for ramblock '%s': %d"
4036 " (size 0x%"PRIx64", got: 0x%"PRIx64")",
4037 __func__, block->idstr, ret, local_size, size);
4038 ret = -EIO;
4039 goto out;
4040 }
4041
4042 if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
4043 error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64,
4044 __func__, block->idstr, end_mark);
4045 ret = -EINVAL;
4046 goto out;
4047 }
4048
4049 /*
4050 * Endianness conversion. We are in postcopy (though paused).
4051 * The dirty bitmap won't change. We can directly modify it.
4052 */
4053 bitmap_from_le(block->bmap, le_bitmap, nbits);
4054
4055 /*
4056 * What we received is the "received bitmap". Invert it to get the initial
4057 * dirty bitmap for this ramblock.
4058 */
4059 bitmap_complement(block->bmap, block->bmap, nbits);
4060
4061 trace_ram_dirty_bitmap_reload_complete(block->idstr);
4062
edd090c7
PX
4063 /*
4064 * We succeeded in syncing the bitmap for the current ramblock. If this is
4065 * the last one to sync, we need to notify the main send thread.
4066 */
4067 ram_dirty_bitmap_reload_notify(s);
4068
a335debb
PX
4069 ret = 0;
4070out:
bf269906 4071 g_free(le_bitmap);
a335debb
PX
4072 return ret;
4073}
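/*
 * A standalone sketch (not part of ram.c) of the size arithmetic and the
 * final inversion used above: the bitmap travels as 64-bit little-endian
 * words, so its byte size is rounded up to 8, and the received-page bitmap
 * is complemented because a page the destination has NOT received yet is
 * exactly a page that is still dirty.  Names are illustrative.
 */
#include <stdint.h>

static uint64_t sketch_bitmap_wire_size(uint64_t nbits)
{
    uint64_t bytes = (nbits + 7) / 8;   /* DIV_ROUND_UP(nbits, 8) */

    return (bytes + 7) & ~7ULL;         /* ROUND_UP(bytes, 8)     */
}

static void sketch_received_to_dirty(unsigned long *bmap, uint64_t nwords)
{
    /* complement whole words; padding bits beyond nbits are ignored */
    for (uint64_t i = 0; i < nwords; i++) {
        bmap[i] = ~bmap[i];
    }
}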
4074
edd090c7
PX
4075static int ram_resume_prepare(MigrationState *s, void *opaque)
4076{
4077 RAMState *rs = *(RAMState **)opaque;
08614f34 4078 int ret;
edd090c7 4079
08614f34
PX
4080 ret = ram_dirty_bitmap_sync_all(s, rs);
4081 if (ret) {
4082 return ret;
4083 }
4084
4085 ram_state_resume_prepare(rs, s->to_dst_file);
4086
4087 return 0;
edd090c7
PX
4088}
4089
56e93d26 4090static SaveVMHandlers savevm_ram_handlers = {
9907e842 4091 .save_setup = ram_save_setup,
56e93d26 4092 .save_live_iterate = ram_save_iterate,
763c906b 4093 .save_live_complete_postcopy = ram_save_complete,
a3e06c3d 4094 .save_live_complete_precopy = ram_save_complete,
c6467627 4095 .has_postcopy = ram_has_postcopy,
56e93d26
JQ
4096 .save_live_pending = ram_save_pending,
4097 .load_state = ram_load,
f265e0e4
JQ
4098 .save_cleanup = ram_save_cleanup,
4099 .load_setup = ram_load_setup,
4100 .load_cleanup = ram_load_cleanup,
edd090c7 4101 .resume_prepare = ram_resume_prepare,
56e93d26
JQ
4102};
4103
4104void ram_mig_init(void)
4105{
4106 qemu_mutex_init(&XBZRLE.lock);
6f37bb8b 4107 register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
56e93d26 4108}