/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include <zlib.h>
#include "qapi-event.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "xbzrle.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "migration/vmstate.h"
#include "postcopy-ram.h"
#include "exec/address-spaces.h"
#include "migration/page_cache.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"

/***********************************************************/
/* ram save/restore */

/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
 * worked for pages that were filled with the same char.  We switched
 * it to only search for the zero value.  It was also renamed, to avoid
 * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
 */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100

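/*
 * Illustrative sketch (not from the original source): these flags travel
 * in the low bits of the 64-bit page offset that save_page_header() puts
 * on the wire, which works because offsets are target-page aligned.  For
 * example, resending the second page of the current block as a zero page
 * boils down to:
 *
 *     ram_addr_t offset = 1 * TARGET_PAGE_SIZE;
 *     qemu_put_be64(f, offset | RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_CONTINUE);
 *
 * The load side recovers the offset with (addr & TARGET_PAGE_MASK) and
 * dispatches on the remaining flag bits.
 */
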
static uint8_t *ZERO_TARGET_PAGE;

static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;

/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_lock(&XBZRLE.lock);
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_unlock(&XBZRLE.lock);
}

/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from qmp_migrate_set_cache_size in main
 * thread, possibly while a migration is in progress. A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock().
 *
 * Returns the new_size or negative in case of error.
 *
 * @new_size: new cache size
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}

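/*
 * Usage sketch (illustrative, not part of the original file): the QMP
 * handler is expected to call the resize with the user's requested byte
 * count.  The result is rounded down to a power of two, so callers should
 * report the returned size rather than the requested one:
 *
 *     int64_t new_size = xbzrle_cache_resize(value);
 *     if (new_size < 0) {
 *         // report the failure (size too small or cache_init failed)
 *     }
 */
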
/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};

/* State of RAM for migration */
struct RAMState {
    /* QEMUFile used for this migration */
    QEMUFile *f;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* We are in the first round */
    bool ram_bulk_stage;
    /* How many times we have dirtied too many pages */
    int dirty_rate_high_cnt;
    /* How many times we have synchronized the bitmap */
    uint64_t bitmap_sync_count;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* number of iterations at the beginning of period */
    uint64_t iterations_prev;
    /* Accounting fields */
    /* number of zero pages.  It used to be pages filled by the same char. */
    uint64_t zero_pages;
    /* number of normal transferred pages */
    uint64_t norm_pages;
    /* Iterations since start */
    uint64_t iterations;
    /* xbzrle transmitted bytes.  Notice that this is with
     * compression; it can't be calculated from the pages */
    uint64_t xbzrle_bytes;
    /* xbzrle transmitted pages */
    uint64_t xbzrle_pages;
    /* xbzrle number of cache misses */
    uint64_t xbzrle_cache_miss;
    /* xbzrle miss rate */
    double xbzrle_cache_miss_rate;
    /* xbzrle number of overflows */
    uint64_t xbzrle_overflows;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /* total number of bytes transferred */
    uint64_t bytes_transferred;
    /* number of dirtied pages in the last second */
    uint64_t dirty_pages_rate;
    /* Count of requests incoming from destination */
    uint64_t postcopy_requests;
    /* protects modification of the bitmap */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests;
};
typedef struct RAMState RAMState;

static RAMState ram_state;

uint64_t dup_mig_pages_transferred(void)
{
    return ram_state.zero_pages;
}

uint64_t norm_mig_pages_transferred(void)
{
    return ram_state.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return ram_state.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return ram_state.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return ram_state.xbzrle_cache_miss;
}

double xbzrle_mig_cache_miss_rate(void)
{
    return ram_state.xbzrle_cache_miss_rate;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return ram_state.xbzrle_overflows;
}

uint64_t ram_bytes_transferred(void)
{
    return ram_state.bytes_transferred;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_state.migration_dirty_pages * TARGET_PAGE_SIZE;
}

uint64_t ram_dirty_sync_count(void)
{
    return ram_state.bitmap_sync_count;
}

uint64_t ram_dirty_pages_rate(void)
{
    return ram_state.dirty_pages_rate;
}

uint64_t ram_postcopy_requests(void)
{
    return ram_state.postcopy_requests;
}

/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock    *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool         complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;

struct CompressParam {
    bool done;
    bool quit;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;
};
typedef struct CompressParam CompressParam;

struct DecompressParam {
    bool done;
    bool quit;
    QemuMutex mutex;
    QemuCond cond;
    void *des;
    uint8_t *compbuf;
    int len;
};
typedef struct DecompressParam DecompressParam;

static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;

static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                ram_addr_t offset);

static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;
    RAMBlock *block;
    ram_addr_t offset;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->block) {
            block = param->block;
            offset = param->offset;
            param->block = NULL;
            qemu_mutex_unlock(&param->mutex);

            do_compress_ram_page(param->file, block, offset);

            qemu_mutex_lock(&comp_done_lock);
            param->done = true;
            qemu_cond_signal(&comp_done_cond);
            qemu_mutex_unlock(&comp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}

static inline void terminate_compression_threads(void)
{
    int idx, thread_count;

    thread_count = migrate_compress_threads();

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        comp_param[idx].quit = true;
        qemu_cond_signal(&comp_param[idx].cond);
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}

void migrate_compress_threads_join(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(compress_threads + i);
        qemu_fclose(comp_param[i].file);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}

void migrate_compress_threads_create(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}

/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @rs: current RAM state
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
                               ram_addr_t offset)
{
    size_t size, len;

    if (block == rs->last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        rs->last_sent_block = block;
    }
    return size;
}

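/*
 * The header layout produced above, byte by byte (summary only, not
 * additional protocol):
 *
 *     8 bytes : offset | flags, big endian
 *     if !RAM_SAVE_FLAG_CONTINUE:
 *         1 byte  : strlen(block->idstr)
 *         n bytes : block->idstr (not NUL terminated)
 *
 * So the first page of a block named "pc.ram" costs 8 + 1 + 6 = 15 header
 * bytes, and every later page of the same block costs only 8.
 */
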
/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes. If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(void)
{
    MigrationState *s = migrate_get_current();
    uint64_t pct_initial = s->parameters.cpu_throttle_initial;
    uint64_t pct_increment = s->parameters.cpu_throttle_increment;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        cpu_throttle_set(cpu_throttle_get_percentage() + pct_increment);
    }
}

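/*
 * Worked example (illustrative; 20%/10% are the usual defaults of
 * cpu_throttle_initial/cpu_throttle_increment and can be changed with
 * migrate-set-parameters): successive calls throttle the guest CPU to
 * 20%, then 30%, 40%, ... until the dirty rate drops below the transfer
 * rate or migration completes.
 */
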
/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @rs: current RAM state
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
{
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
                 rs->bitmap_sync_count);
}

#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 */
static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr, rs->bitmap_sync_count)) {
        rs->xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             rs->bitmap_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        rs->xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(rs, rs->f, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(rs->f, encoded_len);
    qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    rs->xbzrle_pages++;
    rs->xbzrle_bytes += bytes_xbzrle;
    rs->bytes_transferred += bytes_xbzrle;

    return 1;
}

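/*
 * How the caller consumes the three outcomes above (sketch of the idiom
 * used by ram_save_page(), not additional logic):
 *
 *     int r = save_xbzrle_page(rs, &p, current_addr, block, offset,
 *                              last_stage);
 *     // r == 1  : delta fitted in a target page and is on the wire
 *     // r == 0  : page identical to the cached copy, nothing sent
 *     // r == -1 : cache miss or overflow, caller sends the full page
 */
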
/**
 * migration_bitmap_find_dirty: find the next dirty page from start
 *
 * Called with rcu_read_lock() to protect migration_bitmap
 *
 * Returns the byte offset within memory region of the start of a dirty page
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 */
static inline
unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                          unsigned long start)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long next;

    if (rs->ram_bulk_stage && start > 0) {
        next = start + 1;
    } else {
        next = find_next_bit(bitmap, size, start);
    }

    return next;
}

static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                RAMBlock *rb,
                                                unsigned long page)
{
    bool ret;

    ret = test_and_clear_bit(page, rb->bmap);

    if (ret) {
        rs->migration_dirty_pages--;
    }
    return ret;
}

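/*
 * The two helpers above form a find/claim pair.  A minimal sketch of the
 * idiom (ram_save_target_page() below is the real user):
 *
 *     unsigned long page = migration_bitmap_find_dirty(rs, rb, start);
 *     if (page < (rb->used_length >> TARGET_PAGE_BITS) &&
 *         migration_bitmap_clear_dirty(rs, rb, page)) {
 *         // the page is now ours to send; the global dirty-page
 *         // count has already been decremented
 *     }
 */
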
static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
                                        ram_addr_t start, ram_addr_t length)
{
    rs->migration_dirty_pages +=
        cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
                                              &rs->num_dirty_pages_period);
}

/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size. If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    RAMBlock *block;
    uint64_t summary = 0;

    RAMBLOCK_FOREACH(block) {
        summary |= block->page_size;
    }

    return summary;
}

static void migration_bitmap_sync(RAMState *rs)
{
    RAMBlock *block;
    int64_t end_time;
    uint64_t bytes_xfer_now;

    rs->bitmap_sync_count++;

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync();

    qemu_mutex_lock(&rs->bitmap_mutex);
    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        migration_bitmap_sync_range(rs, block, 0, block->used_length);
    }
    rcu_read_unlock();
    qemu_mutex_unlock(&rs->bitmap_mutex);

    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        /* calculate period counters */
        rs->dirty_pages_rate = rs->num_dirty_pages_period * 1000
            / (end_time - rs->time_last_bitmap_sync);

        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes are 50% more than the approx.
               amount of bytes that just got transferred since the last time
               we were in this routine. If that happens twice, start or
               increase throttling */
            bytes_xfer_now = ram_bytes_transferred();

            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
                    (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
                (rs->dirty_rate_high_cnt++ >= 2)) {
                trace_migration_throttle();
                rs->dirty_rate_high_cnt = 0;
                mig_throttle_guest_down();
            }
            rs->bytes_xfer_prev = bytes_xfer_now;
        }

        if (migrate_use_xbzrle()) {
            if (rs->iterations_prev != rs->iterations) {
                rs->xbzrle_cache_miss_rate =
                    (double)(rs->xbzrle_cache_miss -
                             rs->xbzrle_cache_miss_prev) /
                    (rs->iterations - rs->iterations_prev);
            }
            rs->iterations_prev = rs->iterations;
            rs->xbzrle_cache_miss_prev = rs->xbzrle_cache_miss;
        }

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
    }
    if (migrate_use_events()) {
        qapi_event_send_migration_pass(rs->bitmap_sync_count, NULL);
    }
}

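/*
 * Worked example of the auto-converge trigger above (illustrative
 * numbers): with 4KiB target pages, a period that dirtied 100000 pages
 * (~400MB) while only 200MB were transferred gives 400MB > 200MB / 2, so
 * the period counts as "dirtying too fast" and dirty_rate_high_cnt is
 * bumped.  The comparison sees the counter before the increment, so the
 * first two fast periods only count up (0 and 1); the third (value 2)
 * trips the >= 2 check, resets the counter and calls
 * mig_throttle_guest_down().  Note that dirty_pages_rate is computed
 * before this logic runs, so the rate reported for the period is valid.
 */
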
/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @p: pointer to the page
 */
static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
                          uint8_t *p)
{
    int pages = -1;

    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        rs->zero_pages++;
        rs->bytes_transferred +=
            save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_ZERO);
        qemu_put_byte(rs->f, 0);
        rs->bytes_transferred += 1;
        pages = 1;
    }

    return pages;
}

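/*
 * Cost note (summary of the above): a zero page costs the header written
 * by save_page_header() - 8 bytes, plus the idstr the first time a block
 * is seen - plus the single 0 byte, i.e. 9 bytes steady-state versus
 * 8 + TARGET_PAGE_SIZE for a normal page.
 */
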
static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
{
    if (!migrate_release_ram() || !migration_in_postcopy()) {
        return;
    }

    ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
}

/**
 * ram_save_page: send the given page to the stream
 *
 * Returns the number of pages written.
 *          < 0 - error
 *          >=0 - Number of pages written - this might legally be 0
 *                if xbzrle noticed the page was the same.
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
{
    int pages = -1;
    uint64_t bytes_xmit;
    ram_addr_t current_addr;
    uint8_t *p;
    int ret;
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;

    p = block->host + offset;
    trace_ram_save_page(block->idstr, (uint64_t)offset, p);

    /* When in doubt, send the page as normal */
    bytes_xmit = 0;
    ret = ram_control_save_page(rs->f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        rs->bytes_transferred += bytes_xmit;
        pages = 1;
    }

    XBZRLE_cache_lock();

    current_addr = block->offset + offset;

    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                rs->norm_pages++;
            } else if (bytes_xmit == 0) {
                rs->zero_pages++;
            }
        }
    } else {
        pages = save_zero_page(rs, block, offset, p);
        if (pages > 0) {
            /* Must let xbzrle know, otherwise a previous (now 0'd) cached
             * page would be stale
             */
            xbzrle_cache_zero_page(rs, current_addr);
            ram_release_pages(block->idstr, offset, pages);
        } else if (!rs->ram_bulk_stage &&
                   !migration_in_postcopy() && migrate_use_xbzrle()) {
            pages = save_xbzrle_page(rs, &p, current_addr, block,
                                     offset, last_stage);
            if (!last_stage) {
                /* Can't send this cached data async, since the cache page
                 * might get updated before it gets to the wire
                 */
                send_async = false;
            }
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        rs->bytes_transferred += save_page_header(rs, rs->f, block,
                                                  offset | RAM_SAVE_FLAG_PAGE);
        if (send_async) {
            qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
                                  migrate_release_ram() &
                                  migration_in_postcopy());
        } else {
            qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
        }
        rs->bytes_transferred += TARGET_PAGE_SIZE;
        pages = 1;
        rs->norm_pages++;
    }

    XBZRLE_cache_unlock();

    return pages;
}

static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                ram_addr_t offset)
{
    RAMState *rs = &ram_state;
    int bytes_sent, blen;
    uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);

    bytes_sent = save_page_header(rs, f, block, offset |
                                  RAM_SAVE_FLAG_COMPRESS_PAGE);
    blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
                                     migrate_compress_level());
    if (blen < 0) {
        bytes_sent = 0;
        qemu_file_set_error(migrate_get_current()->to_dst_file, blen);
        error_report("compressed data failed!");
    } else {
        bytes_sent += blen;
        ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
    }

    return bytes_sent;
}

static void flush_compressed_data(RAMState *rs)
{
    int idx, len, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_compress_threads();

    qemu_mutex_lock(&comp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!comp_param[idx].done) {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        if (!comp_param[idx].quit) {
            len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
            rs->bytes_transferred += len;
        }
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}

static inline void set_compress_params(CompressParam *param, RAMBlock *block,
                                       ram_addr_t offset)
{
    param->block = block;
    param->offset = offset;
}

static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
                                           ram_addr_t offset)
{
    int idx, thread_count, bytes_xmit = -1, pages = -1;

    thread_count = migrate_compress_threads();
    qemu_mutex_lock(&comp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (comp_param[idx].done) {
                comp_param[idx].done = false;
                bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
                qemu_mutex_lock(&comp_param[idx].mutex);
                set_compress_params(&comp_param[idx], block, offset);
                qemu_cond_signal(&comp_param[idx].cond);
                qemu_mutex_unlock(&comp_param[idx].mutex);
                pages = 1;
                rs->norm_pages++;
                rs->bytes_transferred += bytes_xmit;
                break;
            }
        }
        if (pages > 0) {
            break;
        } else {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    return pages;
}

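/*
 * The handshake above in miniature (description only): the migration
 * thread may only touch comp_param[idx] while done == true, the worker
 * only while done == false, so "done" plus the two mutexes act as an
 * ownership baton:
 *
 *     migration thread                      worker (do_data_compress)
 *     ----------------                      -------------------------
 *     done? drain file, done = false
 *     set block/offset, signal cond    -->  wake, take block/offset
 *                                           compress into param->file
 *     wait on comp_done_cond           <--  done = true, signal
 */
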
/**
 * ram_save_compressed_page: compress the given page and send it to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
                                    bool last_stage)
{
    int pages = -1;
    uint64_t bytes_xmit = 0;
    uint8_t *p;
    int ret, blen;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;

    p = block->host + offset;

    ret = ram_control_save_page(rs->f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        rs->bytes_transferred += bytes_xmit;
        pages = 1;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                rs->norm_pages++;
            } else if (bytes_xmit == 0) {
                rs->zero_pages++;
            }
        }
    } else {
        /* When starting the process of a new block, the first page of
         * the block should be sent out before other pages in the same
         * block, and all the pages in last block should have been sent
         * out, keeping this order is important, because the 'cont' flag
         * is used to avoid resending the block name.
         */
        if (block != rs->last_sent_block) {
            flush_compressed_data(rs);
            pages = save_zero_page(rs, block, offset, p);
            if (pages == -1) {
                /* Make sure the first page is sent out before other pages */
                bytes_xmit = save_page_header(rs, rs->f, block, offset |
                                              RAM_SAVE_FLAG_COMPRESS_PAGE);
                blen = qemu_put_compression_data(rs->f, p, TARGET_PAGE_SIZE,
                                                 migrate_compress_level());
                if (blen > 0) {
                    rs->bytes_transferred += bytes_xmit + blen;
                    rs->norm_pages++;
                    pages = 1;
                } else {
                    qemu_file_set_error(rs->f, blen);
                    error_report("compressed data failed!");
                }
            }
            if (pages > 0) {
                ram_release_pages(block->idstr, offset, pages);
            }
        } else {
            pages = save_zero_page(rs, block, offset, p);
            if (pages == -1) {
                pages = compress_page_with_multi_thread(rs, block, offset);
            } else {
                ram_release_pages(block->idstr, offset, pages);
            }
        }
    }

    return pages;
}

/**
 * find_dirty_block: find the next dirty page and update any state
 * associated with the search process.
 *
 * Returns if a page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 * @again: set to false if the search has scanned the whole of RAM
 */
static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
{
    pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
    if (pss->complete_round && pss->block == rs->last_seen_block &&
        pss->page >= rs->last_page) {
        /*
         * We've been once around the RAM and haven't found anything.
         * Give up.
         */
        *again = false;
        return false;
    }
    if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
        /* Didn't find anything in this RAM Block */
        pss->page = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            rs->ram_bulk_stage = false;
            if (migrate_use_xbzrle()) {
                /* If xbzrle is on, stop using the data compression at this
                 * point. In theory, xbzrle can do better than compression.
                 */
                flush_compressed_data(rs);
            }
        }
        /* Didn't find anything this time, but try again on the new block */
        *again = true;
        return false;
    } else {
        /* Can go around again, but... */
        *again = true;
        /* We've found something so probably don't need to */
        return true;
    }
}

/**
 * unqueue_page: gets a page of the queue
 *
 * Helper for 'get_queued_page' - gets a page off the queue
 *
 * Returns the block of the page (or NULL if none available)
 *
 * @rs: current RAM state
 * @offset: used to return the offset within the RAMBlock
 */
static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
{
    RAMBlock *block = NULL;

    qemu_mutex_lock(&rs->src_page_req_mutex);
    if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
        struct RAMSrcPageRequest *entry =
            QSIMPLEQ_FIRST(&rs->src_page_requests);
        block = entry->rb;
        *offset = entry->offset;

        if (entry->len > TARGET_PAGE_SIZE) {
            entry->len -= TARGET_PAGE_SIZE;
            entry->offset += TARGET_PAGE_SIZE;
        } else {
            memory_region_unref(block->mr);
            QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
            g_free(entry);
        }
    }
    qemu_mutex_unlock(&rs->src_page_req_mutex);

    return block;
}

/**
 * get_queued_page: unqueue a page from the postcopy requests
 *
 * Skips pages that are already sent (!dirty)
 *
 * Returns if a queued page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 */
static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock  *block;
    ram_addr_t offset;
    bool dirty;

    do {
        block = unqueue_page(rs, &offset);
        /*
         * We're sending this page, and since it's postcopy nothing else
         * will dirty it, and we must make sure it doesn't get sent again
         * even if this queue request was received after the background
         * search already sent it.
         */
        if (block) {
            unsigned long page;

            page = offset >> TARGET_PAGE_BITS;
            dirty = test_bit(page, block->bmap);
            if (!dirty) {
                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
                    page, test_bit(page, block->unsentmap));
            } else {
                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
            }
        }

    } while (block && !dirty);

    if (block) {
        /*
         * As soon as we start servicing pages out of order, then we have
         * to kill the bulk stage, since the bulk stage assumes
         * in (migration_bitmap_find_and_reset_dirty) that every page is
         * dirty, and that's no longer true.
         */
        rs->ram_bulk_stage = false;

        /*
         * We want the background search to continue from the queued page
         * since the guest is likely to want other pages near to the page
         * it just requested.
         */
        pss->block = block;
        pss->page = offset >> TARGET_PAGE_BITS;
    }

    return !!block;
}

/**
 * migration_page_queue_free: drop any remaining pages in the ram
 * request queue
 *
 * It should be empty at the end anyway, but in error cases there
 * may be some left.  In case there is any page left, we drop it.
 */
void migration_page_queue_free(void)
{
    struct RAMSrcPageRequest *mspr, *next_mspr;
    RAMState *rs = &ram_state;
    /* This queue generally should be empty - but in the case of a failed
     * migration might have some droppings in.
     */
    rcu_read_lock();
    QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
        memory_region_unref(mspr->rb->mr);
        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
        g_free(mspr);
    }
    rcu_read_unlock();
}

/**
 * ram_save_queue_pages: queue the page for transmission
 *
 * A request from postcopy destination for example.
 *
 * Returns zero on success or negative on error
 *
 * @rbname: Name of the RAMBlock of the request. NULL means the
 *          same as the last one.
 * @start: starting address from the start of the RAMBlock
 * @len: length (in bytes) to send
 */
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
{
    RAMBlock *ramblock;
    RAMState *rs = &ram_state;

    rs->postcopy_requests++;
    rcu_read_lock();
    if (!rbname) {
        /* Reuse last RAMBlock */
        ramblock = rs->last_req_rb;

        if (!ramblock) {
            /*
             * Shouldn't happen, we can't reuse the last RAMBlock if
             * it's the 1st request.
             */
            error_report("ram_save_queue_pages no previous block");
            goto err;
        }
    } else {
        ramblock = qemu_ram_block_by_name(rbname);

        if (!ramblock) {
            /* We shouldn't be asked for a non-existent RAMBlock */
            error_report("ram_save_queue_pages no block '%s'", rbname);
            goto err;
        }
        rs->last_req_rb = ramblock;
    }
    trace_ram_save_queue_pages(ramblock->idstr, start, len);
    if (start + len > ramblock->used_length) {
        error_report("%s request overrun start=" RAM_ADDR_FMT " len="
                     RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
                     __func__, start, len, ramblock->used_length);
        goto err;
    }

    struct RAMSrcPageRequest *new_entry =
        g_malloc0(sizeof(struct RAMSrcPageRequest));
    new_entry->rb = ramblock;
    new_entry->offset = start;
    new_entry->len = len;

    memory_region_ref(ramblock->mr);
    qemu_mutex_lock(&rs->src_page_req_mutex);
    QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
    qemu_mutex_unlock(&rs->src_page_req_mutex);
    rcu_read_unlock();

    return 0;

err:
    rcu_read_unlock();
    return -1;
}

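/*
 * Usage sketch (illustrative): the source's return-path thread resolves
 * an incoming page request from the destination into a call such as
 *
 *     ram_save_queue_pages("pc.ram", 0x200000, TARGET_PAGE_SIZE);
 *
 * after which get_queued_page() services it ahead of the background
 * scan.  Passing rbname == NULL reuses the block of the previous
 * request, which keeps repeated requests for the same block cheap.
 */
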
/**
 * ram_save_target_page: save one target page
 *
 * Returns the number of pages written
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
                                bool last_stage)
{
    int res = 0;

    /* Check if the page is dirty and if it is, send it */
    if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
        /*
         * If xbzrle is on, stop using the data compression after first
         * round of migration even if compression is enabled. In theory,
         * xbzrle can do better than compression.
         */
        if (migrate_use_compression() &&
            (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
            res = ram_save_compressed_page(rs, pss, last_stage);
        } else {
            res = ram_save_page(rs, pss, last_stage);
        }

        if (res < 0) {
            return res;
        }
        if (pss->block->unsentmap) {
            clear_bit(pss->page, pss->block->unsentmap);
        }
    }

    return res;
}

/**
 * ram_save_host_page: save a whole host page
 *
 * Starting at *offset send pages up to the end of the current host
 * page. It's valid for the initial offset to point into the middle of
 * a host page in which case the remainder of the hostpage is sent.
 * Only dirty target pages are sent. Note that the host page size may
 * be a huge page for this block.
 * The saving stops at the boundary of the used_length of the block
 * if the RAMBlock isn't a multiple of the host page size.
 *
 * Returns the number of pages written or negative on error
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
                              bool last_stage)
{
    int tmppages, pages = 0;
    size_t pagesize_bits =
        qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;

    do {
        tmppages = ram_save_target_page(rs, pss, last_stage);
        if (tmppages < 0) {
            return tmppages;
        }

        pages += tmppages;
        pss->page++;
    } while ((pss->page & (pagesize_bits - 1)) &&
             offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));

    /* The offset we leave with is the last one we looked at */
    pss->page--;
    return pages;
}

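/*
 * Worked example (illustrative): for a RAMBlock backed by 2MiB hugepages
 * with 4KiB target pages, pagesize_bits is 512.  A call landing on target
 * page 300 of a hugepage walks pages 300..511, sending the dirty ones,
 * then backs up one so the caller records page 511 as the last page
 * visited and the outer scan resumes from there.
 */
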
/**
 * ram_find_and_save_block: finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns the number of pages written where zero means no dirty pages
 *
 * @rs: current RAM state
 * @last_stage: if we are at the completion stage
 *
 * On systems where host-page-size > target-page-size it will send all the
 * pages in a host page that are dirty.
 */
static int ram_find_and_save_block(RAMState *rs, bool last_stage)
{
    PageSearchStatus pss;
    int pages = 0;
    bool again, found;

    /* No dirty page as there is zero RAM */
    if (!ram_bytes_total()) {
        return pages;
    }

    pss.block = rs->last_seen_block;
    pss.page = rs->last_page;
    pss.complete_round = false;

    if (!pss.block) {
        pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
    }

    do {
        again = true;
        found = get_queued_page(rs, &pss);

        if (!found) {
            /* priority queue empty, so just search for something dirty */
            found = find_dirty_block(rs, &pss, &again);
        }

        if (found) {
            pages = ram_save_host_page(rs, &pss, last_stage);
        }
    } while (!pages && again);

    rs->last_seen_block = pss.block;
    rs->last_page = pss.page;

    return pages;
}

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    RAMState *rs = &ram_state;

    if (zero) {
        rs->zero_pages += pages;
    } else {
        rs->norm_pages += pages;
        rs->bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        total += block->used_length;
    }
    rcu_read_unlock();
    return total;
}

void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}

static void ram_migration_cleanup(void *opaque)
{
    RAMBlock *block;

    /* The caller must hold the iothread lock or be in a bh, so there is
     * no writing race against this migration_bitmap
     */
    memory_global_dirty_log_stop();

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        g_free(block->bmap);
        block->bmap = NULL;
        g_free(block->unsentmap);
        block->unsentmap = NULL;
    }

    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(ZERO_TARGET_PAGE);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
    XBZRLE_cache_unlock();
}

static void ram_state_reset(RAMState *rs)
{
    rs->last_seen_block = NULL;
    rs->last_sent_block = NULL;
    rs->last_page = 0;
    rs->last_version = ram_list.version;
    rs->ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

/*
 * 'expected' is the value you expect the bitmap mostly to be full
 * of; it won't bother printing lines that are all this value.
 * If 'todump' is null the migration bitmap is dumped.
 */
void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
                           unsigned long pages)
{
    int64_t cur;
    int64_t linelen = 128;
    char linebuf[129];

    for (cur = 0; cur < pages; cur += linelen) {
        int64_t curb;
        bool found = false;
        /*
         * Last line; catch the case where the line length
         * is longer than remaining ram
         */
        if (cur + linelen > pages) {
            linelen = pages - cur;
        }
        for (curb = 0; curb < linelen; curb++) {
            bool thisbit = test_bit(cur + curb, todump);
            linebuf[curb] = thisbit ? '1' : '.';
            found = found || (thisbit != expected);
        }
        if (found) {
            linebuf[curb] = '\0';
            fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
        }
    }
}

/* **** functions for postcopy ***** */

void ram_postcopy_migrated_memory_release(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH(block) {
        unsigned long *bitmap = block->bmap;
        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);

        while (run_start < range) {
            unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
            ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
                              (run_end - run_start) << TARGET_PAGE_BITS);
            run_start = find_next_zero_bit(bitmap, range, run_end + 1);
        }
    }
}

/**
 * postcopy_send_discard_bm_ram: discard a RAMBlock
 *
 * Returns zero on success
 *
 * Callback from postcopy_each_ram_send_discard for each RAMBlock
 * Note: At this point the 'unsentmap' is the processed bitmap combined
 *       with the dirtymap; so a '1' means it's either dirty or unsent.
 *
 * @ms: current migration state
 * @pds: state for postcopy
 * @block: RAMBlock to discard
 */
static int postcopy_send_discard_bm_ram(MigrationState *ms,
                                        PostcopyDiscardState *pds,
                                        RAMBlock *block)
{
    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
    unsigned long current;
    unsigned long *unsentmap = block->unsentmap;

    for (current = 0; current < end; ) {
        unsigned long one = find_next_bit(unsentmap, end, current);

        if (one <= end) {
            unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
            unsigned long discard_length;

            if (zero >= end) {
                discard_length = end - one;
            } else {
                discard_length = zero - one;
            }
            if (discard_length) {
                postcopy_discard_send_range(ms, pds, one, discard_length);
            }
            current = one + discard_length;
        } else {
            current = one;
        }
    }

    return 0;
}

/**
 * postcopy_each_ram_send_discard: discard all RAMBlocks
 *
 * Returns 0 for success or negative for error
 *
 * Utility for the outgoing postcopy code.
 *   Calls postcopy_send_discard_bm_ram for each RAMBlock
 *   passing it bitmap indexes and name.
 * (qemu_ram_foreach_block ends up passing unscaled lengths
 *  which would mean postcopy code would have to deal with target page)
 *
 * @ms: current migration state
 */
static int postcopy_each_ram_send_discard(MigrationState *ms)
{
    struct RAMBlock *block;
    int ret;

    RAMBLOCK_FOREACH(block) {
        PostcopyDiscardState *pds =
            postcopy_discard_send_init(ms, block->idstr);

        /*
         * Postcopy sends chunks of bitmap over the wire, but it
         * just needs indexes at this point, avoids it having
         * target page specific code.
         */
        ret = postcopy_send_discard_bm_ram(ms, pds, block);
        postcopy_discard_send_finish(ms, pds);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

3d0684b2
JQ
1613/**
1614 * postcopy_chunk_hostpages_pass: canocalize bitmap in hostpages
1615 *
1616 * Helper for postcopy_chunk_hostpages; it's called twice to
1617 * canonicalize the two bitmaps, that are similar, but one is
1618 * inverted.
99e314eb 1619 *
3d0684b2
JQ
1620 * Postcopy requires that all target pages in a hostpage are dirty or
1621 * clean, not a mix. This function canonicalizes the bitmaps.
99e314eb 1622 *
3d0684b2
JQ
1623 * @ms: current migration state
1624 * @unsent_pass: if true we need to canonicalize partially unsent host pages
1625 * otherwise we need to canonicalize partially dirty host pages
1626 * @block: block that contains the page we want to canonicalize
1627 * @pds: state for postcopy
99e314eb
DDAG
1628 */
1629static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
1630 RAMBlock *block,
1631 PostcopyDiscardState *pds)
1632{
0d8ec885 1633 RAMState *rs = &ram_state;
6b6712ef
JQ
1634 unsigned long *bitmap = block->bmap;
1635 unsigned long *unsentmap = block->unsentmap;
29c59172 1636 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
6b6712ef 1637 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
99e314eb
DDAG
1638 unsigned long run_start;
1639
29c59172
DDAG
1640 if (block->page_size == TARGET_PAGE_SIZE) {
1641 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
1642 return;
1643 }
1644
99e314eb
DDAG
1645 if (unsent_pass) {
1646 /* Find a sent page */
6b6712ef 1647 run_start = find_next_zero_bit(unsentmap, pages, 0);
99e314eb
DDAG
1648 } else {
1649 /* Find a dirty page */
6b6712ef 1650 run_start = find_next_bit(bitmap, pages, 0);
99e314eb
DDAG
1651 }
1652
6b6712ef 1653 while (run_start < pages) {
99e314eb
DDAG
1654 bool do_fixup = false;
1655 unsigned long fixup_start_addr;
1656 unsigned long host_offset;
1657
1658 /*
1659 * If the start of this run of pages is in the middle of a host
1660 * page, then we need to fixup this host page.
1661 */
1662 host_offset = run_start % host_ratio;
1663 if (host_offset) {
1664 do_fixup = true;
1665 run_start -= host_offset;
1666 fixup_start_addr = run_start;
1667 /* For the next pass */
1668 run_start = run_start + host_ratio;
1669 } else {
1670 /* Find the end of this run */
1671 unsigned long run_end;
1672 if (unsent_pass) {
6b6712ef 1673 run_end = find_next_bit(unsentmap, pages, run_start + 1);
99e314eb 1674 } else {
6b6712ef 1675 run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
99e314eb
DDAG
1676 }
1677 /*
1678 * If the end isn't at the start of a host page, then the
1679 * run doesn't finish at the end of a host page
1680 * and we need to discard.
1681 */
1682 host_offset = run_end % host_ratio;
1683 if (host_offset) {
1684 do_fixup = true;
1685 fixup_start_addr = run_end - host_offset;
1686 /*
1687 * This host page has gone, the next loop iteration starts
1688 * from after the fixup
1689 */
1690 run_start = fixup_start_addr + host_ratio;
1691 } else {
1692 /*
1693 * No discards on this iteration, next loop starts from
1694 * next sent/dirty page
1695 */
1696 run_start = run_end + 1;
1697 }
1698 }
1699
1700 if (do_fixup) {
1701 unsigned long page;
1702
1703 /* Tell the destination to discard this page */
1704 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
1705 /* For the unsent_pass we:
1706 * discard partially sent pages
1707 * For the !unsent_pass (dirty) we:
1708 * discard partially dirty pages that were sent
1709 * (any partially sent pages were already discarded
1710 * by the previous unsent_pass)
1711 */
1712 postcopy_discard_send_range(ms, pds, fixup_start_addr,
1713 host_ratio);
1714 }
1715
1716 /* Clean up the bitmap */
1717 for (page = fixup_start_addr;
1718 page < fixup_start_addr + host_ratio; page++) {
1719 /* All pages in this host page are now not sent */
1720 set_bit(page, unsentmap);
1721
1722 /*
1723 * Remark them as dirty, updating the count for any pages
1724 * that weren't previously dirty.
1725 */
0d8ec885 1726 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
99e314eb
DDAG
1727 }
1728 }
1729
1730 if (unsent_pass) {
1731 /* Find the next sent page for the next iteration */
6b6712ef 1732 run_start = find_next_zero_bit(unsentmap, pages, run_start);
99e314eb
DDAG
1733 } else {
1734 /* Find the next dirty page for the next iteration */
6b6712ef 1735 run_start = find_next_bit(bitmap, pages, run_start);
99e314eb
DDAG
1736 }
1737 }
1738}
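
/*
 * Illustrative sketch (not part of QEMU): the host-page rounding the
 * fixup logic above performs, assuming 2MB huge pages backed by 4KB
 * target pages, i.e. host_ratio = 512. A run starting at target page
 * 1000 has host_offset = 1000 % 512 = 488, so it is rounded down to
 * page 512 and the whole host page [512, 1024) is discarded and
 * re-marked dirty.
 */
static unsigned long example_round_down_to_host_page(unsigned long run_start,
                                                     unsigned long host_ratio)
{
    unsigned long host_offset = run_start % host_ratio;

    if (host_offset) {
        run_start -= host_offset;   /* e.g. 1000 - 488 = 512 */
    }
    return run_start;
}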
1739
3d0684b2
JQ
1740/**
1741 * postcopy_chunk_hostpages: discard any partially sent host page
1742 *
99e314eb
DDAG
1743 * Utility for the outgoing postcopy code.
1744 *
1745 * Discard any partially sent host-page size chunks, mark any partially
29c59172
DDAG
1746 * dirty host-page size chunks as all dirty. Here the host page size
1747 * is that of the particular RAMBlock, i.e. it might be a huge page
99e314eb 1748 *
3d0684b2
JQ
1749 * Returns zero on success
1750 *
1751 * @ms: current migration state
6b6712ef 1752 * @block: block we want to work with
99e314eb 1753 */
6b6712ef 1754static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
99e314eb 1755{
6b6712ef
JQ
1756 PostcopyDiscardState *pds =
1757 postcopy_discard_send_init(ms, block->idstr);
99e314eb 1758
6b6712ef
JQ
1759 /* First pass: Discard all partially sent host pages */
1760 postcopy_chunk_hostpages_pass(ms, true, block, pds);
1761 /*
1762 * Second pass: Ensure that all partially dirty host pages are made
1763 * fully dirty.
1764 */
1765 postcopy_chunk_hostpages_pass(ms, false, block, pds);
99e314eb 1766
6b6712ef 1767 postcopy_discard_send_finish(ms, pds);
99e314eb
DDAG
1768 return 0;
1769}
1770
3d0684b2
JQ
1771/**
1772 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
1773 *
1774 * Returns zero on success
1775 *
e0b266f0
DDAG
1776 * Transmit the set of pages to be discarded after precopy to the target;
1777 * these are pages that:
1778 * a) Have been previously transmitted but are now dirty again
1779 * b) Have never been transmitted; this ensures that any pages on the
1780 * destination that have been mapped by background tasks get discarded
1781 * (transparent huge pages are the specific concern)
1782 * Hopefully this set is pretty sparse
3d0684b2
JQ
1783 *
1784 * @ms: current migration state
e0b266f0
DDAG
1785 */
1786int ram_postcopy_send_discard_bitmap(MigrationState *ms)
1787{
eb859c53 1788 RAMState *rs = &ram_state;
6b6712ef 1789 RAMBlock *block;
e0b266f0 1790 int ret;
e0b266f0
DDAG
1791
1792 rcu_read_lock();
1793
1794 /* This should be our last sync, the src is now paused */
eb859c53 1795 migration_bitmap_sync(rs);
e0b266f0 1796
6b6712ef
JQ
1797 /* Easiest way to make sure we don't resume in the middle of a host-page */
1798 rs->last_seen_block = NULL;
1799 rs->last_sent_block = NULL;
1800 rs->last_page = 0;
e0b266f0 1801
6b6712ef
JQ
1802 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1803 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
1804 unsigned long *bitmap = block->bmap;
1805 unsigned long *unsentmap = block->unsentmap;
1806
1807 if (!unsentmap) {
1808 /* We don't have a safe way to resize the unsentmap, so
1809 * if the bitmap was resized it will be NULL at this
1810 * point.
1811 */
1812 error_report("migration ram resized during precopy phase");
1813 rcu_read_unlock();
1814 return -EINVAL;
1815 }
1816 /* Deal with TPS != HPS and huge pages */
1817 ret = postcopy_chunk_hostpages(ms, block);
1818 if (ret) {
1819 rcu_read_unlock();
1820 return ret;
1821 }
e0b266f0 1822
6b6712ef
JQ
1823 /*
1824 * Update the unsentmap to be unsentmap = unsentmap | dirty
1825 */
1826 bitmap_or(unsentmap, unsentmap, bitmap, pages);
e0b266f0 1827#ifdef DEBUG_POSTCOPY
6b6712ef 1828 ram_debug_dump_bitmap(unsentmap, true, pages);
e0b266f0 1829#endif
6b6712ef
JQ
1830 }
1831 trace_ram_postcopy_send_discard_bitmap();
e0b266f0
DDAG
1832
1833 ret = postcopy_each_ram_send_discard(ms);
1834 rcu_read_unlock();
1835
1836 return ret;
1837}
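
/*
 * Minimal sketch of the discard policy above (hypothetical helper, not
 * a QEMU API): per page, the destination must discard anything that was
 * never sent, or that has been dirtied again since it was sent; that is
 * exactly the unsentmap |= bitmap union computed before transmission.
 */
static bool example_needs_discard(bool never_sent, bool dirty_again)
{
    return never_sent || dirty_again;
}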
1838
3d0684b2
JQ
1839/**
1840 * ram_discard_range: discard dirtied pages at the beginning of postcopy
e0b266f0 1841 *
3d0684b2 1842 * Returns zero on success
e0b266f0 1843 *
36449157
JQ
1844 * @rbname: name of the RAMBlock of the request. NULL means the
1845 * same as the last one.
3d0684b2
JQ
1846 * @start: start offset within the RAMBlock, in bytes
1847 * @length: length of the range to discard, in bytes
e0b266f0 1848 */
aaa2064c 1849int ram_discard_range(const char *rbname, uint64_t start, size_t length)
e0b266f0
DDAG
1850{
1851 int ret = -1;
1852
36449157 1853 trace_ram_discard_range(rbname, start, length);
d3a5038c 1854
e0b266f0 1855 rcu_read_lock();
36449157 1856 RAMBlock *rb = qemu_ram_block_by_name(rbname);
e0b266f0
DDAG
1857
1858 if (!rb) {
36449157 1859 error_report("ram_discard_range: Failed to find block '%s'", rbname);
e0b266f0
DDAG
1860 goto err;
1861 }
1862
d3a5038c 1863 ret = ram_block_discard_range(rb, start, length);
e0b266f0
DDAG
1864
1865err:
1866 rcu_read_unlock();
1867
1868 return ret;
1869}
1870
ceb4d168 1871static int ram_state_init(RAMState *rs)
56e93d26 1872{
ceb4d168 1873 memset(rs, 0, sizeof(*rs));
108cfae0 1874 qemu_mutex_init(&rs->bitmap_mutex);
ec481c6c
JQ
1875 qemu_mutex_init(&rs->src_page_req_mutex);
1876 QSIMPLEQ_INIT(&rs->src_page_requests);
56e93d26
JQ
1877
1878 if (migrate_use_xbzrle()) {
1879 XBZRLE_cache_lock();
adb65dec 1880 ZERO_TARGET_PAGE = g_malloc0(TARGET_PAGE_SIZE);
56e93d26
JQ
1881 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
1882 TARGET_PAGE_SIZE,
1883 TARGET_PAGE_SIZE);
1884 if (!XBZRLE.cache) {
1885 XBZRLE_cache_unlock();
1886 error_report("Error creating cache");
1887 return -1;
1888 }
1889 XBZRLE_cache_unlock();
1890
1891 /* We prefer not to abort if there is no memory */
1892 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
1893 if (!XBZRLE.encoded_buf) {
1894 error_report("Error allocating encoded_buf");
1895 return -1;
1896 }
1897
1898 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
1899 if (!XBZRLE.current_buf) {
1900 error_report("Error allocating current_buf");
1901 g_free(XBZRLE.encoded_buf);
1902 XBZRLE.encoded_buf = NULL;
1903 return -1;
1904 }
56e93d26
JQ
1905 }
1906
49877834
PB
1907 /* For memory_global_dirty_log_start below. */
1908 qemu_mutex_lock_iothread();
1909
56e93d26
JQ
1910 qemu_mutex_lock_ramlist();
1911 rcu_read_lock();
6f37bb8b 1912 ram_state_reset(rs);
56e93d26 1913
0827b9e9
AA
1914 /* Skip setting bitmap if there is no RAM */
1915 if (ram_bytes_total()) {
6b6712ef
JQ
1916 RAMBlock *block;
1917
1918 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1919 unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
0827b9e9 1920
6b6712ef
JQ
1921 block->bmap = bitmap_new(pages);
1922 bitmap_set(block->bmap, 0, pages);
1923 if (migrate_postcopy_ram()) {
1924 block->unsentmap = bitmap_new(pages);
1925 bitmap_set(block->unsentmap, 0, pages);
1926 }
0827b9e9 1927 }
f3f491fc
DDAG
1928 }
1929
56e93d26
JQ
1930 /*
1931 * Count the total number of pages used by ram blocks not including any
1932 * gaps due to alignment or unplugs.
1933 */
0d8ec885 1934 rs->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
56e93d26
JQ
1935
1936 memory_global_dirty_log_start();
8d820d6f 1937 migration_bitmap_sync(rs);
56e93d26 1938 qemu_mutex_unlock_ramlist();
49877834 1939 qemu_mutex_unlock_iothread();
a91246c9
HZ
1940 rcu_read_unlock();
1941
1942 return 0;
1943}
1944
3d0684b2
JQ
1945/*
1946 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has a
a91246c9
HZ
1947 * long-running RCU critical section. When RCU reclaims in the code
1948 * start to become numerous, it will be necessary to reduce the
1949 * granularity of these critical sections.
1950 */
1951
3d0684b2
JQ
1952/**
1953 * ram_save_setup: Setup RAM for migration
1954 *
1955 * Returns zero to indicate success and negative for error
1956 *
1957 * @f: QEMUFile where to send the data
1958 * @opaque: RAMState pointer
1959 */
a91246c9
HZ
1960static int ram_save_setup(QEMUFile *f, void *opaque)
1961{
6f37bb8b 1962 RAMState *rs = opaque;
a91246c9
HZ
1963 RAMBlock *block;
1964
1965 /* migration has already setup the bitmap, reuse it. */
1966 if (!migration_in_colo_state()) {
ceb4d168 1967 if (ram_state_init(rs) < 0) {
a91246c9
HZ
1968 return -1;
1969 }
1970 }
204b88b8 1971 rs->f = f;
a91246c9
HZ
1972
1973 rcu_read_lock();
56e93d26
JQ
1974
1975 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
1976
99e15582 1977 RAMBLOCK_FOREACH(block) {
56e93d26
JQ
1978 qemu_put_byte(f, strlen(block->idstr));
1979 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
1980 qemu_put_be64(f, block->used_length);
ef08fb38
DDAG
1981 if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
1982 qemu_put_be64(f, block->page_size);
1983 }
56e93d26
JQ
1984 }
1985
1986 rcu_read_unlock();
1987
1988 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
1989 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
1990
1991 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
1992
1993 return 0;
1994}
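
/*
 * For reference, the setup-stage wire format produced above, as read
 * back by the RAM_SAVE_FLAG_MEM_SIZE case in ram_load():
 *
 *   be64  total ram bytes | RAM_SAVE_FLAG_MEM_SIZE
 *   for each RAMBlock:
 *     u8    strlen(idstr)
 *     bytes idstr (no NUL terminator on the wire)
 *     be64  used_length
 *     be64  page_size   (only if postcopy is enabled and the block's
 *                        page size differs from qemu_host_page_size)
 *   be64  RAM_SAVE_FLAG_EOS
 */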
1995
3d0684b2
JQ
1996/**
1997 * ram_save_iterate: iterative stage for migration
1998 *
1999 * Returns zero to indicate success and negative for error
2000 *
2001 * @f: QEMUFile where to send the data
2002 * @opaque: RAMState pointer
2003 */
56e93d26
JQ
2004static int ram_save_iterate(QEMUFile *f, void *opaque)
2005{
6f37bb8b 2006 RAMState *rs = opaque;
56e93d26
JQ
2007 int ret;
2008 int i;
2009 int64_t t0;
5c90308f 2010 int done = 0;
56e93d26
JQ
2011
2012 rcu_read_lock();
6f37bb8b
JQ
2013 if (ram_list.version != rs->last_version) {
2014 ram_state_reset(rs);
56e93d26
JQ
2015 }
2016
2017 /* Read version before ram_list.blocks */
2018 smp_rmb();
2019
2020 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
2021
2022 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2023 i = 0;
2024 while ((ret = qemu_file_rate_limit(f)) == 0) {
2025 int pages;
2026
ce25d337 2027 pages = ram_find_and_save_block(rs, false);
56e93d26
JQ
2028 /* no more pages to send */
2029 if (pages == 0) {
5c90308f 2030 done = 1;
56e93d26
JQ
2031 break;
2032 }
23b28c3c 2033 rs->iterations++;
070afca2 2034
56e93d26
JQ
2035 /* We want to check in the 1st loop, just in case it was the 1st time
2036 and we had to sync the dirty bitmap.
2037 qemu_clock_get_ns() is a bit expensive, so we only check every few
2038 iterations.
2039 */
2040 if ((i & 63) == 0) {
2041 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
2042 if (t1 > MAX_WAIT) {
55c4446b 2043 trace_ram_save_iterate_big_wait(t1, i);
56e93d26
JQ
2044 break;
2045 }
2046 }
2047 i++;
2048 }
ce25d337 2049 flush_compressed_data(rs);
56e93d26
JQ
2050 rcu_read_unlock();
2051
2052 /*
2053 * Must occur before EOS (or any QEMUFile operation)
2054 * because of RDMA protocol.
2055 */
2056 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
2057
2058 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2f4fde93 2059 rs->bytes_transferred += 8;
56e93d26
JQ
2060
2061 ret = qemu_file_get_error(f);
2062 if (ret < 0) {
2063 return ret;
2064 }
2065
5c90308f 2066 return done;
56e93d26
JQ
2067}
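
/*
 * Illustrative sketch (hypothetical helper, not part of QEMU): the
 * amortized time check used in the loop above. qemu_clock_get_ns() is
 * comparatively expensive, so the elapsed time is only inspected every
 * 64 iterations, and MAX_WAIT bounds how long one iterate call may run.
 */
static bool example_should_stop(int64_t t0_ns, int iteration,
                                int64_t max_wait_ms)
{
    if (iteration & 63) {
        return false;   /* don't even read the clock most iterations */
    }
    return (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0_ns) / 1000000
           > max_wait_ms;
}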
2068
3d0684b2
JQ
2069/**
2070 * ram_save_complete: function called to send the remaining amount of RAM
2071 *
2072 * Returns zero to indicate success
2073 *
2074 * Called with the iothread lock held
2075 *
2076 * @f: QEMUFile where to send the data
2077 * @opaque: RAMState pointer
2078 */
56e93d26
JQ
2079static int ram_save_complete(QEMUFile *f, void *opaque)
2080{
6f37bb8b
JQ
2081 RAMState *rs = opaque;
2082
56e93d26
JQ
2083 rcu_read_lock();
2084
5727309d 2085 if (!migration_in_postcopy()) {
8d820d6f 2086 migration_bitmap_sync(rs);
663e6c1d 2087 }
56e93d26
JQ
2088
2089 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
2090
2091 /* try transferring iterative blocks of memory */
2092
2093 /* flush all remaining blocks regardless of rate limiting */
2094 while (true) {
2095 int pages;
2096
ce25d337 2097 pages = ram_find_and_save_block(rs, !migration_in_colo_state());
56e93d26
JQ
2098 /* no more blocks to send */
2099 if (pages == 0) {
2100 break;
2101 }
2102 }
2103
ce25d337 2104 flush_compressed_data(rs);
56e93d26 2105 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
56e93d26
JQ
2106
2107 rcu_read_unlock();
d09a6fde 2108
56e93d26
JQ
2109 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2110
2111 return 0;
2112}
2113
c31b098f
DDAG
2114static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
2115 uint64_t *non_postcopiable_pending,
2116 uint64_t *postcopiable_pending)
56e93d26 2117{
8d820d6f 2118 RAMState *rs = opaque;
56e93d26
JQ
2119 uint64_t remaining_size;
2120
9edabd4d 2121 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
56e93d26 2122
5727309d 2123 if (!migration_in_postcopy() &&
663e6c1d 2124 remaining_size < max_size) {
56e93d26
JQ
2125 qemu_mutex_lock_iothread();
2126 rcu_read_lock();
8d820d6f 2127 migration_bitmap_sync(rs);
56e93d26
JQ
2128 rcu_read_unlock();
2129 qemu_mutex_unlock_iothread();
9edabd4d 2130 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
56e93d26 2131 }
c31b098f
DDAG
2132
2133 /* We can do postcopy, and all the data is postcopiable */
2134 *postcopiable_pending += remaining_size;
56e93d26
JQ
2135}
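
/*
 * Note (illustrative): the pending estimate above is simply
 * migration_dirty_pages * TARGET_PAGE_SIZE. The dirty bitmap is only
 * re-synced, an expensive operation needing the iothread lock, once
 * that estimate drops below max_size, i.e. when migration looks close
 * to converging.
 */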
2136
2137static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2138{
2139 unsigned int xh_len;
2140 int xh_flags;
063e760a 2141 uint8_t *loaded_data;
56e93d26
JQ
2142
2143 if (!xbzrle_decoded_buf) {
2144 xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2145 }
063e760a 2146 loaded_data = xbzrle_decoded_buf;
56e93d26
JQ
2147
2148 /* extract RLE header */
2149 xh_flags = qemu_get_byte(f);
2150 xh_len = qemu_get_be16(f);
2151
2152 if (xh_flags != ENCODING_FLAG_XBZRLE) {
2153 error_report("Failed to load XBZRLE page - wrong compression!");
2154 return -1;
2155 }
2156
2157 if (xh_len > TARGET_PAGE_SIZE) {
2158 error_report("Failed to load XBZRLE page - len overflow!");
2159 return -1;
2160 }
2161 /* load data and decode */
063e760a 2162 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
56e93d26
JQ
2163
2164 /* decode RLE */
063e760a 2165 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
56e93d26
JQ
2166 TARGET_PAGE_SIZE) == -1) {
2167 error_report("Failed to load XBZRLE page - decode error!");
2168 return -1;
2169 }
2170
2171 return 0;
2172}
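
/*
 * For reference, the XBZRLE page layout parsed above:
 *
 *   u8    xh_flags: must be ENCODING_FLAG_XBZRLE
 *   be16  xh_len:   encoded length, at most TARGET_PAGE_SIZE
 *   then xh_len bytes of encoded data, decoded against the
 *   destination's current copy of the page
 */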
2173
3d0684b2
JQ
2174/**
2175 * ram_block_from_stream: read a RAMBlock id from the migration stream
2176 *
2177 * Must be called from within an RCU critical section.
2178 *
56e93d26 2179 * Returns a pointer from within the RCU-protected ram_list.
a7180877 2180 *
3d0684b2
JQ
2181 * @f: QEMUFile where to read the data from
2182 * @flags: Page flags (mostly to see if it's a continuation of previous block)
a7180877 2183 */
3d0684b2 2184static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
56e93d26
JQ
2185{
2186 static RAMBlock *block = NULL;
2187 char id[256];
2188 uint8_t len;
2189
2190 if (flags & RAM_SAVE_FLAG_CONTINUE) {
4c4bad48 2191 if (!block) {
56e93d26
JQ
2192 error_report("Ack, bad migration stream!");
2193 return NULL;
2194 }
4c4bad48 2195 return block;
56e93d26
JQ
2196 }
2197
2198 len = qemu_get_byte(f);
2199 qemu_get_buffer(f, (uint8_t *)id, len);
2200 id[len] = 0;
2201
e3dd7493 2202 block = qemu_ram_block_by_name(id);
4c4bad48
HZ
2203 if (!block) {
2204 error_report("Can't find block %s", id);
2205 return NULL;
56e93d26
JQ
2206 }
2207
4c4bad48
HZ
2208 return block;
2209}
2210
2211static inline void *host_from_ram_block_offset(RAMBlock *block,
2212 ram_addr_t offset)
2213{
2214 if (!offset_in_ramblock(block, offset)) {
2215 return NULL;
2216 }
2217
2218 return block->host + offset;
56e93d26
JQ
2219}
2220
3d0684b2
JQ
2221/**
2222 * ram_handle_compressed: handle the zero page case
2223 *
56e93d26
JQ
2224 * If a page (or a whole RDMA chunk) has been
2225 * determined to be zero, then zap it.
3d0684b2
JQ
2226 *
2227 * @host: host address for the zero page
2228 * @ch: the byte the page is filled with. We only support zero
2229 * @size: size of the zero page
56e93d26
JQ
2230 */
2231void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
2232{
2233 if (ch != 0 || !is_zero_range(host, size)) {
2234 memset(host, ch, size);
2235 }
2236}
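
/*
 * Usage example (illustrative): an incoming zero page carries only its
 * fill byte, so placing it into a freshly mapped, and therefore already
 * zero, destination page skips the memset entirely:
 *
 *     ram_handle_compressed(host, 0, TARGET_PAGE_SIZE);
 */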
2237
2238static void *do_data_decompress(void *opaque)
2239{
2240 DecompressParam *param = opaque;
2241 unsigned long pagesize;
33d151f4
LL
2242 uint8_t *des;
2243 int len;
56e93d26 2244
33d151f4 2245 qemu_mutex_lock(&param->mutex);
90e56fb4 2246 while (!param->quit) {
33d151f4
LL
2247 if (param->des) {
2248 des = param->des;
2249 len = param->len;
2250 param->des = 0;
2251 qemu_mutex_unlock(&param->mutex);
2252
56e93d26 2253 pagesize = TARGET_PAGE_SIZE;
73a8912b
LL
2254 /* uncompress() can fail in some cases, especially
2255 * when the page was dirtied while being compressed; that's
2256 * not a problem because the dirty page will be retransferred
2257 * and uncompress() won't corrupt the data in other pages.
2258 */
33d151f4
LL
2259 uncompress((Bytef *)des, &pagesize,
2260 (const Bytef *)param->compbuf, len);
73a8912b 2261
33d151f4
LL
2262 qemu_mutex_lock(&decomp_done_lock);
2263 param->done = true;
2264 qemu_cond_signal(&decomp_done_cond);
2265 qemu_mutex_unlock(&decomp_done_lock);
2266
2267 qemu_mutex_lock(&param->mutex);
2268 } else {
2269 qemu_cond_wait(&param->cond, &param->mutex);
2270 }
56e93d26 2271 }
33d151f4 2272 qemu_mutex_unlock(&param->mutex);
56e93d26
JQ
2273
2274 return NULL;
2275}
2276
5533b2e9
LL
2277static void wait_for_decompress_done(void)
2278{
2279 int idx, thread_count;
2280
2281 if (!migrate_use_compression()) {
2282 return;
2283 }
2284
2285 thread_count = migrate_decompress_threads();
2286 qemu_mutex_lock(&decomp_done_lock);
2287 for (idx = 0; idx < thread_count; idx++) {
2288 while (!decomp_param[idx].done) {
2289 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
2290 }
2291 }
2292 qemu_mutex_unlock(&decomp_done_lock);
2293}
2294
56e93d26
JQ
2295void migrate_decompress_threads_create(void)
2296{
2297 int i, thread_count;
2298
2299 thread_count = migrate_decompress_threads();
2300 decompress_threads = g_new0(QemuThread, thread_count);
2301 decomp_param = g_new0(DecompressParam, thread_count);
73a8912b
LL
2302 qemu_mutex_init(&decomp_done_lock);
2303 qemu_cond_init(&decomp_done_cond);
56e93d26
JQ
2304 for (i = 0; i < thread_count; i++) {
2305 qemu_mutex_init(&decomp_param[i].mutex);
2306 qemu_cond_init(&decomp_param[i].cond);
2307 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
73a8912b 2308 decomp_param[i].done = true;
90e56fb4 2309 decomp_param[i].quit = false;
56e93d26
JQ
2310 qemu_thread_create(decompress_threads + i, "decompress",
2311 do_data_decompress, decomp_param + i,
2312 QEMU_THREAD_JOINABLE);
2313 }
2314}
2315
2316void migrate_decompress_threads_join(void)
2317{
2318 int i, thread_count;
2319
56e93d26
JQ
2320 thread_count = migrate_decompress_threads();
2321 for (i = 0; i < thread_count; i++) {
2322 qemu_mutex_lock(&decomp_param[i].mutex);
90e56fb4 2323 decomp_param[i].quit = true;
56e93d26
JQ
2324 qemu_cond_signal(&decomp_param[i].cond);
2325 qemu_mutex_unlock(&decomp_param[i].mutex);
2326 }
2327 for (i = 0; i < thread_count; i++) {
2328 qemu_thread_join(decompress_threads + i);
2329 qemu_mutex_destroy(&decomp_param[i].mutex);
2330 qemu_cond_destroy(&decomp_param[i].cond);
2331 g_free(decomp_param[i].compbuf);
2332 }
2333 g_free(decompress_threads);
2334 g_free(decomp_param);
56e93d26
JQ
2335 decompress_threads = NULL;
2336 decomp_param = NULL;
56e93d26
JQ
2337}
2338
c1bc6626 2339static void decompress_data_with_multi_threads(QEMUFile *f,
56e93d26
JQ
2340 void *host, int len)
2341{
2342 int idx, thread_count;
2343
2344 thread_count = migrate_decompress_threads();
73a8912b 2345 qemu_mutex_lock(&decomp_done_lock);
56e93d26
JQ
2346 while (true) {
2347 for (idx = 0; idx < thread_count; idx++) {
73a8912b 2348 if (decomp_param[idx].done) {
33d151f4
LL
2349 decomp_param[idx].done = false;
2350 qemu_mutex_lock(&decomp_param[idx].mutex);
c1bc6626 2351 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
56e93d26
JQ
2352 decomp_param[idx].des = host;
2353 decomp_param[idx].len = len;
33d151f4
LL
2354 qemu_cond_signal(&decomp_param[idx].cond);
2355 qemu_mutex_unlock(&decomp_param[idx].mutex);
56e93d26
JQ
2356 break;
2357 }
2358 }
2359 if (idx < thread_count) {
2360 break;
73a8912b
LL
2361 } else {
2362 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
56e93d26
JQ
2363 }
2364 }
73a8912b 2365 qemu_mutex_unlock(&decomp_done_lock);
56e93d26
JQ
2366}
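
/*
 * The hand-off pattern above, reduced to its core (sketch, not a QEMU
 * API): the feeder claims an idle worker by clearing ->done, fills the
 * slot under that worker's mutex and signals its condition variable;
 * the worker signals decomp_done_cond back once decompression is done,
 * and the feeder blocks on it when every worker is busy.
 */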
2367
3d0684b2
JQ
2368/**
2369 * ram_postcopy_incoming_init: allocate postcopy data structures
2370 *
2371 * Returns 0 for success and negative on error
2372 *
2373 * @mis: current migration incoming state
2374 *
2375 * Allocate data structures etc needed by incoming migration with
2376 * postcopy-ram. postcopy-ram's similarly named
2377 * postcopy_ram_incoming_init does the work.
1caddf8a
DDAG
2378 */
2379int ram_postcopy_incoming_init(MigrationIncomingState *mis)
2380{
b8c48993 2381 unsigned long ram_pages = last_ram_page();
1caddf8a
DDAG
2382
2383 return postcopy_ram_incoming_init(mis, ram_pages);
2384}
2385
3d0684b2
JQ
2386/**
2387 * ram_load_postcopy: load a page in postcopy case
2388 *
2389 * Returns 0 for success or -errno in case of error
2390 *
a7180877
DDAG
2391 * Called in postcopy mode by ram_load().
2392 * rcu_read_lock is taken prior to this being called.
3d0684b2
JQ
2393 *
2394 * @f: QEMUFile to read the data from
a7180877
DDAG
2395 */
2396static int ram_load_postcopy(QEMUFile *f)
2397{
2398 int flags = 0, ret = 0;
2399 bool place_needed = false;
28abd200 2400 bool matching_page_sizes = false;
a7180877
DDAG
2401 MigrationIncomingState *mis = migration_incoming_get_current();
2402 /* Temporary page that is later 'placed' */
2403 void *postcopy_host_page = postcopy_get_tmp_page(mis);
c53b7ddc 2404 void *last_host = NULL;
a3b6ff6d 2405 bool all_zero = false;
a7180877
DDAG
2406
2407 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2408 ram_addr_t addr;
2409 void *host = NULL;
2410 void *page_buffer = NULL;
2411 void *place_source = NULL;
df9ff5e1 2412 RAMBlock *block = NULL;
a7180877 2413 uint8_t ch;
a7180877
DDAG
2414
2415 addr = qemu_get_be64(f);
2416 flags = addr & ~TARGET_PAGE_MASK;
2417 addr &= TARGET_PAGE_MASK;
2418
2419 trace_ram_load_postcopy_loop((uint64_t)addr, flags);
2420 place_needed = false;
bb890ed5 2421 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
df9ff5e1 2422 block = ram_block_from_stream(f, flags);
4c4bad48
HZ
2423
2424 host = host_from_ram_block_offset(block, addr);
a7180877
DDAG
2425 if (!host) {
2426 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2427 ret = -EINVAL;
2428 break;
2429 }
28abd200 2430 matching_page_sizes = block->page_size == TARGET_PAGE_SIZE;
a7180877 2431 /*
28abd200
DDAG
2432 * Postcopy requires that we place whole host pages atomically;
2433 * these may be huge pages for RAMBlocks that are backed by
2434 * hugetlbfs.
a7180877
DDAG
2435 * To make it atomic, the data is read into a temporary page
2436 * that's moved into place later.
2437 * The migration protocol uses, possibly smaller, target pages;
2438 * however, the source ensures it always sends all the components
2439 * of a host page in order.
2440 */
2441 page_buffer = postcopy_host_page +
28abd200 2442 ((uintptr_t)host & (block->page_size - 1));
a7180877 2443 /* If all TP are zero then we can optimise the place */
28abd200 2444 if (!((uintptr_t)host & (block->page_size - 1))) {
a7180877 2445 all_zero = true;
c53b7ddc
DDAG
2446 } else {
2447 /* not the 1st target page within the host page */
2448 if (host != (last_host + TARGET_PAGE_SIZE)) {
9af9e0fe 2449 error_report("Non-sequential target page %p/%p",
c53b7ddc
DDAG
2450 host, last_host);
2451 ret = -EINVAL;
2452 break;
2453 }
a7180877
DDAG
2454 }
2455
c53b7ddc 2456
a7180877
DDAG
2457 /*
2458 * If it's the last part of a host page then we place the host
2459 * page
2460 */
2461 place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
28abd200 2462 (block->page_size - 1)) == 0;
a7180877
DDAG
2463 place_source = postcopy_host_page;
2464 }
c53b7ddc 2465 last_host = host;
a7180877
DDAG
2466
2467 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
bb890ed5 2468 case RAM_SAVE_FLAG_ZERO:
a7180877
DDAG
2469 ch = qemu_get_byte(f);
2470 memset(page_buffer, ch, TARGET_PAGE_SIZE);
2471 if (ch) {
2472 all_zero = false;
2473 }
2474 break;
2475
2476 case RAM_SAVE_FLAG_PAGE:
2477 all_zero = false;
2478 if (!place_needed || !matching_page_sizes) {
2479 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
2480 } else {
2481 /* Avoids the qemu_file buffer copy during postcopy, since
2482 * placement does its own copy later anyway; we can only read
2483 * in place when the page arrives in one go (matching page sizes)
2484 */
2485 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
2486 TARGET_PAGE_SIZE);
2487 }
2488 break;
2489 case RAM_SAVE_FLAG_EOS:
2490 /* normal exit */
2491 break;
2492 default:
2493 error_report("Unknown combination of migration flags: %#x"
2494 " (postcopy mode)", flags);
2495 ret = -EINVAL;
2496 }
2497
2498 if (place_needed) {
2499 /* This gets called at the last target page in the host page */
df9ff5e1
DDAG
2500 void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;
2501
a7180877 2502 if (all_zero) {
df9ff5e1
DDAG
2503 ret = postcopy_place_page_zero(mis, place_dest,
2504 block->page_size);
a7180877 2505 } else {
df9ff5e1
DDAG
2506 ret = postcopy_place_page(mis, place_dest,
2507 place_source, block->page_size);
a7180877
DDAG
2508 }
2509 }
2510 if (!ret) {
2511 ret = qemu_file_get_error(f);
2512 }
2513 }
2514
2515 return ret;
2516}
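
/*
 * Minimal sketch of the alignment tests above (hypothetical helper):
 * with power-of-two page sizes, the temporary host page is flushed
 * ("placed") exactly when the target page just written is the last
 * target page inside its host page.
 */
static bool example_is_last_target_page(uintptr_t host, uintptr_t tps,
                                        uintptr_t hps)
{
    return ((host + tps) & (hps - 1)) == 0;
}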
2517
56e93d26
JQ
2518static int ram_load(QEMUFile *f, void *opaque, int version_id)
2519{
2520 int flags = 0, ret = 0;
2521 static uint64_t seq_iter;
2522 int len = 0;
a7180877
DDAG
2523 /*
2524 * If the system is running in postcopy mode, page inserts into host
2525 * memory must be atomic
2526 */
2527 bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;
ef08fb38
DDAG
2528 /* ADVISE comes earlier; it shows the source has the postcopy capability enabled */
2529 bool postcopy_advised = postcopy_state_get() >= POSTCOPY_INCOMING_ADVISE;
56e93d26
JQ
2530
2531 seq_iter++;
2532
2533 if (version_id != 4) {
2534 ret = -EINVAL;
2535 }
2536
2537 /* This RCU critical section can be very long running.
2538 * When RCU reclaims in the code start to become numerous,
2539 * it will be necessary to reduce the granularity of this
2540 * critical section.
2541 */
2542 rcu_read_lock();
a7180877
DDAG
2543
2544 if (postcopy_running) {
2545 ret = ram_load_postcopy(f);
2546 }
2547
2548 while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
56e93d26 2549 ram_addr_t addr, total_ram_bytes;
a776aa15 2550 void *host = NULL;
56e93d26
JQ
2551 uint8_t ch;
2552
2553 addr = qemu_get_be64(f);
2554 flags = addr & ~TARGET_PAGE_MASK;
2555 addr &= TARGET_PAGE_MASK;
2556
bb890ed5 2557 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
a776aa15 2558 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
4c4bad48
HZ
2559 RAMBlock *block = ram_block_from_stream(f, flags);
2560
2561 host = host_from_ram_block_offset(block, addr);
a776aa15
DDAG
2562 if (!host) {
2563 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2564 ret = -EINVAL;
2565 break;
2566 }
1db9d8e5 2567 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
a776aa15
DDAG
2568 }
2569
56e93d26
JQ
2570 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2571 case RAM_SAVE_FLAG_MEM_SIZE:
2572 /* Synchronize RAM block list */
2573 total_ram_bytes = addr;
2574 while (!ret && total_ram_bytes) {
2575 RAMBlock *block;
56e93d26
JQ
2576 char id[256];
2577 ram_addr_t length;
2578
2579 len = qemu_get_byte(f);
2580 qemu_get_buffer(f, (uint8_t *)id, len);
2581 id[len] = 0;
2582 length = qemu_get_be64(f);
2583
e3dd7493
DDAG
2584 block = qemu_ram_block_by_name(id);
2585 if (block) {
2586 if (length != block->used_length) {
2587 Error *local_err = NULL;
56e93d26 2588
fa53a0e5 2589 ret = qemu_ram_resize(block, length,
e3dd7493
DDAG
2590 &local_err);
2591 if (local_err) {
2592 error_report_err(local_err);
56e93d26 2593 }
56e93d26 2594 }
ef08fb38
DDAG
2595 /* For postcopy we need to check hugepage sizes match */
2596 if (postcopy_advised &&
2597 block->page_size != qemu_host_page_size) {
2598 uint64_t remote_page_size = qemu_get_be64(f);
2599 if (remote_page_size != block->page_size) {
2600 error_report("Mismatched RAM page size %s "
2601 "(local) %zd != %" PRId64,
2602 id, block->page_size,
2603 remote_page_size);
2604 ret = -EINVAL;
2605 }
2606 }
e3dd7493
DDAG
2607 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
2608 block->idstr);
2609 } else {
56e93d26
JQ
2610 error_report("Unknown ramblock \"%s\", cannot "
2611 "accept migration", id);
2612 ret = -EINVAL;
2613 }
2614
2615 total_ram_bytes -= length;
2616 }
2617 break;
a776aa15 2618
bb890ed5 2619 case RAM_SAVE_FLAG_ZERO:
56e93d26
JQ
2620 ch = qemu_get_byte(f);
2621 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
2622 break;
a776aa15 2623
56e93d26 2624 case RAM_SAVE_FLAG_PAGE:
56e93d26
JQ
2625 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
2626 break;
56e93d26 2627
a776aa15 2628 case RAM_SAVE_FLAG_COMPRESS_PAGE:
56e93d26
JQ
2629 len = qemu_get_be32(f);
2630 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
2631 error_report("Invalid compressed data length: %d", len);
2632 ret = -EINVAL;
2633 break;
2634 }
c1bc6626 2635 decompress_data_with_multi_threads(f, host, len);
56e93d26 2636 break;
a776aa15 2637
56e93d26 2638 case RAM_SAVE_FLAG_XBZRLE:
56e93d26
JQ
2639 if (load_xbzrle(f, addr, host) < 0) {
2640 error_report("Failed to decompress XBZRLE page at "
2641 RAM_ADDR_FMT, addr);
2642 ret = -EINVAL;
2643 break;
2644 }
2645 break;
2646 case RAM_SAVE_FLAG_EOS:
2647 /* normal exit */
2648 break;
2649 default:
2650 if (flags & RAM_SAVE_FLAG_HOOK) {
632e3a5c 2651 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
56e93d26
JQ
2652 } else {
2653 error_report("Unknown combination of migration flags: %#x",
2654 flags);
2655 ret = -EINVAL;
2656 }
2657 }
2658 if (!ret) {
2659 ret = qemu_file_get_error(f);
2660 }
2661 }
2662
5533b2e9 2663 wait_for_decompress_done();
56e93d26 2664 rcu_read_unlock();
55c4446b 2665 trace_ram_load_complete(ret, seq_iter);
56e93d26
JQ
2666 return ret;
2667}
2668
2669static SaveVMHandlers savevm_ram_handlers = {
2670 .save_live_setup = ram_save_setup,
2671 .save_live_iterate = ram_save_iterate,
763c906b 2672 .save_live_complete_postcopy = ram_save_complete,
a3e06c3d 2673 .save_live_complete_precopy = ram_save_complete,
56e93d26
JQ
2674 .save_live_pending = ram_save_pending,
2675 .load_state = ram_load,
6ad2a215 2676 .cleanup = ram_migration_cleanup,
56e93d26
JQ
2677};
2678
2679void ram_mig_init(void)
2680{
2681 qemu_mutex_init(&XBZRLE.lock);
6f37bb8b 2682 register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
56e93d26 2683}