1/*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 * Copyright (c) 2011-2015 Red Hat Inc
6 *
7 * Authors:
8 * Juan Quintela <quintela@redhat.com>
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 * THE SOFTWARE.
27 */
1393a485 28#include "qemu/osdep.h"
56e93d26 29#include <zlib.h>
4addcd4f 30#include "qapi-event.h"
31#include "qemu/bitops.h"
32#include "qemu/bitmap.h"
33#include "qemu/timer.h"
34#include "qemu/main-loop.h"
56e93d26 35#include "migration/migration.h"
e0b266f0 36#include "migration/postcopy-ram.h"
37#include "exec/address-spaces.h"
38#include "migration/page_cache.h"
56e93d26 39#include "qemu/error-report.h"
56e93d26 40#include "trace.h"
56e93d26 41#include "exec/ram_addr.h"
42#include "qemu/rcu_queue.h"
43
44#ifdef DEBUG_MIGRATION_RAM
45#define DPRINTF(fmt, ...) \
46 do { fprintf(stdout, "migration_ram: " fmt, ## __VA_ARGS__); } while (0)
47#else
48#define DPRINTF(fmt, ...) \
49 do { } while (0)
50#endif
51
56e93d26 52static int dirty_rate_high_cnt;
53
54static uint64_t bitmap_sync_count;
55
56/***********************************************************/
57/* ram save/restore */
58
59#define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
60#define RAM_SAVE_FLAG_COMPRESS 0x02
61#define RAM_SAVE_FLAG_MEM_SIZE 0x04
62#define RAM_SAVE_FLAG_PAGE 0x08
63#define RAM_SAVE_FLAG_EOS 0x10
64#define RAM_SAVE_FLAG_CONTINUE 0x20
65#define RAM_SAVE_FLAG_XBZRLE 0x40
66/* 0x80 is reserved in migration.h; start with 0x100 next */
67#define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
68
69static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
70
71static inline bool is_zero_range(uint8_t *p, uint64_t size)
72{
73 return buffer_find_nonzero_offset(p, size) == size;
74}
75
76/* This struct contains the XBZRLE cache and a static page
77 used by the compression */
78static struct {
79 /* buffer used for XBZRLE encoding */
80 uint8_t *encoded_buf;
81 /* buffer for storing page content */
82 uint8_t *current_buf;
83 /* Cache for XBZRLE, Protected by lock. */
84 PageCache *cache;
85 QemuMutex lock;
86} XBZRLE;
87
88/* buffer used for XBZRLE decoding */
89static uint8_t *xbzrle_decoded_buf;
90
91static void XBZRLE_cache_lock(void)
92{
93 if (migrate_use_xbzrle())
94 qemu_mutex_lock(&XBZRLE.lock);
95}
96
97static void XBZRLE_cache_unlock(void)
98{
99 if (migrate_use_xbzrle())
100 qemu_mutex_unlock(&XBZRLE.lock);
101}
102
103/*
104 * called from qmp_migrate_set_cache_size in main thread, possibly while
105 * a migration is in progress.
106 * A running migration may be using the cache and might finish during this
107 * call, hence changes to the cache are protected by the XBZRLE.lock mutex.
108 */
109int64_t xbzrle_cache_resize(int64_t new_size)
110{
111 PageCache *new_cache;
112 int64_t ret;
113
114 if (new_size < TARGET_PAGE_SIZE) {
115 return -1;
116 }
117
118 XBZRLE_cache_lock();
119
120 if (XBZRLE.cache != NULL) {
121 if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
122 goto out_new_size;
123 }
124 new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
125 TARGET_PAGE_SIZE);
126 if (!new_cache) {
127 error_report("Error creating cache");
128 ret = -1;
129 goto out;
130 }
131
132 cache_fini(XBZRLE.cache);
133 XBZRLE.cache = new_cache;
134 }
135
136out_new_size:
137 ret = pow2floor(new_size);
138out:
139 XBZRLE_cache_unlock();
140 return ret;
141}
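/* A rough usage sketch (the real caller lives in migration code outside this
 * file; the names below are illustrative only, not taken from that caller):
 *
 *     int64_t actual = xbzrle_cache_resize(requested_size);
 *     if (actual < 0) {
 *         reject the request: size too small or cache allocation failed
 *     } else {
 *         'actual' is the rounded-down power-of-two size now in use
 *     }
 */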
142
143/* accounting for migration statistics */
144typedef struct AccountingInfo {
145 uint64_t dup_pages;
146 uint64_t skipped_pages;
147 uint64_t norm_pages;
148 uint64_t iterations;
149 uint64_t xbzrle_bytes;
150 uint64_t xbzrle_pages;
151 uint64_t xbzrle_cache_miss;
152 double xbzrle_cache_miss_rate;
153 uint64_t xbzrle_overflows;
154} AccountingInfo;
155
156static AccountingInfo acct_info;
157
158static void acct_clear(void)
159{
160 memset(&acct_info, 0, sizeof(acct_info));
161}
162
163uint64_t dup_mig_bytes_transferred(void)
164{
165 return acct_info.dup_pages * TARGET_PAGE_SIZE;
166}
167
168uint64_t dup_mig_pages_transferred(void)
169{
170 return acct_info.dup_pages;
171}
172
173uint64_t skipped_mig_bytes_transferred(void)
174{
175 return acct_info.skipped_pages * TARGET_PAGE_SIZE;
176}
177
178uint64_t skipped_mig_pages_transferred(void)
179{
180 return acct_info.skipped_pages;
181}
182
183uint64_t norm_mig_bytes_transferred(void)
184{
185 return acct_info.norm_pages * TARGET_PAGE_SIZE;
186}
187
188uint64_t norm_mig_pages_transferred(void)
189{
190 return acct_info.norm_pages;
191}
192
193uint64_t xbzrle_mig_bytes_transferred(void)
194{
195 return acct_info.xbzrle_bytes;
196}
197
198uint64_t xbzrle_mig_pages_transferred(void)
199{
200 return acct_info.xbzrle_pages;
201}
202
203uint64_t xbzrle_mig_pages_cache_miss(void)
204{
205 return acct_info.xbzrle_cache_miss;
206}
207
208double xbzrle_mig_cache_miss_rate(void)
209{
210 return acct_info.xbzrle_cache_miss_rate;
211}
212
213uint64_t xbzrle_mig_pages_overflow(void)
214{
215 return acct_info.xbzrle_overflows;
216}
217
218/* This is the last block that we have visited searching for dirty pages
219 */
220static RAMBlock *last_seen_block;
221/* This is the last block from where we have sent data */
222static RAMBlock *last_sent_block;
223static ram_addr_t last_offset;
dd631697 224static QemuMutex migration_bitmap_mutex;
225static uint64_t migration_dirty_pages;
226static uint32_t last_version;
227static bool ram_bulk_stage;
228
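/* ram_bulk_stage is true during the first complete pass over guest RAM, when
 * every page is assumed dirty; the dirty-page search uses it to skip bitmap
 * lookups until the first wrap-around or the first out-of-order page request.
 */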
229/* used by the search for pages to send */
230struct PageSearchStatus {
231 /* Current block being searched */
232 RAMBlock *block;
233 /* Current offset to search from */
234 ram_addr_t offset;
235 /* Set once we wrap around */
236 bool complete_round;
237};
238typedef struct PageSearchStatus PageSearchStatus;
239
240static struct BitmapRcu {
241 struct rcu_head rcu;
f3f491fc 242 /* Main migration bitmap */
60be6340 243 unsigned long *bmap;
244 /* bitmap of pages that haven't been sent even once
245 * only maintained and used in postcopy at the moment
246 * where it's used to send the dirtymap at the start
247 * of the postcopy phase
248 */
249 unsigned long *unsentmap;
250} *migration_bitmap_rcu;
251
252struct CompressParam {
253 bool start;
254 bool done;
255 QEMUFile *file;
256 QemuMutex mutex;
257 QemuCond cond;
258 RAMBlock *block;
259 ram_addr_t offset;
260};
261typedef struct CompressParam CompressParam;
262
263struct DecompressParam {
264 bool start;
265 QemuMutex mutex;
266 QemuCond cond;
267 void *des;
d341d9f3 268 uint8_t *compbuf;
269 int len;
270};
271typedef struct DecompressParam DecompressParam;
272
273static CompressParam *comp_param;
274static QemuThread *compress_threads;
275/* comp_done_cond is used to wake up the migration thread when
276 * one of the compression threads has finished the compression.
277 * comp_done_lock is used together with comp_done_cond.
278 */
279static QemuMutex *comp_done_lock;
280static QemuCond *comp_done_cond;
281/* The empty QEMUFileOps will be used by file in CompressParam */
282static const QEMUFileOps empty_ops = { };
283
284static bool compression_switch;
285static bool quit_comp_thread;
286static bool quit_decomp_thread;
287static DecompressParam *decomp_param;
288static QemuThread *decompress_threads;
289
290static int do_compress_ram_page(CompressParam *param);
291
292static void *do_data_compress(void *opaque)
293{
294 CompressParam *param = opaque;
295
296 while (!quit_comp_thread) {
297 qemu_mutex_lock(&param->mutex);
298 /* Re-check quit_comp_thread in case
299 * terminate_compression_threads() is called just before
300 * qemu_mutex_lock(&param->mutex) and after
301 * while (!quit_comp_thread); re-checking it here makes
302 * sure the compression thread terminates as expected.
303 */
304 while (!param->start && !quit_comp_thread) {
305 qemu_cond_wait(&param->cond, &param->mutex);
306 }
307 if (!quit_comp_thread) {
308 do_compress_ram_page(param);
309 }
310 param->start = false;
311 qemu_mutex_unlock(&param->mutex);
312
313 qemu_mutex_lock(comp_done_lock);
314 param->done = true;
315 qemu_cond_signal(comp_done_cond);
316 qemu_mutex_unlock(comp_done_lock);
317 }
318
319 return NULL;
320}
321
322static inline void terminate_compression_threads(void)
323{
324 int idx, thread_count;
325
326 thread_count = migrate_compress_threads();
327 quit_comp_thread = true;
328 for (idx = 0; idx < thread_count; idx++) {
329 qemu_mutex_lock(&comp_param[idx].mutex);
330 qemu_cond_signal(&comp_param[idx].cond);
331 qemu_mutex_unlock(&comp_param[idx].mutex);
332 }
333}
334
335void migrate_compress_threads_join(void)
336{
337 int i, thread_count;
338
339 if (!migrate_use_compression()) {
340 return;
341 }
342 terminate_compression_threads();
343 thread_count = migrate_compress_threads();
344 for (i = 0; i < thread_count; i++) {
345 qemu_thread_join(compress_threads + i);
346 qemu_fclose(comp_param[i].file);
347 qemu_mutex_destroy(&comp_param[i].mutex);
348 qemu_cond_destroy(&comp_param[i].cond);
349 }
350 qemu_mutex_destroy(comp_done_lock);
351 qemu_cond_destroy(comp_done_cond);
352 g_free(compress_threads);
353 g_free(comp_param);
354 g_free(comp_done_cond);
355 g_free(comp_done_lock);
356 compress_threads = NULL;
357 comp_param = NULL;
358 comp_done_cond = NULL;
359 comp_done_lock = NULL;
360}
361
362void migrate_compress_threads_create(void)
363{
364 int i, thread_count;
365
366 if (!migrate_use_compression()) {
367 return;
368 }
369 quit_comp_thread = false;
370 compression_switch = true;
371 thread_count = migrate_compress_threads();
372 compress_threads = g_new0(QemuThread, thread_count);
373 comp_param = g_new0(CompressParam, thread_count);
374 comp_done_cond = g_new0(QemuCond, 1);
375 comp_done_lock = g_new0(QemuMutex, 1);
376 qemu_cond_init(comp_done_cond);
377 qemu_mutex_init(comp_done_lock);
378 for (i = 0; i < thread_count; i++) {
379 /* comp_param[i].file is just used as a dummy buffer to save data; set
380 * its ops to empty.
381 */
382 comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
383 comp_param[i].done = true;
384 qemu_mutex_init(&comp_param[i].mutex);
385 qemu_cond_init(&comp_param[i].cond);
386 qemu_thread_create(compress_threads + i, "compress",
387 do_data_compress, comp_param + i,
388 QEMU_THREAD_JOINABLE);
389 }
390}
391
392/**
393 * save_page_header: Write page header to wire
394 *
395 * If this is the 1st block, it also writes the block identification
396 *
397 * Returns: Number of bytes written
398 *
399 * @f: QEMUFile where to send the data
400 * @block: block that contains the page we want to send
401 * @offset: offset inside the block for the page
402 * in the lower bits, it contains flags
403 */
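/*
 * Sketch of what save_page_header() puts on the wire (derived from the code
 * below, not from separate documentation):
 *   - 8 bytes: offset within the RAMBlock, ORed with RAM_SAVE_FLAG_* bits
 *   - unless RAM_SAVE_FLAG_CONTINUE is set: 1 byte giving the length of the
 *     block idstr, followed by the idstr bytes themselves
 */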
404static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
405{
9f5f380b 406 size_t size, len;
407
408 qemu_put_be64(f, offset);
409 size = 8;
410
411 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
412 len = strlen(block->idstr);
413 qemu_put_byte(f, len);
414 qemu_put_buffer(f, (uint8_t *)block->idstr, len);
415 size += 1 + len;
416 }
417 return size;
418}
419
420/* Reduce amount of guest cpu execution to hopefully slow down memory writes.
421 * If guest dirty memory rate is reduced below the rate at which we can
422 * transfer pages to the destination then we should be able to complete
423 * migration. Some workloads dirty memory way too fast and will not effectively
424 * converge, even with auto-converge.
425 */
426static void mig_throttle_guest_down(void)
427{
428 MigrationState *s = migrate_get_current();
429 uint64_t pct_initial =
430 s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
431 uint64_t pct_increment =
432 s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];
433
434 /* We have not started throttling yet. Let's start it. */
435 if (!cpu_throttle_active()) {
436 cpu_throttle_set(pct_initial);
437 } else {
438 /* Throttling already on, just increase the rate */
439 cpu_throttle_set(cpu_throttle_get_percentage() + pct_increment);
440 }
441}
442
443/* Update the xbzrle cache to reflect a page that's been sent as all 0.
444 * The important thing is that a stale (not-yet-0'd) page be replaced
445 * by the new data.
446 * As a bonus, if the page wasn't in the cache it gets added so that
447 * when a small write is made into the 0'd page it gets XBZRLE sent
448 */
449static void xbzrle_cache_zero_page(ram_addr_t current_addr)
450{
451 if (ram_bulk_stage || !migrate_use_xbzrle()) {
452 return;
453 }
454
455 /* We don't care if this fails to allocate a new cache page
456 * as long as it updates an old one */
457 cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
458 bitmap_sync_count);
459}
460
461#define ENCODING_FLAG_XBZRLE 0x1
462
463/**
464 * save_xbzrle_page: compress and send current page
465 *
466 * Returns: 1 means that we wrote the page
467 * 0 means that page is identical to the one already sent
468 * -1 means that xbzrle would be longer than normal
469 *
470 * @f: QEMUFile where to send the data
471 * @current_data:
472 * @current_addr:
473 * @block: block that contains the page we want to send
474 * @offset: offset inside the block for the page
475 * @last_stage: if we are at the completion stage
476 * @bytes_transferred: increase it with the number of transferred bytes
477 */
478static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
479 ram_addr_t current_addr, RAMBlock *block,
480 ram_addr_t offset, bool last_stage,
481 uint64_t *bytes_transferred)
482{
483 int encoded_len = 0, bytes_xbzrle;
484 uint8_t *prev_cached_page;
485
486 if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
487 acct_info.xbzrle_cache_miss++;
488 if (!last_stage) {
489 if (cache_insert(XBZRLE.cache, current_addr, *current_data,
490 bitmap_sync_count) == -1) {
491 return -1;
492 } else {
493 /* update *current_data when the page has been
494 inserted into cache */
495 *current_data = get_cached_data(XBZRLE.cache, current_addr);
496 }
497 }
498 return -1;
499 }
500
501 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
502
503 /* save current buffer into memory */
504 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
505
506 /* XBZRLE encoding (if there is no overflow) */
507 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
508 TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
509 TARGET_PAGE_SIZE);
510 if (encoded_len == 0) {
511 DPRINTF("Skipping unmodified page\n");
512 return 0;
513 } else if (encoded_len == -1) {
514 DPRINTF("Overflow\n");
515 acct_info.xbzrle_overflows++;
516 /* update data in the cache */
517 if (!last_stage) {
518 memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
519 *current_data = prev_cached_page;
520 }
521 return -1;
522 }
523
524 /* we need to update the data in the cache, in order to get the same data */
525 if (!last_stage) {
526 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
527 }
528
529 /* Send XBZRLE based compressed page */
530 bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
531 qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
532 qemu_put_be16(f, encoded_len);
533 qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
534 bytes_xbzrle += encoded_len + 1 + 2;
535 acct_info.xbzrle_pages++;
536 acct_info.xbzrle_bytes += bytes_xbzrle;
537 *bytes_transferred += bytes_xbzrle;
538
539 return 1;
540}
541
542/* Called with rcu_read_lock() to protect migration_bitmap
543 * rb: The RAMBlock to search for dirty pages in
544 * start: Start address (typically so we can continue from previous page)
545 * ram_addr_abs: Pointer into which to store the address of the dirty page
546 * within the global ram_addr space
547 *
548 * Returns: byte offset within memory region of the start of a dirty page
549 */
56e93d26 550static inline
551ram_addr_t migration_bitmap_find_dirty(RAMBlock *rb,
552 ram_addr_t start,
553 ram_addr_t *ram_addr_abs)
56e93d26 554{
2f68e399 555 unsigned long base = rb->offset >> TARGET_PAGE_BITS;
56e93d26 556 unsigned long nr = base + (start >> TARGET_PAGE_BITS);
557 uint64_t rb_size = rb->used_length;
558 unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
2ff64038 559 unsigned long *bitmap;
560
561 unsigned long next;
562
60be6340 563 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
564 if (ram_bulk_stage && nr > base) {
565 next = nr + 1;
566 } else {
2ff64038 567 next = find_next_bit(bitmap, size, nr);
568 }
569
f3f491fc 570 *ram_addr_abs = next << TARGET_PAGE_BITS;
571 return (next - base) << TARGET_PAGE_BITS;
572}
573
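/* Clear the dirty bit for the page at @addr in the migration bitmap.
 * Returns true if the bit was previously set, in which case
 * migration_dirty_pages is decremented to match.
 */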
574static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
575{
576 bool ret;
577 int nr = addr >> TARGET_PAGE_BITS;
578 unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
579
580 ret = test_and_clear_bit(nr, bitmap);
581
582 if (ret) {
583 migration_dirty_pages--;
584 }
585 return ret;
586}
587
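/* Pull dirty-page information for [start, start + length) from the memory
 * API's dirty bitmap into the migration bitmap, adding any newly dirty
 * pages to migration_dirty_pages.
 */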
588static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
589{
2ff64038 590 unsigned long *bitmap;
60be6340 591 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
56e93d26 592 migration_dirty_pages +=
2ff64038 593 cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
594}
595
596/* Fix me: there are too many global variables used in the migration process. */
597static int64_t start_time;
598static int64_t bytes_xfer_prev;
599static int64_t num_dirty_pages_period;
600static uint64_t xbzrle_cache_miss_prev;
601static uint64_t iterations_prev;
602
603static void migration_bitmap_sync_init(void)
604{
605 start_time = 0;
606 bytes_xfer_prev = 0;
607 num_dirty_pages_period = 0;
608 xbzrle_cache_miss_prev = 0;
609 iterations_prev = 0;
610}
611
612/* Called with iothread lock held, to protect ram_list.dirty_memory[] */
613static void migration_bitmap_sync(void)
614{
615 RAMBlock *block;
616 uint64_t num_dirty_pages_init = migration_dirty_pages;
617 MigrationState *s = migrate_get_current();
618 int64_t end_time;
619 int64_t bytes_xfer_now;
620
621 bitmap_sync_count++;
622
623 if (!bytes_xfer_prev) {
624 bytes_xfer_prev = ram_bytes_transferred();
625 }
626
627 if (!start_time) {
628 start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
629 }
630
631 trace_migration_bitmap_sync_start();
632 address_space_sync_dirty_bitmap(&address_space_memory);
633
dd631697 634 qemu_mutex_lock(&migration_bitmap_mutex);
635 rcu_read_lock();
636 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2f68e399 637 migration_bitmap_sync_range(block->offset, block->used_length);
638 }
639 rcu_read_unlock();
dd631697 640 qemu_mutex_unlock(&migration_bitmap_mutex);
641
642 trace_migration_bitmap_sync_end(migration_dirty_pages
643 - num_dirty_pages_init);
644 num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
645 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
646
647 /* more than 1 second = 1000 milliseconds */
648 if (end_time > start_time + 1000) {
649 if (migrate_auto_converge()) {
650 /* The following detection logic can be refined later. For now:
651 Check to see if the dirtied bytes are 50% more than the approx.
652 amount of bytes that just got transferred since the last time we
653 were in this routine. If that happens twice, start or increase
654 throttling */
56e93d26 655 bytes_xfer_now = ram_bytes_transferred();
070afca2 656
657 if (s->dirty_pages_rate &&
658 (num_dirty_pages_period * TARGET_PAGE_SIZE >
659 (bytes_xfer_now - bytes_xfer_prev)/2) &&
070afca2 660 (dirty_rate_high_cnt++ >= 2)) {
56e93d26 661 trace_migration_throttle();
56e93d26 662 dirty_rate_high_cnt = 0;
070afca2 663 mig_throttle_guest_down();
664 }
665 bytes_xfer_prev = bytes_xfer_now;
56e93d26 666 }
070afca2 667
668 if (migrate_use_xbzrle()) {
669 if (iterations_prev != acct_info.iterations) {
670 acct_info.xbzrle_cache_miss_rate =
671 (double)(acct_info.xbzrle_cache_miss -
672 xbzrle_cache_miss_prev) /
673 (acct_info.iterations - iterations_prev);
674 }
675 iterations_prev = acct_info.iterations;
676 xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
677 }
678 s->dirty_pages_rate = num_dirty_pages_period * 1000
679 / (end_time - start_time);
680 s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
681 start_time = end_time;
682 num_dirty_pages_period = 0;
683 }
684 s->dirty_sync_count = bitmap_sync_count;
685 if (migrate_use_events()) {
686 qapi_event_send_migration_pass(bitmap_sync_count, NULL);
687 }
688}
689
690/**
691 * save_zero_page: Send the zero page to the stream
692 *
693 * Returns: Number of pages written.
694 *
695 * @f: QEMUFile where to send the data
696 * @block: block that contains the page we want to send
697 * @offset: offset inside the block for the page
698 * @p: pointer to the page
699 * @bytes_transferred: increase it with the number of transferred bytes
700 */
701static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
702 uint8_t *p, uint64_t *bytes_transferred)
703{
704 int pages = -1;
705
706 if (is_zero_range(p, TARGET_PAGE_SIZE)) {
707 acct_info.dup_pages++;
708 *bytes_transferred += save_page_header(f, block,
709 offset | RAM_SAVE_FLAG_COMPRESS);
710 qemu_put_byte(f, 0);
711 *bytes_transferred += 1;
712 pages = 1;
713 }
714
715 return pages;
716}
717
718/**
719 * ram_save_page: Send the given page to the stream
720 *
721 * Returns: Number of pages written.
722 * < 0 - error
723 * >=0 - Number of pages written - this might legally be 0
724 * if xbzrle noticed the page was the same.
725 *
726 * @f: QEMUFile where to send the data
727 * @block: block that contains the page we want to send
728 * @offset: offset inside the block for the page
729 * @last_stage: if we are at the completion stage
730 * @bytes_transferred: increase it with the number of transferred bytes
731 */
732static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
733 bool last_stage, uint64_t *bytes_transferred)
734{
735 int pages = -1;
736 uint64_t bytes_xmit;
737 ram_addr_t current_addr;
738 uint8_t *p;
739 int ret;
740 bool send_async = true;
741
2f68e399 742 p = block->host + offset;
743
744 /* When in doubt, send the page as normal */
745 bytes_xmit = 0;
746 ret = ram_control_save_page(f, block->offset,
747 offset, TARGET_PAGE_SIZE, &bytes_xmit);
748 if (bytes_xmit) {
749 *bytes_transferred += bytes_xmit;
750 pages = 1;
751 }
752
753 XBZRLE_cache_lock();
754
755 current_addr = block->offset + offset;
756
757 if (block == last_sent_block) {
758 offset |= RAM_SAVE_FLAG_CONTINUE;
759 }
760 if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
761 if (ret != RAM_SAVE_CONTROL_DELAYED) {
762 if (bytes_xmit > 0) {
763 acct_info.norm_pages++;
764 } else if (bytes_xmit == 0) {
765 acct_info.dup_pages++;
766 }
767 }
768 } else {
769 pages = save_zero_page(f, block, offset, p, bytes_transferred);
770 if (pages > 0) {
771 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
772 * page would be stale
773 */
774 xbzrle_cache_zero_page(current_addr);
775 } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
776 pages = save_xbzrle_page(f, &p, current_addr, block,
777 offset, last_stage, bytes_transferred);
778 if (!last_stage) {
779 /* Can't send this cached data async, since the cache page
780 * might get updated before it gets to the wire
781 */
782 send_async = false;
783 }
784 }
785 }
786
787 /* XBZRLE overflow or normal page */
788 if (pages == -1) {
789 *bytes_transferred += save_page_header(f, block,
790 offset | RAM_SAVE_FLAG_PAGE);
791 if (send_async) {
792 qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
793 } else {
794 qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
795 }
796 *bytes_transferred += TARGET_PAGE_SIZE;
797 pages = 1;
798 acct_info.norm_pages++;
799 }
800
801 XBZRLE_cache_unlock();
802
803 return pages;
804}
805
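/* Compress the page at param->block/param->offset into param->file (a
 * buffer-backed QEMUFile); returns the number of bytes produced, including
 * the page header.
 */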
806static int do_compress_ram_page(CompressParam *param)
807{
808 int bytes_sent, blen;
809 uint8_t *p;
810 RAMBlock *block = param->block;
811 ram_addr_t offset = param->offset;
812
2f68e399 813 p = block->host + (offset & TARGET_PAGE_MASK);
814
815 bytes_sent = save_page_header(param->file, block, offset |
816 RAM_SAVE_FLAG_COMPRESS_PAGE);
817 blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE,
818 migrate_compress_level());
819 bytes_sent += blen;
820
821 return bytes_sent;
822}
823
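/* Wake one compression worker: mark it busy and signal its condition
 * variable so do_data_compress() picks up the block/offset stored in
 * @param. start_decompression() below does the same for a decompression
 * worker.
 */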
824static inline void start_compression(CompressParam *param)
825{
826 param->done = false;
827 qemu_mutex_lock(&param->mutex);
828 param->start = true;
829 qemu_cond_signal(&param->cond);
830 qemu_mutex_unlock(&param->mutex);
831}
832
833static inline void start_decompression(DecompressParam *param)
834{
835 qemu_mutex_lock(&param->mutex);
836 param->start = true;
837 qemu_cond_signal(&param->cond);
838 qemu_mutex_unlock(&param->mutex);
839}
840
841static uint64_t bytes_transferred;
842
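/* Wait for every compression worker to finish its current page and flush
 * each worker's buffered output into the migration stream; callers use this
 * when switching RAMBlocks or when compressed transfer is being stopped.
 */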
843static void flush_compressed_data(QEMUFile *f)
844{
845 int idx, len, thread_count;
846
847 if (!migrate_use_compression()) {
848 return;
849 }
850 thread_count = migrate_compress_threads();
851 for (idx = 0; idx < thread_count; idx++) {
852 if (!comp_param[idx].done) {
853 qemu_mutex_lock(comp_done_lock);
854 while (!comp_param[idx].done && !quit_comp_thread) {
855 qemu_cond_wait(comp_done_cond, comp_done_lock);
856 }
857 qemu_mutex_unlock(comp_done_lock);
858 }
859 if (!quit_comp_thread) {
860 len = qemu_put_qemu_file(f, comp_param[idx].file);
861 bytes_transferred += len;
862 }
863 }
864}
865
866static inline void set_compress_params(CompressParam *param, RAMBlock *block,
867 ram_addr_t offset)
868{
869 param->block = block;
870 param->offset = offset;
871}
872
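/* Hand the page at @block/@offset to an idle compression worker, first
 * flushing that worker's previously compressed data into the stream;
 * blocks until a worker becomes free. Always accounts the page as a
 * normal page and returns 1.
 */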
873static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
874 ram_addr_t offset,
875 uint64_t *bytes_transferred)
876{
877 int idx, thread_count, bytes_xmit = -1, pages = -1;
878
879 thread_count = migrate_compress_threads();
880 qemu_mutex_lock(comp_done_lock);
881 while (true) {
882 for (idx = 0; idx < thread_count; idx++) {
883 if (comp_param[idx].done) {
884 bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
885 set_compress_params(&comp_param[idx], block, offset);
886 start_compression(&comp_param[idx]);
887 pages = 1;
888 acct_info.norm_pages++;
889 *bytes_transferred += bytes_xmit;
890 break;
891 }
892 }
893 if (pages > 0) {
894 break;
895 } else {
896 qemu_cond_wait(comp_done_cond, comp_done_lock);
897 }
898 }
899 qemu_mutex_unlock(comp_done_lock);
900
901 return pages;
902}
903
904/**
905 * ram_save_compressed_page: compress the given page and send it to the stream
906 *
907 * Returns: Number of pages written.
908 *
909 * @f: QEMUFile where to send the data
910 * @block: block that contains the page we want to send
911 * @offset: offset inside the block for the page
912 * @last_stage: if we are at the completion stage
913 * @bytes_transferred: increase it with the number of transferred bytes
914 */
915static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block,
916 ram_addr_t offset, bool last_stage,
917 uint64_t *bytes_transferred)
918{
919 int pages = -1;
920 uint64_t bytes_xmit;
921 uint8_t *p;
922 int ret;
923
2f68e399 924 p = block->host + offset;
925
926 bytes_xmit = 0;
927 ret = ram_control_save_page(f, block->offset,
928 offset, TARGET_PAGE_SIZE, &bytes_xmit);
929 if (bytes_xmit) {
930 *bytes_transferred += bytes_xmit;
931 pages = 1;
932 }
933 if (block == last_sent_block) {
934 offset |= RAM_SAVE_FLAG_CONTINUE;
935 }
936 if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
937 if (ret != RAM_SAVE_CONTROL_DELAYED) {
938 if (bytes_xmit > 0) {
939 acct_info.norm_pages++;
940 } else if (bytes_xmit == 0) {
941 acct_info.dup_pages++;
942 }
943 }
944 } else {
945 /* When starting the process of a new block, the first page of
946 * the block should be sent out before other pages in the same
947 * block, and all the pages in the last block should have been sent
948 * out. Keeping this order is important, because the 'cont' flag
949 * is used to avoid resending the block name.
950 */
951 if (block != last_sent_block) {
952 flush_compressed_data(f);
953 pages = save_zero_page(f, block, offset, p, bytes_transferred);
954 if (pages == -1) {
955 set_compress_params(&comp_param[0], block, offset);
956 /* Use the qemu thread to compress the data to make sure the
957 * first page is sent out before other pages
958 */
959 bytes_xmit = do_compress_ram_page(&comp_param[0]);
960 acct_info.norm_pages++;
961 qemu_put_qemu_file(f, comp_param[0].file);
962 *bytes_transferred += bytes_xmit;
963 pages = 1;
964 }
965 } else {
966 pages = save_zero_page(f, block, offset, p, bytes_transferred);
967 if (pages == -1) {
968 pages = compress_page_with_multi_thread(f, block, offset,
969 bytes_transferred);
970 }
971 }
972 }
973
974 return pages;
975}
976
977/*
978 * Find the next dirty page and update any state associated with
979 * the search process.
980 *
981 * Returns: True if a page is found
982 *
983 * @f: Current migration stream.
984 * @pss: Data about the state of the current dirty page scan.
985 * @*again: Set to false if the search has scanned the whole of RAM
986 * *ram_addr_abs: Pointer into which to store the address of the dirty page
987 * within the global ram_addr space
988 */
989static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss,
f3f491fc 990 bool *again, ram_addr_t *ram_addr_abs)
b9e60928 991{
992 pss->offset = migration_bitmap_find_dirty(pss->block, pss->offset,
993 ram_addr_abs);
994 if (pss->complete_round && pss->block == last_seen_block &&
995 pss->offset >= last_offset) {
996 /*
997 * We've been once around the RAM and haven't found anything.
998 * Give up.
999 */
1000 *again = false;
1001 return false;
1002 }
1003 if (pss->offset >= pss->block->used_length) {
1004 /* Didn't find anything in this RAM Block */
1005 pss->offset = 0;
1006 pss->block = QLIST_NEXT_RCU(pss->block, next);
1007 if (!pss->block) {
1008 /* Hit the end of the list */
1009 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1010 /* Flag that we've looped */
1011 pss->complete_round = true;
1012 ram_bulk_stage = false;
1013 if (migrate_use_xbzrle()) {
1014 /* If xbzrle is on, stop using the data compression at this
1015 * point. In theory, xbzrle can do better than compression.
1016 */
1017 flush_compressed_data(f);
1018 compression_switch = false;
1019 }
1020 }
1021 /* Didn't find anything this time, but try again on the new block */
1022 *again = true;
1023 return false;
1024 } else {
1025 /* Can go around again, but... */
1026 *again = true;
1027 /* We've found something so probably don't need to */
1028 return true;
1029 }
1030}
1031
1032/*
1033 * Helper for 'get_queued_page' - gets a page off the queue
1034 * ms: MigrationState in
1035 * *offset: Used to return the offset within the RAMBlock
1036 * ram_addr_abs: global offset in the dirty/sent bitmaps
1037 *
1038 * Returns: block (or NULL if none available)
1039 */
1040static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
1041 ram_addr_t *ram_addr_abs)
1042{
1043 RAMBlock *block = NULL;
1044
1045 qemu_mutex_lock(&ms->src_page_req_mutex);
1046 if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) {
1047 struct MigrationSrcPageRequest *entry =
1048 QSIMPLEQ_FIRST(&ms->src_page_requests);
1049 block = entry->rb;
1050 *offset = entry->offset;
1051 *ram_addr_abs = (entry->offset + entry->rb->offset) &
1052 TARGET_PAGE_MASK;
1053
1054 if (entry->len > TARGET_PAGE_SIZE) {
1055 entry->len -= TARGET_PAGE_SIZE;
1056 entry->offset += TARGET_PAGE_SIZE;
1057 } else {
1058 memory_region_unref(block->mr);
1059 QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
1060 g_free(entry);
1061 }
1062 }
1063 qemu_mutex_unlock(&ms->src_page_req_mutex);
1064
1065 return block;
1066}
1067
1068/*
1069 * Unqueue a page from the queue fed by postcopy page requests; skips pages
1070 * that are already sent (!dirty)
1071 *
1072 * ms: MigrationState in
1073 * pss: PageSearchStatus structure updated with found block/offset
1074 * ram_addr_abs: global offset in the dirty/sent bitmaps
1075 *
1076 * Returns: true if a queued page is found
1077 */
1078static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss,
1079 ram_addr_t *ram_addr_abs)
1080{
1081 RAMBlock *block;
1082 ram_addr_t offset;
1083 bool dirty;
1084
1085 do {
1086 block = unqueue_page(ms, &offset, ram_addr_abs);
1087 /*
1088 * We're sending this page, and since it's postcopy nothing else
1089 * will dirty it, and we must make sure it doesn't get sent again
1090 * even if this queue request was received after the background
1091 * search already sent it.
1092 */
1093 if (block) {
1094 unsigned long *bitmap;
1095 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1096 dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap);
1097 if (!dirty) {
1098 trace_get_queued_page_not_dirty(
1099 block->idstr, (uint64_t)offset,
1100 (uint64_t)*ram_addr_abs,
1101 test_bit(*ram_addr_abs >> TARGET_PAGE_BITS,
1102 atomic_rcu_read(&migration_bitmap_rcu)->unsentmap));
1103 } else {
1104 trace_get_queued_page(block->idstr,
1105 (uint64_t)offset,
1106 (uint64_t)*ram_addr_abs);
1107 }
1108 }
1109
1110 } while (block && !dirty);
1111
1112 if (block) {
1113 /*
1114 * As soon as we start servicing pages out of order, we have
1115 * to kill the bulk stage, since the bulk stage assumes
1116 * (in migration_bitmap_find_dirty) that every page is
1117 * dirty, which is no longer true.
1118 */
1119 ram_bulk_stage = false;
1120
1121 /*
1122 * We want the background search to continue from the queued page
1123 * since the guest is likely to want other pages near to the page
1124 * it just requested.
1125 */
1126 pss->block = block;
1127 pss->offset = offset;
1128 }
1129
1130 return !!block;
1131}
1132
1133/**
1134 * flush_page_queue: Flush any remaining pages in the ram request queue;
1135 * it should be empty at the end anyway, but in error cases there may be
1136 * some left.
1137 *
1138 * ms: MigrationState
1139 */
1140void flush_page_queue(MigrationState *ms)
1141{
1142 struct MigrationSrcPageRequest *mspr, *next_mspr;
1143 /* This queue generally should be empty - but in the case of a failed
1144 * migration it might have some entries left in it.
1145 */
1146 rcu_read_lock();
1147 QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) {
1148 memory_region_unref(mspr->rb->mr);
1149 QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
1150 g_free(mspr);
1151 }
1152 rcu_read_unlock();
1153}
1154
1155/**
1156 * Queue the pages for transmission, e.g. a request from postcopy destination
1157 * ms: MigrationStatus in which the queue is held
1158 * rbname: The RAMBlock the request is for - may be NULL (to mean reuse last)
1159 * start: Offset from the start of the RAMBlock
1160 * len: Length (in bytes) to send
1161 * Return: 0 on success
1162 */
1163int ram_save_queue_pages(MigrationState *ms, const char *rbname,
1164 ram_addr_t start, ram_addr_t len)
1165{
1166 RAMBlock *ramblock;
1167
1168 rcu_read_lock();
1169 if (!rbname) {
1170 /* Reuse last RAMBlock */
1171 ramblock = ms->last_req_rb;
1172
1173 if (!ramblock) {
1174 /*
1175 * Shouldn't happen, we can't reuse the last RAMBlock if
1176 * it's the 1st request.
1177 */
1178 error_report("ram_save_queue_pages no previous block");
1179 goto err;
1180 }
1181 } else {
1182 ramblock = qemu_ram_block_by_name(rbname);
1183
1184 if (!ramblock) {
1185 /* We shouldn't be asked for a non-existent RAMBlock */
1186 error_report("ram_save_queue_pages no block '%s'", rbname);
1187 goto err;
1188 }
1189 ms->last_req_rb = ramblock;
1190 }
1191 trace_ram_save_queue_pages(ramblock->idstr, start, len);
1192 if (start+len > ramblock->used_length) {
1193 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
1194 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
1195 __func__, start, len, ramblock->used_length);
1196 goto err;
1197 }
1198
1199 struct MigrationSrcPageRequest *new_entry =
1200 g_malloc0(sizeof(struct MigrationSrcPageRequest));
1201 new_entry->rb = ramblock;
1202 new_entry->offset = start;
1203 new_entry->len = len;
1204
1205 memory_region_ref(ramblock->mr);
1206 qemu_mutex_lock(&ms->src_page_req_mutex);
1207 QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req);
1208 qemu_mutex_unlock(&ms->src_page_req_mutex);
1209 rcu_read_unlock();
1210
1211 return 0;
1212
1213err:
1214 rcu_read_unlock();
1215 return -1;
1216}
1217
1218/**
1219 * ram_save_target_page: Save one target page
1220 *
1221 *
1222 * @f: QEMUFile where to send the data
1223 * @block: pointer to block that contains the page we want to send
1224 * @offset: offset inside the block for the page;
1225 * @last_stage: if we are at the completion stage
1226 * @bytes_transferred: increase it with the number of transferred bytes
1227 * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
1228 *
1229 * Returns: Number of pages written.
1230 */
1231static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
1232 RAMBlock *block, ram_addr_t offset,
1233 bool last_stage,
1234 uint64_t *bytes_transferred,
1235 ram_addr_t dirty_ram_abs)
1236{
1237 int res = 0;
1238
1239 /* Check if the page is dirty and if it is, send it */
1240 if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
1241 unsigned long *unsentmap;
1242 if (compression_switch && migrate_use_compression()) {
1243 res = ram_save_compressed_page(f, block, offset,
1244 last_stage,
1245 bytes_transferred);
1246 } else {
1247 res = ram_save_page(f, block, offset, last_stage,
1248 bytes_transferred);
1249 }
1250
1251 if (res < 0) {
1252 return res;
1253 }
1254 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1255 if (unsentmap) {
1256 clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
1257 }
1258 /* Only update last_sent_block if a block was actually sent; xbzrle
1259 * might have decided the page was identical so didn't bother writing
1260 * to the stream.
1261 */
1262 if (res > 0) {
1263 last_sent_block = block;
1264 }
1265 }
1266
1267 return res;
1268}
1269
1270/**
1271 * ram_save_host_page: Starting at *offset send pages up to the end
1272 * of the current host page. It's valid for the initial
1273 * offset to point into the middle of a host page
1274 * in which case the remainder of the hostpage is sent.
1275 * Only dirty target pages are sent.
1276 *
1277 * Returns: Number of pages written.
1278 *
1279 * @f: QEMUFile where to send the data
1280 * @block: pointer to block that contains the page we want to send
1281 * @offset: offset inside the block for the page; updated to last target page
1282 * sent
1283 * @last_stage: if we are at the completion stage
1284 * @bytes_transferred: increase it with the number of transferred bytes
1285 * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
1286 */
1287static int ram_save_host_page(MigrationState *ms, QEMUFile *f, RAMBlock *block,
1288 ram_addr_t *offset, bool last_stage,
1289 uint64_t *bytes_transferred,
1290 ram_addr_t dirty_ram_abs)
1291{
1292 int tmppages, pages = 0;
1293 do {
1294 tmppages = ram_save_target_page(ms, f, block, *offset, last_stage,
1295 bytes_transferred, dirty_ram_abs);
1296 if (tmppages < 0) {
1297 return tmppages;
1298 }
1299
1300 pages += tmppages;
1301 *offset += TARGET_PAGE_SIZE;
1302 dirty_ram_abs += TARGET_PAGE_SIZE;
1303 } while (*offset & (qemu_host_page_size - 1));
1304
1305 /* The offset we leave with is the last one we looked at */
1306 *offset -= TARGET_PAGE_SIZE;
1307 return pages;
1308}
6c595cde 1309
1310/**
1311 * ram_find_and_save_block: Finds a dirty page and sends it to f
1312 *
1313 * Called within an RCU critical section.
1314 *
1315 * Returns: The number of pages written
1316 * 0 means no dirty pages
1317 *
1318 * @f: QEMUFile where to send the data
1319 * @last_stage: if we are at the completion stage
1320 * @bytes_transferred: increase it with the number of transferred bytes
1321 *
1322 * On systems where host-page-size > target-page-size it will send all the
1323 * pages in a host page that are dirty.
1324 */
1325
1326static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
1327 uint64_t *bytes_transferred)
1328{
b8fb8cb7 1329 PageSearchStatus pss;
a82d593b 1330 MigrationState *ms = migrate_get_current();
56e93d26 1331 int pages = 0;
b9e60928 1332 bool again, found;
1333 ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in
1334 ram_addr_t space */
56e93d26 1335
1336 pss.block = last_seen_block;
1337 pss.offset = last_offset;
1338 pss.complete_round = false;
1339
1340 if (!pss.block) {
1341 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
1342 }
56e93d26 1343
b9e60928 1344 do {
1345 again = true;
1346 found = get_queued_page(ms, &pss, &dirty_ram_abs);
b9e60928 1347
1348 if (!found) {
1349 /* priority queue empty, so just search for something dirty */
1350 found = find_dirty_block(f, &pss, &again, &dirty_ram_abs);
1351 }
f3f491fc 1352
1353 if (found) {
1354 pages = ram_save_host_page(ms, f, pss.block, &pss.offset,
1355 last_stage, bytes_transferred,
1356 dirty_ram_abs);
56e93d26 1357 }
b9e60928 1358 } while (!pages && again);
56e93d26 1359
1360 last_seen_block = pss.block;
1361 last_offset = pss.offset;
1362
1363 return pages;
1364}
1365
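/* Account for @size bytes of pages written to the stream outside the normal
 * page-saving path in this file: bump the duplicate or normal page counters
 * and, for non-zero pages, the transferred byte total and the file position.
 */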
1366void acct_update_position(QEMUFile *f, size_t size, bool zero)
1367{
1368 uint64_t pages = size / TARGET_PAGE_SIZE;
1369 if (zero) {
1370 acct_info.dup_pages += pages;
1371 } else {
1372 acct_info.norm_pages += pages;
1373 bytes_transferred += size;
1374 qemu_update_position(f, size);
1375 }
1376}
1377
1378static ram_addr_t ram_save_remaining(void)
1379{
1380 return migration_dirty_pages;
1381}
1382
1383uint64_t ram_bytes_remaining(void)
1384{
1385 return ram_save_remaining() * TARGET_PAGE_SIZE;
1386}
1387
1388uint64_t ram_bytes_transferred(void)
1389{
1390 return bytes_transferred;
1391}
1392
1393uint64_t ram_bytes_total(void)
1394{
1395 RAMBlock *block;
1396 uint64_t total = 0;
1397
1398 rcu_read_lock();
1399 QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
1400 total += block->used_length;
1401 rcu_read_unlock();
1402 return total;
1403}
1404
1405void free_xbzrle_decoded_buf(void)
1406{
1407 g_free(xbzrle_decoded_buf);
1408 xbzrle_decoded_buf = NULL;
1409}
1410
1411static void migration_bitmap_free(struct BitmapRcu *bmap)
1412{
1413 g_free(bmap->bmap);
f3f491fc 1414 g_free(bmap->unsentmap);
1415 g_free(bmap);
1416}
1417
6ad2a215 1418static void ram_migration_cleanup(void *opaque)
56e93d26 1419{
1420 /* The caller must hold the iothread lock or be in a bottom half, so there
1421 * is no write race against this migration_bitmap
1422 */
1423 struct BitmapRcu *bitmap = migration_bitmap_rcu;
1424 atomic_rcu_set(&migration_bitmap_rcu, NULL);
2ff64038 1425 if (bitmap) {
56e93d26 1426 memory_global_dirty_log_stop();
60be6340 1427 call_rcu(bitmap, migration_bitmap_free, rcu);
1428 }
1429
1430 XBZRLE_cache_lock();
1431 if (XBZRLE.cache) {
1432 cache_fini(XBZRLE.cache);
1433 g_free(XBZRLE.encoded_buf);
1434 g_free(XBZRLE.current_buf);
1435 XBZRLE.cache = NULL;
1436 XBZRLE.encoded_buf = NULL;
1437 XBZRLE.current_buf = NULL;
1438 }
1439 XBZRLE_cache_unlock();
1440}
1441
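/* Reset the per-migration globals (last seen/sent block, offset, ram_list
 * version and the bulk-stage flag) at the start of a migration pass.
 */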
1442static void reset_ram_globals(void)
1443{
1444 last_seen_block = NULL;
1445 last_sent_block = NULL;
1446 last_offset = 0;
1447 last_version = ram_list.version;
1448 ram_bulk_stage = true;
1449}
1450
1451#define MAX_WAIT 50 /* ms, half buffered_file limit */
1452
1453void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
1454{
1455 /* called in qemu main thread, so there is
1456 * no writing race against this migration_bitmap
1457 */
1458 if (migration_bitmap_rcu) {
1459 struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
1460 bitmap = g_new(struct BitmapRcu, 1);
1461 bitmap->bmap = bitmap_new(new);
1462
1463 /* prevent bits in migration_bitmap from being set
1464 * by migration_bitmap_sync_range() at the same time.
1465 * It is safe for migration if bits in migration_bitmap are
1466 * cleared at the same time.
1467 */
1468 qemu_mutex_lock(&migration_bitmap_mutex);
1469 bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
1470 bitmap_set(bitmap->bmap, old, new - old);
1471
1472 /* We don't have a way to safely extend the sentmap
1473 * with RCU; so mark it as missing, entry to postcopy
1474 * will fail.
1475 */
1476 bitmap->unsentmap = NULL;
1477
60be6340 1478 atomic_rcu_set(&migration_bitmap_rcu, bitmap);
1479 qemu_mutex_unlock(&migration_bitmap_mutex);
1480 migration_dirty_pages += new - old;
60be6340 1481 call_rcu(old_bitmap, migration_bitmap_free, rcu);
1482 }
1483}
56e93d26 1484
1485/*
1486 * 'expected' is the value you expect the bitmap mostly to be full
1487 * of; it won't bother printing lines that are all this value.
1488 * If 'todump' is null the migration bitmap is dumped.
1489 */
1490void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
1491{
1492 int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
1493
1494 int64_t cur;
1495 int64_t linelen = 128;
1496 char linebuf[129];
1497
1498 if (!todump) {
1499 todump = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1500 }
1501
1502 for (cur = 0; cur < ram_pages; cur += linelen) {
1503 int64_t curb;
1504 bool found = false;
1505 /*
1506 * Last line; catch the case where the line length
1507 * is longer than remaining ram
1508 */
1509 if (cur + linelen > ram_pages) {
1510 linelen = ram_pages - cur;
1511 }
1512 for (curb = 0; curb < linelen; curb++) {
1513 bool thisbit = test_bit(cur + curb, todump);
1514 linebuf[curb] = thisbit ? '1' : '.';
1515 found = found || (thisbit != expected);
1516 }
1517 if (found) {
1518 linebuf[curb] = '\0';
1519 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
1520 }
1521 }
1522}
1523
1524/* **** functions for postcopy ***** */
1525
1526/*
1527 * Callback from postcopy_each_ram_send_discard for each RAMBlock
1528 * Note: At this point the 'unsentmap' is the processed bitmap combined
1529 * with the dirtymap; so a '1' means it's either dirty or unsent.
1530 * start, length: index of the first bit in the bitmap representing
1531 * the named block, and the block's length in target pages
1532 */
1533static int postcopy_send_discard_bm_ram(MigrationState *ms,
1534 PostcopyDiscardState *pds,
1535 unsigned long start,
1536 unsigned long length)
1537{
1538 unsigned long end = start + length; /* one after the end */
1539 unsigned long current;
1540 unsigned long *unsentmap;
1541
1542 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1543 for (current = start; current < end; ) {
1544 unsigned long one = find_next_bit(unsentmap, end, current);
1545
1546 if (one <= end) {
1547 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
1548 unsigned long discard_length;
1549
1550 if (zero >= end) {
1551 discard_length = end - one;
1552 } else {
1553 discard_length = zero - one;
1554 }
1555 postcopy_discard_send_range(ms, pds, one, discard_length);
1556 current = one + discard_length;
1557 } else {
1558 current = one;
1559 }
1560 }
1561
1562 return 0;
1563}
1564
1565/*
1566 * Utility for the outgoing postcopy code.
1567 * Calls postcopy_send_discard_bm_ram for each RAMBlock
1568 * passing it bitmap indexes and name.
1569 * Returns: 0 on success
1570 * (qemu_ram_foreach_block ends up passing unscaled lengths
1571 * which would mean postcopy code would have to deal with target page)
1572 */
1573static int postcopy_each_ram_send_discard(MigrationState *ms)
1574{
1575 struct RAMBlock *block;
1576 int ret;
1577
1578 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1579 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1580 PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
1581 first,
1582 block->idstr);
1583
1584 /*
1585 * Postcopy sends chunks of bitmap over the wire, but it
1586 * just needs indexes at this point, avoids it having
1587 * target page specific code.
1588 */
1589 ret = postcopy_send_discard_bm_ram(ms, pds, first,
1590 block->used_length >> TARGET_PAGE_BITS);
1591 postcopy_discard_send_finish(ms, pds);
1592 if (ret) {
1593 return ret;
1594 }
1595 }
1596
1597 return 0;
1598}
1599
1600/*
1601 * Helper for postcopy_chunk_hostpages; it's called twice to cleanup
1602 * the two bitmaps, that are similar, but one is inverted.
1603 *
1604 * We search for runs of target-pages that don't start or end on a
1605 * host page boundary;
1606 * unsent_pass=true: Cleans up partially unsent host pages by searching
1607 * the unsentmap
1608 * unsent_pass=false: Cleans up partially dirty host pages by searching
1609 * the main migration bitmap
1610 *
1611 */
1612static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
1613 RAMBlock *block,
1614 PostcopyDiscardState *pds)
1615{
1616 unsigned long *bitmap;
1617 unsigned long *unsentmap;
1618 unsigned int host_ratio = qemu_host_page_size / TARGET_PAGE_SIZE;
1619 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1620 unsigned long len = block->used_length >> TARGET_PAGE_BITS;
1621 unsigned long last = first + (len - 1);
1622 unsigned long run_start;
1623
1624 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1625 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1626
1627 if (unsent_pass) {
1628 /* Find a sent page */
1629 run_start = find_next_zero_bit(unsentmap, last + 1, first);
1630 } else {
1631 /* Find a dirty page */
1632 run_start = find_next_bit(bitmap, last + 1, first);
1633 }
1634
1635 while (run_start <= last) {
1636 bool do_fixup = false;
1637 unsigned long fixup_start_addr;
1638 unsigned long host_offset;
1639
1640 /*
1641 * If the start of this run of pages is in the middle of a host
1642 * page, then we need to fixup this host page.
1643 */
1644 host_offset = run_start % host_ratio;
1645 if (host_offset) {
1646 do_fixup = true;
1647 run_start -= host_offset;
1648 fixup_start_addr = run_start;
1649 /* For the next pass */
1650 run_start = run_start + host_ratio;
1651 } else {
1652 /* Find the end of this run */
1653 unsigned long run_end;
1654 if (unsent_pass) {
1655 run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
1656 } else {
1657 run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
1658 }
1659 /*
1660 * If the end isn't at the start of a host page, then the
1661 * run doesn't finish at the end of a host page
1662 * and we need to discard.
1663 */
1664 host_offset = run_end % host_ratio;
1665 if (host_offset) {
1666 do_fixup = true;
1667 fixup_start_addr = run_end - host_offset;
1668 /*
1669 * This host page has gone, the next loop iteration starts
1670 * from after the fixup
1671 */
1672 run_start = fixup_start_addr + host_ratio;
1673 } else {
1674 /*
1675 * No discards on this iteration, next loop starts from
1676 * next sent/dirty page
1677 */
1678 run_start = run_end + 1;
1679 }
1680 }
1681
1682 if (do_fixup) {
1683 unsigned long page;
1684
1685 /* Tell the destination to discard this page */
1686 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
1687 /* For the unsent_pass we:
1688 * discard partially sent pages
1689 * For the !unsent_pass (dirty) we:
1690 * discard partially dirty pages that were sent
1691 * (any partially sent pages were already discarded
1692 * by the previous unsent_pass)
1693 */
1694 postcopy_discard_send_range(ms, pds, fixup_start_addr,
1695 host_ratio);
1696 }
1697
1698 /* Clean up the bitmap */
1699 for (page = fixup_start_addr;
1700 page < fixup_start_addr + host_ratio; page++) {
1701 /* All pages in this host page are now not sent */
1702 set_bit(page, unsentmap);
1703
1704 /*
1705 * Remark them as dirty, updating the count for any pages
1706 * that weren't previously dirty.
1707 */
1708 migration_dirty_pages += !test_and_set_bit(page, bitmap);
1709 }
1710 }
1711
1712 if (unsent_pass) {
1713 /* Find the next sent page for the next iteration */
1714 run_start = find_next_zero_bit(unsentmap, last + 1,
1715 run_start);
1716 } else {
1717 /* Find the next dirty page for the next iteration */
1718 run_start = find_next_bit(bitmap, last + 1, run_start);
1719 }
1720 }
1721}
1722
1723/*
1724 * Utility for the outgoing postcopy code.
1725 *
1726 * Discard any partially sent host-page size chunks, mark any partially
1727 * dirty host-page size chunks as all dirty.
1728 *
1729 * Returns: 0 on success
1730 */
1731static int postcopy_chunk_hostpages(MigrationState *ms)
1732{
1733 struct RAMBlock *block;
1734
1735 if (qemu_host_page_size == TARGET_PAGE_SIZE) {
1736 /* Easy case - TPS==HPS - nothing to be done */
1737 return 0;
1738 }
1739
1740 /* Easiest way to make sure we don't resume in the middle of a host-page */
1741 last_seen_block = NULL;
1742 last_sent_block = NULL;
1743 last_offset = 0;
1744
1745 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1746 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1747
1748 PostcopyDiscardState *pds =
1749 postcopy_discard_send_init(ms, first, block->idstr);
1750
1751 /* First pass: Discard all partially sent host pages */
1752 postcopy_chunk_hostpages_pass(ms, true, block, pds);
1753 /*
1754 * Second pass: Ensure that all partially dirty host pages are made
1755 * fully dirty.
1756 */
1757 postcopy_chunk_hostpages_pass(ms, false, block, pds);
1758
1759 postcopy_discard_send_finish(ms, pds);
1760 } /* ram_list loop */
1761
1762 return 0;
1763}
1764
1765/*
1766 * Transmit the set of pages to be discarded after precopy to the target;
1767 * these are pages that:
1768 * a) Have been previously transmitted but are now dirty again
1769 * b) Pages that have never been transmitted, this ensures that
1770 * any pages on the destination that have been mapped by background
1771 * tasks get discarded (transparent huge pages is the specific concern)
1772 * Hopefully this is pretty sparse
1773 */
1774int ram_postcopy_send_discard_bitmap(MigrationState *ms)
1775{
1776 int ret;
1777 unsigned long *bitmap, *unsentmap;
1778
1779 rcu_read_lock();
1780
1781 /* This should be our last sync, the src is now paused */
1782 migration_bitmap_sync();
1783
1784 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1785 if (!unsentmap) {
1786 /* We don't have a safe way to resize the sentmap, so
1787 * if the bitmap was resized it will be NULL at this
1788 * point.
1789 */
1790 error_report("migration ram resized during precopy phase");
1791 rcu_read_unlock();
1792 return -EINVAL;
1793 }
1794
1795 /* Deal with TPS != HPS */
1796 ret = postcopy_chunk_hostpages(ms);
1797 if (ret) {
1798 rcu_read_unlock();
1799 return ret;
1800 }
1801
1802 /*
1803 * Update the unsentmap to be unsentmap = unsentmap | dirty
1804 */
1805 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1806 bitmap_or(unsentmap, unsentmap, bitmap,
1807 last_ram_offset() >> TARGET_PAGE_BITS);
1808
1809
1810 trace_ram_postcopy_send_discard_bitmap();
1811#ifdef DEBUG_POSTCOPY
1812 ram_debug_dump_bitmap(unsentmap, true);
1813#endif
1814
1815 ret = postcopy_each_ram_send_discard(ms);
1816 rcu_read_unlock();
1817
1818 return ret;
1819}
1820
1821/*
1822 * At the start of the postcopy phase of migration, any now-dirty
1823 * precopied pages are discarded.
1824 *
1825 * start, length describe a byte address range within the RAMBlock
1826 *
1827 * Returns 0 on success.
1828 */
1829int ram_discard_range(MigrationIncomingState *mis,
1830 const char *block_name,
1831 uint64_t start, size_t length)
1832{
1833 int ret = -1;
1834
1835 rcu_read_lock();
1836 RAMBlock *rb = qemu_ram_block_by_name(block_name);
1837
1838 if (!rb) {
1839 error_report("ram_discard_range: Failed to find block '%s'",
1840 block_name);
1841 goto err;
1842 }
1843
1844 uint8_t *host_startaddr = rb->host + start;
1845
1846 if ((uintptr_t)host_startaddr & (qemu_host_page_size - 1)) {
1847 error_report("ram_discard_range: Unaligned start address: %p",
1848 host_startaddr);
1849 goto err;
1850 }
1851
1852 if ((start + length) <= rb->used_length) {
1853 uint8_t *host_endaddr = host_startaddr + length;
1854 if ((uintptr_t)host_endaddr & (qemu_host_page_size - 1)) {
1855 error_report("ram_discard_range: Unaligned end address: %p",
1856 host_endaddr);
1857 goto err;
1858 }
1859 ret = postcopy_ram_discard_range(mis, host_startaddr, length);
1860 } else {
1861 error_report("ram_discard_range: Overrun block '%s' (%" PRIu64
1862                     "/%zx/" RAM_ADDR_FMT")",
1863 block_name, start, length, rb->used_length);
1864 }
1865
1866err:
1867 rcu_read_unlock();
1868
1869 return ret;
1870}
1871
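/*
 * A small sketch (not part of the original file) of the alignment rule
 * enforced above: both ends of a discard range must sit on a host-page
 * boundary, since the host can only drop whole host pages.
 */
static inline bool host_page_aligned_sketch(const void *p)
{
    /* qemu_host_page_size is a power of two, so the low bits must be clear */
    return ((uintptr_t)p & (qemu_host_page_size - 1)) == 0;
}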
1872
1873/* Each of ram_save_setup, ram_save_iterate and ram_save_complete has a
1874  * long-running RCU critical section. When RCU reclaims in the code
1875  * start to become numerous, it will be necessary to reduce the
1876 * granularity of these critical sections.
1877 */
1878
1879static int ram_save_setup(QEMUFile *f, void *opaque)
1880{
1881 RAMBlock *block;
1882 int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
1883
1884 dirty_rate_high_cnt = 0;
1885 bitmap_sync_count = 0;
1886 migration_bitmap_sync_init();
1887     qemu_mutex_init(&migration_bitmap_mutex);
1888
1889 if (migrate_use_xbzrle()) {
1890 XBZRLE_cache_lock();
1891 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
1892 TARGET_PAGE_SIZE,
1893 TARGET_PAGE_SIZE);
1894 if (!XBZRLE.cache) {
1895 XBZRLE_cache_unlock();
1896 error_report("Error creating cache");
1897 return -1;
1898 }
1899 XBZRLE_cache_unlock();
1900
1901 /* We prefer not to abort if there is no memory */
1902 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
1903 if (!XBZRLE.encoded_buf) {
1904 error_report("Error allocating encoded_buf");
1905 return -1;
1906 }
1907
1908 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
1909 if (!XBZRLE.current_buf) {
1910 error_report("Error allocating current_buf");
1911 g_free(XBZRLE.encoded_buf);
1912 XBZRLE.encoded_buf = NULL;
1913 return -1;
1914 }
1915
1916 acct_clear();
1917 }
1918
1919 /* iothread lock needed for ram_list.dirty_memory[] */
1920 qemu_mutex_lock_iothread();
1921 qemu_mutex_lock_ramlist();
1922 rcu_read_lock();
1923 bytes_transferred = 0;
1924 reset_ram_globals();
1925
1926 ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
1927     migration_bitmap_rcu = g_new0(struct BitmapRcu, 1);
1928 migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
1929 bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);
1930
1931 if (migrate_postcopy_ram()) {
1932 migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages);
1933 bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages);
1934 }
1935
1936 /*
1937 * Count the total number of pages used by ram blocks not including any
1938 * gaps due to alignment or unplugs.
1939 */
1940 migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
1941
1942 memory_global_dirty_log_start();
1943 migration_bitmap_sync();
1944 qemu_mutex_unlock_ramlist();
1945 qemu_mutex_unlock_iothread();
1946
1947 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
1948
1949 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1950 qemu_put_byte(f, strlen(block->idstr));
1951 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
1952 qemu_put_be64(f, block->used_length);
1953 }
1954
1955 rcu_read_unlock();
1956
1957 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
1958 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
1959
1960 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
1961
1962 return 0;
1963}
1964
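/*
 * A sketch of the setup-stage stream the code above emits (derived from the
 * qemu_put_* calls; integers are big-endian):
 *
 *   be64  ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE
 *   for each RAMBlock:
 *       u8    strlen(idstr)
 *       bytes idstr (no trailing NUL)
 *       be64  used_length
 *   be64  RAM_SAVE_FLAG_EOS
 */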
1965static int ram_save_iterate(QEMUFile *f, void *opaque)
1966{
1967 int ret;
1968 int i;
1969 int64_t t0;
1970 int pages_sent = 0;
1971
1972 rcu_read_lock();
1973 if (ram_list.version != last_version) {
1974 reset_ram_globals();
1975 }
1976
1977 /* Read version before ram_list.blocks */
1978 smp_rmb();
1979
1980 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
1981
1982 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1983 i = 0;
1984 while ((ret = qemu_file_rate_limit(f)) == 0) {
1985 int pages;
1986
1987 pages = ram_find_and_save_block(f, false, &bytes_transferred);
1988         /* no more pages to send */
1989 if (pages == 0) {
1990 break;
1991 }
1992 pages_sent += pages;
1993 acct_info.iterations++;
1994
1995         /* we want to check in the 1st loop, just in case it was the 1st time
1996            and we had to sync the dirty bitmap.
1997            qemu_clock_get_ns() is a bit expensive, so we only check once
1998            every few iterations
1999         */
2000 if ((i & 63) == 0) {
2001 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
2002 if (t1 > MAX_WAIT) {
2003 DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
2004 t1, i);
2005 break;
2006 }
2007 }
2008 i++;
2009 }
2010 flush_compressed_data(f);
2011 rcu_read_unlock();
2012
2013 /*
2014 * Must occur before EOS (or any QEMUFile operation)
2015 * because of RDMA protocol.
2016 */
2017 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
2018
2019 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2020 bytes_transferred += 8;
2021
2022 ret = qemu_file_get_error(f);
2023 if (ret < 0) {
2024 return ret;
2025 }
2026
2027 return pages_sent;
2028}
2029
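/*
 * A minimal sketch (illustrative only) of the time-capping pattern used in
 * ram_save_iterate() above: the clock is read only once every 64 iterations
 * because qemu_clock_get_ns() is comparatively expensive.
 */
static bool over_budget_sketch(int64_t start_ns, int iteration, int64_t budget_ms)
{
    int64_t elapsed_ms;

    if (iteration & 63) {
        return false;               /* skip the clock read most of the time */
    }
    elapsed_ms = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_ns) / 1000000;
    return elapsed_ms > budget_ms;
}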
2030/* Called with iothread lock */
2031static int ram_save_complete(QEMUFile *f, void *opaque)
2032{
2033 rcu_read_lock();
2034
2035 if (!migration_in_postcopy(migrate_get_current())) {
2036 migration_bitmap_sync();
2037 }
2038
2039 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
2040
2041 /* try transferring iterative blocks of memory */
2042
2043 /* flush all remaining blocks regardless of rate limiting */
2044 while (true) {
2045 int pages;
2046
2047 pages = ram_find_and_save_block(f, true, &bytes_transferred);
2048 /* no more blocks to sent */
2049 if (pages == 0) {
2050 break;
2051 }
2052 }
2053
2054 flush_compressed_data(f);
2055 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
2056
2057 rcu_read_unlock();
2058
2059 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2060
2061 return 0;
2062}
2063
2064static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
2065 uint64_t *non_postcopiable_pending,
2066 uint64_t *postcopiable_pending)
2067{
2068 uint64_t remaining_size;
2069
2070 remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2071
2072 if (!migration_in_postcopy(migrate_get_current()) &&
2073 remaining_size < max_size) {
2074 qemu_mutex_lock_iothread();
2075 rcu_read_lock();
2076 migration_bitmap_sync();
2077 rcu_read_unlock();
2078 qemu_mutex_unlock_iothread();
2079 remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2080 }
2081
2082 /* We can do postcopy, and all the data is postcopiable */
2083 *postcopiable_pending += remaining_size;
2084}
2085
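/*
 * A toy calculation (illustrative only) of the estimate reported above:
 * pending bytes are simply "dirty pages still to send" times the target
 * page size, so e.g. 1000 remaining 4 KiB pages report roughly 4 MB.
 */
static uint64_t pending_bytes_sketch(uint64_t remaining_pages)
{
    return remaining_pages * TARGET_PAGE_SIZE;
}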
2086static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2087{
2088 unsigned int xh_len;
2089 int xh_flags;
2090     uint8_t *loaded_data;
2091
2092 if (!xbzrle_decoded_buf) {
2093 xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2094 }
2095     loaded_data = xbzrle_decoded_buf;
2096
2097 /* extract RLE header */
2098 xh_flags = qemu_get_byte(f);
2099 xh_len = qemu_get_be16(f);
2100
2101 if (xh_flags != ENCODING_FLAG_XBZRLE) {
2102 error_report("Failed to load XBZRLE page - wrong compression!");
2103 return -1;
2104 }
2105
2106 if (xh_len > TARGET_PAGE_SIZE) {
2107 error_report("Failed to load XBZRLE page - len overflow!");
2108 return -1;
2109 }
2110 /* load data and decode */
2111     qemu_get_buffer_in_place(f, &loaded_data, xh_len);
2112
2113 /* decode RLE */
2114     if (xbzrle_decode_buffer(loaded_data, xh_len, host,
2115 TARGET_PAGE_SIZE) == -1) {
2116 error_report("Failed to load XBZRLE page - decode error!");
2117 return -1;
2118 }
2119
2120 return 0;
2121}
2122
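/*
 * A sketch of the XBZRLE page record parsed above (reconstructed from the
 * reads in load_xbzrle(); not an authoritative format description):
 *
 *   u8    xh_flags  -- must be ENCODING_FLAG_XBZRLE
 *   be16  xh_len    -- encoded length, at most TARGET_PAGE_SIZE
 *   bytes xh_len bytes of delta data fed to xbzrle_decode_buffer()
 */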
2123/* Must be called from within an RCU critical section.
2124 * Returns a pointer from within the RCU-protected ram_list.
2125 */
2126 /*
2127  * Read a RAMBlock ID from the stream f.
2128 *
2129 * f: Stream to read from
2130 * flags: Page flags (mostly to see if it's a continuation of previous block)
2131 */
2132static inline RAMBlock *ram_block_from_stream(QEMUFile *f,
2133 int flags)
2134{
2135 static RAMBlock *block = NULL;
2136 char id[256];
2137 uint8_t len;
2138
2139 if (flags & RAM_SAVE_FLAG_CONTINUE) {
2140         if (!block) {
2141 error_report("Ack, bad migration stream!");
2142 return NULL;
2143 }
2144         return block;
2145 }
2146
2147 len = qemu_get_byte(f);
2148 qemu_get_buffer(f, (uint8_t *)id, len);
2149 id[len] = 0;
2150
2151     block = qemu_ram_block_by_name(id);
2152 if (!block) {
2153 error_report("Can't find block %s", id);
2154 return NULL;
2155 }
2156
2157 return block;
2158}
2159
2160static inline void *host_from_ram_block_offset(RAMBlock *block,
2161 ram_addr_t offset)
2162{
2163 if (!offset_in_ramblock(block, offset)) {
2164 return NULL;
2165 }
2166
2167 return block->host + offset;
2168}
2169
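/*
 * A condensed sketch (illustrative only) of how the load paths below combine
 * the two helpers above to turn a page header into a destination pointer.
 */
static void *resolve_page_sketch(QEMUFile *f, uint64_t header)
{
    ram_addr_t addr = header & TARGET_PAGE_MASK;
    int flags = header & ~TARGET_PAGE_MASK;
    RAMBlock *block = ram_block_from_stream(f, flags);

    return block ? host_from_ram_block_offset(block, addr) : NULL;
}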
2170/*
2171 * If a page (or a whole RDMA chunk) has been
2172 * determined to be zero, then zap it.
2173 */
2174void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
2175{
2176 if (ch != 0 || !is_zero_range(host, size)) {
2177 memset(host, ch, size);
2178 }
2179}
2180
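/*
 * A toy usage example (illustrative only): with ch == 0 the memset is
 * skipped whenever the destination is already zero, so an untouched host
 * page is not dirtied; any non-zero fill byte always writes.
 */
static void zero_page_example_sketch(uint8_t *page)
{
    ram_handle_compressed(page, 0, TARGET_PAGE_SIZE);    /* usually no write */
    ram_handle_compressed(page, 0xff, TARGET_PAGE_SIZE); /* always memsets   */
}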
2181static void *do_data_decompress(void *opaque)
2182{
2183 DecompressParam *param = opaque;
2184 unsigned long pagesize;
2185
2186 while (!quit_decomp_thread) {
2187 qemu_mutex_lock(&param->mutex);
2188 while (!param->start && !quit_decomp_thread) {
2189 qemu_cond_wait(&param->cond, &param->mutex);
2190 pagesize = TARGET_PAGE_SIZE;
2191 if (!quit_decomp_thread) {
2192                 /* uncompress() can fail in some cases, especially
2193                  * when the page was dirtied while being compressed; that's
2194 * not a problem because the dirty page will be retransferred
2195 * and uncompress() won't break the data in other pages.
2196 */
2197 uncompress((Bytef *)param->des, &pagesize,
2198 (const Bytef *)param->compbuf, param->len);
2199 }
2200 param->start = false;
2201 }
2202 qemu_mutex_unlock(&param->mutex);
2203 }
2204
2205 return NULL;
2206}
2207
2208void migrate_decompress_threads_create(void)
2209{
2210 int i, thread_count;
2211
2212 thread_count = migrate_decompress_threads();
2213 decompress_threads = g_new0(QemuThread, thread_count);
2214 decomp_param = g_new0(DecompressParam, thread_count);
2215 quit_decomp_thread = false;
2216 for (i = 0; i < thread_count; i++) {
2217 qemu_mutex_init(&decomp_param[i].mutex);
2218 qemu_cond_init(&decomp_param[i].cond);
2219 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
2220 qemu_thread_create(decompress_threads + i, "decompress",
2221 do_data_decompress, decomp_param + i,
2222 QEMU_THREAD_JOINABLE);
2223 }
2224}
2225
2226void migrate_decompress_threads_join(void)
2227{
2228 int i, thread_count;
2229
2230 quit_decomp_thread = true;
2231 thread_count = migrate_decompress_threads();
2232 for (i = 0; i < thread_count; i++) {
2233 qemu_mutex_lock(&decomp_param[i].mutex);
2234 qemu_cond_signal(&decomp_param[i].cond);
2235 qemu_mutex_unlock(&decomp_param[i].mutex);
2236 }
2237 for (i = 0; i < thread_count; i++) {
2238 qemu_thread_join(decompress_threads + i);
2239 qemu_mutex_destroy(&decomp_param[i].mutex);
2240 qemu_cond_destroy(&decomp_param[i].cond);
2241 g_free(decomp_param[i].compbuf);
2242 }
2243 g_free(decompress_threads);
2244 g_free(decomp_param);
2245 decompress_threads = NULL;
2246 decomp_param = NULL;
2247}
2248
2249 static void decompress_data_with_multi_threads(QEMUFile *f,
2250 void *host, int len)
2251{
2252 int idx, thread_count;
2253
2254 thread_count = migrate_decompress_threads();
2255 while (true) {
2256 for (idx = 0; idx < thread_count; idx++) {
2257 if (!decomp_param[idx].start) {
2258                 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
2259 decomp_param[idx].des = host;
2260 decomp_param[idx].len = len;
2261 start_decompression(&decomp_param[idx]);
2262 break;
2263 }
2264 }
2265 if (idx < thread_count) {
2266 break;
2267 }
2268 }
2269}
2270
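/*
 * A minimal sketch (illustrative only, using the DecompressParam fields seen
 * above) of the dispatch handshake: scan for a worker whose 'start' flag is
 * clear, hand it the destination and length, then wake it.  When every
 * worker is busy the loop in the code above simply rescans.
 */
static void dispatch_to_idle_worker_sketch(DecompressParam *workers, int n,
                                           void *dest, int len)
{
    int i;

    for (;;) {
        for (i = 0; i < n; i++) {
            if (!workers[i].start) {
                qemu_mutex_lock(&workers[i].mutex);
                workers[i].des = dest;
                workers[i].len = len;
                workers[i].start = true; /* roughly what start_decompression() does */
                qemu_cond_signal(&workers[i].cond);
                qemu_mutex_unlock(&workers[i].mutex);
                return;
            }
        }
    }
}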
2271/*
2272  * Allocate data structures etc needed by incoming migration with postcopy-ram.
2273  * postcopy-ram's similarly named postcopy_ram_incoming_init does the work
2274 */
2275int ram_postcopy_incoming_init(MigrationIncomingState *mis)
2276{
2277 size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
2278
2279 return postcopy_ram_incoming_init(mis, ram_pages);
2280}
2281
2282/*
2283 * Called in postcopy mode by ram_load().
2284 * rcu_read_lock is taken prior to this being called.
2285 */
2286static int ram_load_postcopy(QEMUFile *f)
2287{
2288 int flags = 0, ret = 0;
2289 bool place_needed = false;
2290 bool matching_page_sizes = qemu_host_page_size == TARGET_PAGE_SIZE;
2291 MigrationIncomingState *mis = migration_incoming_get_current();
2292 /* Temporary page that is later 'placed' */
2293 void *postcopy_host_page = postcopy_get_tmp_page(mis);
2294     void *last_host = NULL;
2295     bool all_zero = false;
2296
2297 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2298 ram_addr_t addr;
2299 void *host = NULL;
2300 void *page_buffer = NULL;
2301 void *place_source = NULL;
2302 uint8_t ch;
2303
2304 addr = qemu_get_be64(f);
2305 flags = addr & ~TARGET_PAGE_MASK;
2306 addr &= TARGET_PAGE_MASK;
2307
2308 trace_ram_load_postcopy_loop((uint64_t)addr, flags);
2309 place_needed = false;
2310 if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
2311 RAMBlock *block = ram_block_from_stream(f, flags);
2312
2313 host = host_from_ram_block_offset(block, addr);
2314 if (!host) {
2315 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2316 ret = -EINVAL;
2317 break;
2318 }
2319 page_buffer = host;
2320 /*
2321 * Postcopy requires that we place whole host pages atomically.
2322 * To make it atomic, the data is read into a temporary page
2323 * that's moved into place later.
2324              * The migration protocol uses, possibly smaller, target pages;
2325              * however, the source ensures it always sends all the components
2326 * of a host page in order.
2327 */
2328 page_buffer = postcopy_host_page +
2329 ((uintptr_t)host & ~qemu_host_page_mask);
2330 /* If all TP are zero then we can optimise the place */
2331 if (!((uintptr_t)host & ~qemu_host_page_mask)) {
2332 all_zero = true;
2333 } else {
2334 /* not the 1st TP within the HP */
2335 if (host != (last_host + TARGET_PAGE_SIZE)) {
2336                     error_report("Non-sequential target page %p/%p",
2337 host, last_host);
2338 ret = -EINVAL;
2339 break;
2340 }
2341 }
2342
2343
2344 /*
2345 * If it's the last part of a host page then we place the host
2346 * page
2347 */
2348 place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
2349 ~qemu_host_page_mask) == 0;
2350 place_source = postcopy_host_page;
2351 }
2352         last_host = host;
2353
2354 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2355 case RAM_SAVE_FLAG_COMPRESS:
2356 ch = qemu_get_byte(f);
2357 memset(page_buffer, ch, TARGET_PAGE_SIZE);
2358 if (ch) {
2359 all_zero = false;
2360 }
2361 break;
2362
2363 case RAM_SAVE_FLAG_PAGE:
2364 all_zero = false;
2365 if (!place_needed || !matching_page_sizes) {
2366 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
2367 } else {
2368 /* Avoids the qemu_file copy during postcopy, which is
2369 * going to do a copy later; can only do it when we
2370 * do this read in one go (matching page sizes)
2371 */
2372 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
2373 TARGET_PAGE_SIZE);
2374 }
2375 break;
2376 case RAM_SAVE_FLAG_EOS:
2377 /* normal exit */
2378 break;
2379 default:
2380 error_report("Unknown combination of migration flags: %#x"
2381 " (postcopy mode)", flags);
2382 ret = -EINVAL;
2383 }
2384
2385 if (place_needed) {
2386 /* This gets called at the last target page in the host page */
2387 if (all_zero) {
2388 ret = postcopy_place_page_zero(mis,
2389 host + TARGET_PAGE_SIZE -
2390 qemu_host_page_size);
2391 } else {
2392 ret = postcopy_place_page(mis, host + TARGET_PAGE_SIZE -
2393 qemu_host_page_size,
2394 place_source);
2395 }
2396 }
2397 if (!ret) {
2398 ret = qemu_file_get_error(f);
2399 }
2400 }
2401
2402 return ret;
2403}
2404
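/*
 * A sketch (illustrative only) of the host-page assembly arithmetic used in
 * ram_load_postcopy() above, assuming ~qemu_host_page_mask selects the
 * offset-within-host-page bits, as the code does.
 */
static void *tmp_page_slot_sketch(void *tmp_host_page, void *host)
{
    /* where this target page lands inside the temporary host page */
    return (char *)tmp_host_page + ((uintptr_t)host & ~qemu_host_page_mask);
}

static bool last_target_page_in_host_page_sketch(void *host)
{
    /* true when the page that follows starts a new host page */
    return (((uintptr_t)host + TARGET_PAGE_SIZE) & ~qemu_host_page_mask) == 0;
}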
2405static int ram_load(QEMUFile *f, void *opaque, int version_id)
2406{
2407 int flags = 0, ret = 0;
2408 static uint64_t seq_iter;
2409 int len = 0;
2410 /*
2411 * If system is running in postcopy mode, page inserts to host memory must
2412 * be atomic
2413 */
2414 bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;
2415
2416 seq_iter++;
2417
2418 if (version_id != 4) {
2419 ret = -EINVAL;
2420 }
2421
2422 /* This RCU critical section can be very long running.
2423 * When RCU reclaims in the code start to become numerous,
2424 * it will be necessary to reduce the granularity of this
2425 * critical section.
2426 */
2427 rcu_read_lock();
2428
2429 if (postcopy_running) {
2430 ret = ram_load_postcopy(f);
2431 }
2432
2433 while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2434         ram_addr_t addr, total_ram_bytes;
2435         void *host = NULL;
2436 uint8_t ch;
2437
2438 addr = qemu_get_be64(f);
2439 flags = addr & ~TARGET_PAGE_MASK;
2440 addr &= TARGET_PAGE_MASK;
2441
2442 if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
2443 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
2444 RAMBlock *block = ram_block_from_stream(f, flags);
2445
2446 host = host_from_ram_block_offset(block, addr);
2447 if (!host) {
2448 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2449 ret = -EINVAL;
2450 break;
2451 }
2452 }
2453
2454 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2455 case RAM_SAVE_FLAG_MEM_SIZE:
2456 /* Synchronize RAM block list */
2457 total_ram_bytes = addr;
2458 while (!ret && total_ram_bytes) {
2459 RAMBlock *block;
2460 char id[256];
2461 ram_addr_t length;
2462
2463 len = qemu_get_byte(f);
2464 qemu_get_buffer(f, (uint8_t *)id, len);
2465 id[len] = 0;
2466 length = qemu_get_be64(f);
2467
2468 block = qemu_ram_block_by_name(id);
2469 if (block) {
2470 if (length != block->used_length) {
2471 Error *local_err = NULL;
2472
2473 ret = qemu_ram_resize(block->offset, length,
2474 &local_err);
2475 if (local_err) {
2476 error_report_err(local_err);
2477                         }
2478                     }
2479 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
2480 block->idstr);
2481 } else {
2482 error_report("Unknown ramblock \"%s\", cannot "
2483 "accept migration", id);
2484 ret = -EINVAL;
2485 }
2486
2487 total_ram_bytes -= length;
2488 }
2489 break;
2490
2491         case RAM_SAVE_FLAG_COMPRESS:
2492 ch = qemu_get_byte(f);
2493 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
2494 break;
2495
2496         case RAM_SAVE_FLAG_PAGE:
2497 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
2498 break;
2499
2500         case RAM_SAVE_FLAG_COMPRESS_PAGE:
2501 len = qemu_get_be32(f);
2502 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
2503 error_report("Invalid compressed data length: %d", len);
2504 ret = -EINVAL;
2505 break;
2506 }
2507             decompress_data_with_multi_threads(f, host, len);
2508             break;
2509
2510         case RAM_SAVE_FLAG_XBZRLE:
2511 if (load_xbzrle(f, addr, host) < 0) {
2512 error_report("Failed to decompress XBZRLE page at "
2513 RAM_ADDR_FMT, addr);
2514 ret = -EINVAL;
2515 break;
2516 }
2517 break;
2518 case RAM_SAVE_FLAG_EOS:
2519 /* normal exit */
2520 break;
2521 default:
2522 if (flags & RAM_SAVE_FLAG_HOOK) {
2523                 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
2524 } else {
2525 error_report("Unknown combination of migration flags: %#x",
2526 flags);
2527 ret = -EINVAL;
2528 }
2529 }
2530 if (!ret) {
2531 ret = qemu_file_get_error(f);
2532 }
2533 }
2534
2535 rcu_read_unlock();
2536 DPRINTF("Completed load of VM with exit code %d seq iteration "
2537 "%" PRIu64 "\n", ret, seq_iter);
2538 return ret;
2539}
2540
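/*
 * A toy example (illustrative only) of the page-header encoding decoded in
 * ram_load() above: addresses are target-page aligned, so the low bits of
 * the be64 header are free to carry the RAM_SAVE_FLAG_* bits.
 */
static uint64_t encode_page_header_sketch(ram_addr_t addr, int flags)
{
    /* e.g. addr 0x3000 with RAM_SAVE_FLAG_PAGE (0x08) travels as 0x3008 */
    return (addr & TARGET_PAGE_MASK) | (flags & ~TARGET_PAGE_MASK);
}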
2541static SaveVMHandlers savevm_ram_handlers = {
2542 .save_live_setup = ram_save_setup,
2543 .save_live_iterate = ram_save_iterate,
2544     .save_live_complete_postcopy = ram_save_complete,
2545     .save_live_complete_precopy = ram_save_complete,
2546 .save_live_pending = ram_save_pending,
2547 .load_state = ram_load,
2548     .cleanup = ram_migration_cleanup,
2549};
2550
2551void ram_mig_init(void)
2552{
2553 qemu_mutex_init(&XBZRLE.lock);
2554 register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
2555}