]> git.proxmox.com Git - mirror_qemu.git/blame - migration/ram.c
target/hppa: Fix IOR and ISR on unaligned access trap
[mirror_qemu.git] / migration / ram.c
CommitLineData
56e93d26
JQ
1/*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
76cc7b58
JQ
5 * Copyright (c) 2011-2015 Red Hat Inc
6 *
7 * Authors:
8 * Juan Quintela <quintela@redhat.com>
56e93d26
JQ
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 * THE SOFTWARE.
27 */
e688df6b 28
1393a485 29#include "qemu/osdep.h"
f348b6d1 30#include "qemu/cutils.h"
56e93d26
JQ
31#include "qemu/bitops.h"
32#include "qemu/bitmap.h"
b85ea5fa 33#include "qemu/madvise.h"
7205c9ec 34#include "qemu/main-loop.h"
709e3fe8 35#include "xbzrle.h"
b5ca3368 36#include "ram-compress.h"
7b1e1a22 37#include "ram.h"
6666c96a 38#include "migration.h"
947701cc 39#include "migration-stats.h"
f2a8f0a6 40#include "migration/register.h"
7b1e1a22 41#include "migration/misc.h"
08a0aee1 42#include "qemu-file.h"
be07b0ac 43#include "postcopy-ram.h"
53d37d36 44#include "page_cache.h"
56e93d26 45#include "qemu/error-report.h"
e688df6b 46#include "qapi/error.h"
ab7cbb0b 47#include "qapi/qapi-types-migration.h"
9af23989 48#include "qapi/qapi-events-migration.h"
acac51ba 49#include "qapi/qapi-commands-migration.h"
8acabf69 50#include "qapi/qmp/qerror.h"
56e93d26 51#include "trace.h"
56e93d26 52#include "exec/ram_addr.h"
f9494614 53#include "exec/target_page.h"
56e93d26 54#include "qemu/rcu_queue.h"
a91246c9 55#include "migration/colo.h"
53d37d36 56#include "block.h"
b0c3cf94 57#include "sysemu/cpu-throttle.h"
edd090c7 58#include "savevm.h"
b9ee2f7d 59#include "qemu/iov.h"
d32ca5ad 60#include "multifd.h"
278e2f55 61#include "sysemu/runstate.h"
48408174 62#include "rdma.h"
1f0776f1 63#include "options.h"
acac51ba
HH
64#include "sysemu/dirtylimit.h"
65#include "sysemu/kvm.h"
278e2f55 66
e5fdf920
LS
67#include "hw/boards.h" /* for machine_dump_guest_core() */
68
278e2f55
AG
69#if defined(__linux__)
70#include "qemu/userfaultfd.h"
71#endif /* defined(__linux__) */
56e93d26 72
56e93d26
JQ
73/***********************************************************/
74/* ram save/restore */
75
7b548761
JQ
76/*
77 * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
78 * worked for pages that were filled with the same char. We switched
bb890ed5 79 * it to only search for the zero value. And to avoid confusion with
7b548761 80 * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it.
bb890ed5 81 */
7b548761
JQ
82/*
83 * RAM_SAVE_FLAG_FULL was obsoleted in 2009, it can be reused now
84 */
85#define RAM_SAVE_FLAG_FULL 0x01
bb890ed5 86#define RAM_SAVE_FLAG_ZERO 0x02
56e93d26
JQ
87#define RAM_SAVE_FLAG_MEM_SIZE 0x04
88#define RAM_SAVE_FLAG_PAGE 0x08
89#define RAM_SAVE_FLAG_EOS 0x10
90#define RAM_SAVE_FLAG_CONTINUE 0x20
91#define RAM_SAVE_FLAG_XBZRLE 0x40
10cb3336 92/* 0x80 is reserved in rdma.h for RAM_SAVE_FLAG_HOOK */
56e93d26 93#define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
294e5a40 94#define RAM_SAVE_FLAG_MULTIFD_FLUSH 0x200
7b548761 95/* We can't use any flag that is bigger than 0x200 */
56e93d26 96
9360447d
JQ
97XBZRLECacheStats xbzrle_counters;
98
f1668764
PX
99/* used by the search for pages to send */
100struct PageSearchStatus {
101 /* The migration channel used for a specific host page */
102 QEMUFile *pss_channel;
ec6f3ab9
PX
103 /* Last block from where we have sent data */
104 RAMBlock *last_sent_block;
f1668764
PX
105 /* Current block being searched */
106 RAMBlock *block;
107 /* Current page to search from */
108 unsigned long page;
109 /* Set once we wrap around */
110 bool complete_round;
f1668764
PX
111 /* Whether we're sending a host page */
112 bool host_page_sending;
113 /* The start/end of current host page. Invalid if host_page_sending==false */
114 unsigned long host_page_start;
115 unsigned long host_page_end;
116};
117typedef struct PageSearchStatus PageSearchStatus;
118
56e93d26
JQ
119/* struct contains XBZRLE cache and a static page
120 used by the compression */
121static struct {
122 /* buffer used for XBZRLE encoding */
123 uint8_t *encoded_buf;
124 /* buffer for storing page content */
125 uint8_t *current_buf;
126 /* Cache for XBZRLE, Protected by lock. */
127 PageCache *cache;
128 QemuMutex lock;
c00e0928
JQ
129 /* it will store a page full of zeros */
130 uint8_t *zero_target_page;
f265e0e4
JQ
131 /* buffer used for XBZRLE decoding */
132 uint8_t *decoded_buf;
56e93d26
JQ
133} XBZRLE;
134
56e93d26
JQ
135static void XBZRLE_cache_lock(void)
136{
87dca0c9 137 if (migrate_xbzrle()) {
56e93d26 138 qemu_mutex_lock(&XBZRLE.lock);
f4c51a6b 139 }
56e93d26
JQ
140}
141
142static void XBZRLE_cache_unlock(void)
143{
87dca0c9 144 if (migrate_xbzrle()) {
56e93d26 145 qemu_mutex_unlock(&XBZRLE.lock);
f4c51a6b 146 }
56e93d26
JQ
147}
148
3d0684b2
JQ
149/**
150 * xbzrle_cache_resize: resize the xbzrle cache
151 *
cbde7be9 152 * This function is called from migrate_params_apply in main
3d0684b2
JQ
153 * thread, possibly while a migration is in progress. A running
154 * migration may be using the cache and might finish during this call,
155 * hence changes to the cache are protected by XBZRLE.lock().
156 *
c9dede2d 157 * Returns 0 for success or -1 for error
3d0684b2
JQ
158 *
159 * @new_size: new cache size
8acabf69 160 * @errp: set *errp if the check failed, with reason
56e93d26 161 */
8b9407a0 162int xbzrle_cache_resize(uint64_t new_size, Error **errp)
56e93d26
JQ
163{
164 PageCache *new_cache;
c9dede2d 165 int64_t ret = 0;
56e93d26 166
8acabf69
JQ
167 /* Check for truncation */
168 if (new_size != (size_t)new_size) {
169 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
170 "exceeding address space");
171 return -1;
172 }
173
2a313e5c
JQ
174 if (new_size == migrate_xbzrle_cache_size()) {
175 /* nothing to do */
c9dede2d 176 return 0;
2a313e5c
JQ
177 }
178
56e93d26
JQ
179 XBZRLE_cache_lock();
180
181 if (XBZRLE.cache != NULL) {
80f8dfde 182 new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
56e93d26 183 if (!new_cache) {
56e93d26
JQ
184 ret = -1;
185 goto out;
186 }
187
188 cache_fini(XBZRLE.cache);
189 XBZRLE.cache = new_cache;
190 }
56e93d26
JQ
191out:
192 XBZRLE_cache_unlock();
193 return ret;
194}
195
20123ee1
PX
196static bool postcopy_preempt_active(void)
197{
198 return migrate_postcopy_preempt() && migration_in_postcopy();
199}
200
f161c88a 201bool migrate_ram_is_ignored(RAMBlock *block)
fbd162e6
YK
202{
203 return !qemu_ram_is_migratable(block) ||
b0182e53
SS
204 (migrate_ignore_shared() && qemu_ram_is_shared(block)
205 && qemu_ram_is_named_file(block));
fbd162e6
YK
206}
207
343f632c
DDAG
208#undef RAMBLOCK_FOREACH
209
fbd162e6
YK
210int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
211{
212 RAMBlock *block;
213 int ret = 0;
214
89ac5a1d
DDAG
215 RCU_READ_LOCK_GUARD();
216
fbd162e6
YK
217 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
218 ret = func(block, opaque);
219 if (ret) {
220 break;
221 }
222 }
fbd162e6
YK
223 return ret;
224}
225
f9494614
AP
226static void ramblock_recv_map_init(void)
227{
228 RAMBlock *rb;
229
fbd162e6 230 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
f9494614
AP
231 assert(!rb->receivedmap);
232 rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
233 }
234}
235
236int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
237{
238 return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
239 rb->receivedmap);
240}
241
1cba9f6e
DDAG
242bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
243{
244 return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
245}
246
f9494614
AP
247void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
248{
249 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
250}
251
252void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
253 size_t nr)
254{
255 bitmap_set_atomic(rb->receivedmap,
256 ramblock_recv_bitmap_offset(host_addr, rb),
257 nr);
258}
259
a335debb
PX
260#define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)
261
262/*
263 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
264 *
265 * Returns >0 if success with sent bytes, or <0 if error.
266 */
267int64_t ramblock_recv_bitmap_send(QEMUFile *file,
268 const char *block_name)
269{
270 RAMBlock *block = qemu_ram_block_by_name(block_name);
271 unsigned long *le_bitmap, nbits;
272 uint64_t size;
273
274 if (!block) {
275 error_report("%s: invalid block name: %s", __func__, block_name);
276 return -1;
277 }
278
898ba906 279 nbits = block->postcopy_length >> TARGET_PAGE_BITS;
a335debb
PX
280
281 /*
282 * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
283 * machines we may need 4 more bytes for padding (see below
284 * comment). So extend it a bit before hand.
285 */
286 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
287
288 /*
289 * Always use little endian when sending the bitmap. This is
290 * required that when source and destination VMs are not using the
3a4452d8 291 * same endianness. (Note: big endian won't work.)
a335debb
PX
292 */
293 bitmap_to_le(le_bitmap, block->receivedmap, nbits);
294
295 /* Size of the bitmap, in bytes */
a725ef9f 296 size = DIV_ROUND_UP(nbits, 8);
a335debb
PX
297
298 /*
299 * size is always aligned to 8 bytes for 64bit machines, but it
300 * may not be true for 32bit machines. We need this padding to
301 * make sure the migration can survive even between 32bit and
302 * 64bit machines.
303 */
304 size = ROUND_UP(size, 8);
305
306 qemu_put_be64(file, size);
307 qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
be07a0ed 308 g_free(le_bitmap);
a335debb
PX
309 /*
310 * Mark as an end, in case the middle part is screwed up due to
3a4452d8 311 * some "mysterious" reason.
a335debb
PX
312 */
313 qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
be07a0ed
JQ
314 int ret = qemu_fflush(file);
315 if (ret) {
316 return ret;
a335debb
PX
317 }
318
319 return size + sizeof(size);
320}
321
ec481c6c
JQ
322/*
323 * An outstanding page request, on the source, having been received
324 * and queued
325 */
326struct RAMSrcPageRequest {
327 RAMBlock *rb;
328 hwaddr offset;
329 hwaddr len;
330
331 QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
332};
333
6f37bb8b
JQ
334/* State of RAM for migration */
335struct RAMState {
f1668764
PX
336 /*
337 * PageSearchStatus structures for the channels when send pages.
338 * Protected by the bitmap_mutex.
339 */
340 PageSearchStatus pss[RAM_CHANNEL_MAX];
278e2f55
AG
341 /* UFFD file descriptor, used in 'write-tracking' migration */
342 int uffdio_fd;
8d80e195
JQ
343 /* total ram size in bytes */
344 uint64_t ram_bytes_total;
6f37bb8b
JQ
345 /* Last block that we have visited searching for dirty pages */
346 RAMBlock *last_seen_block;
269ace29
JQ
347 /* Last dirty target page we have sent */
348 ram_addr_t last_page;
6f37bb8b
JQ
349 /* last ram version we have seen */
350 uint32_t last_version;
8d820d6f
JQ
351 /* How many times we have dirty too many pages */
352 int dirty_rate_high_cnt;
f664da80
JQ
353 /* these variables are used for bitmap sync */
354 /* last time we did a full bitmap_sync */
355 int64_t time_last_bitmap_sync;
eac74159 356 /* bytes transferred at start_time */
c4bdf0cf 357 uint64_t bytes_xfer_prev;
a66cd90c 358 /* number of dirty pages since start_time */
68908ed6 359 uint64_t num_dirty_pages_period;
b5833fde
JQ
360 /* xbzrle misses since the beginning of the period */
361 uint64_t xbzrle_cache_miss_prev;
e460a4b1
WW
362 /* Amount of xbzrle pages since the beginning of the period */
363 uint64_t xbzrle_pages_prev;
364 /* Amount of xbzrle encoded bytes since the beginning of the period */
365 uint64_t xbzrle_bytes_prev;
f3095cc8
JQ
366 /* Are we really using XBZRLE (e.g., after the first round). */
367 bool xbzrle_started;
05931ec5
JQ
368 /* Are we on the last stage of migration */
369 bool last_stage;
76e03000 370
be8b02ed
XG
371 /* total handled target pages at the beginning of period */
372 uint64_t target_page_count_prev;
373 /* total handled target pages since start */
374 uint64_t target_page_count;
9360447d 375 /* number of dirty bits in the bitmap */
2dfaf12e 376 uint64_t migration_dirty_pages;
f1668764
PX
377 /*
378 * Protects:
379 * - dirty/clear bitmap
380 * - migration_dirty_pages
381 * - pss structures
382 */
108cfae0 383 QemuMutex bitmap_mutex;
68a098f3
JQ
384 /* The RAMBlock used in the last src_page_requests */
385 RAMBlock *last_req_rb;
ec481c6c
JQ
386 /* Queue of outstanding page requests from the destination */
387 QemuMutex src_page_req_mutex;
b58deb34 388 QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
1015ff54
PX
389
390 /*
391 * This is only used when postcopy is in recovery phase, to communicate
392 * between the migration thread and the return path thread on dirty
393 * bitmap synchronizations. This field is unused in other stages of
394 * RAM migration.
395 */
396 unsigned int postcopy_bmap_sync_requested;
6f37bb8b
JQ
397};
398typedef struct RAMState RAMState;
399
53518d94 400static RAMState *ram_state;
6f37bb8b 401
bd227060
WW
402static NotifierWithReturnList precopy_notifier_list;
403
a1fe28df
PX
404/* Whether postcopy has queued requests? */
405static bool postcopy_has_request(RAMState *rs)
406{
407 return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests);
408}
409
bd227060
WW
410void precopy_infrastructure_init(void)
411{
412 notifier_with_return_list_init(&precopy_notifier_list);
413}
414
415void precopy_add_notifier(NotifierWithReturn *n)
416{
417 notifier_with_return_list_add(&precopy_notifier_list, n);
418}
419
420void precopy_remove_notifier(NotifierWithReturn *n)
421{
422 notifier_with_return_remove(n);
423}
424
425int precopy_notify(PrecopyNotifyReason reason, Error **errp)
426{
427 PrecopyNotifyData pnd;
428 pnd.reason = reason;
429 pnd.errp = errp;
430
431 return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
432}
433
9edabd4d 434uint64_t ram_bytes_remaining(void)
2f4fde93 435{
bae416e5
DDAG
436 return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
437 0;
2f4fde93
JQ
438}
439
26a26069 440void ram_transferred_add(uint64_t bytes)
4c2d0f6d 441{
ae680668 442 if (runstate_is_running()) {
aff3f660 443 stat64_add(&mig_stats.precopy_bytes, bytes);
ae680668 444 } else if (migration_in_postcopy()) {
aff3f660 445 stat64_add(&mig_stats.postcopy_bytes, bytes);
ae680668 446 } else {
aff3f660 447 stat64_add(&mig_stats.downtime_bytes, bytes);
ae680668 448 }
4c2d0f6d
DE
449}
450
4010ba38
JQ
451struct MigrationOps {
452 int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss);
453};
454typedef struct MigrationOps MigrationOps;
455
456MigrationOps *migration_ops;
457
93589827
PX
458static int ram_save_host_page_urgent(PageSearchStatus *pss);
459
ebd88a49
PX
460/* NOTE: page is the PFN not real ram_addr_t. */
461static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page)
462{
463 pss->block = rb;
464 pss->page = page;
465 pss->complete_round = false;
466}
467
93589827
PX
468/*
469 * Check whether two PSSs are actively sending the same page. Return true
470 * if it is, false otherwise.
471 */
472static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2)
473{
474 return pss1->host_page_sending && pss2->host_page_sending &&
475 (pss1->host_page_start == pss2->host_page_start);
476}
477
56e93d26 478/**
3d0684b2 479 * save_page_header: write page header to wire
56e93d26
JQ
480 *
481 * If this is the 1st block, it also writes the block identification
482 *
3d0684b2 483 * Returns the number of bytes written
56e93d26 484 *
ec6f3ab9 485 * @pss: current PSS channel status
56e93d26
JQ
486 * @block: block that contains the page we want to send
487 * @offset: offset inside the block for the page
488 * in the lower bits, it contains flags
489 */
37502df3
LS
490static size_t save_page_header(PageSearchStatus *pss, QEMUFile *f,
491 RAMBlock *block, ram_addr_t offset)
56e93d26 492{
9f5f380b 493 size_t size, len;
ec6f3ab9 494 bool same_block = (block == pss->last_sent_block);
56e93d26 495
10661f11 496 if (same_block) {
24795694
JQ
497 offset |= RAM_SAVE_FLAG_CONTINUE;
498 }
2bf3aa85 499 qemu_put_be64(f, offset);
56e93d26
JQ
500 size = 8;
501
10661f11 502 if (!same_block) {
9f5f380b 503 len = strlen(block->idstr);
2bf3aa85
JQ
504 qemu_put_byte(f, len);
505 qemu_put_buffer(f, (uint8_t *)block->idstr, len);
9f5f380b 506 size += 1 + len;
ec6f3ab9 507 pss->last_sent_block = block;
56e93d26
JQ
508 }
509 return size;
510}
511
3d0684b2 512/**
179a8080 513 * mig_throttle_guest_down: throttle down the guest
3d0684b2
JQ
514 *
515 * Reduce amount of guest cpu execution to hopefully slow down memory
516 * writes. If guest dirty memory rate is reduced below the rate at
517 * which we can transfer pages to the destination then we should be
518 * able to complete migration. Some workloads dirty memory way too
519 * fast and will not effectively converge, even with auto-converge.
070afca2 520 */
cbbf8182
KZ
521static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
522 uint64_t bytes_dirty_threshold)
070afca2 523{
2a8ec380 524 uint64_t pct_initial = migrate_cpu_throttle_initial();
9605c2ac 525 uint64_t pct_increment = migrate_cpu_throttle_increment();
873f674c 526 bool pct_tailslow = migrate_cpu_throttle_tailslow();
24155bd0 527 int pct_max = migrate_max_cpu_throttle();
070afca2 528
cbbf8182
KZ
529 uint64_t throttle_now = cpu_throttle_get_percentage();
530 uint64_t cpu_now, cpu_ideal, throttle_inc;
531
070afca2
JH
532 /* We have not started throttling yet. Let's start it. */
533 if (!cpu_throttle_active()) {
534 cpu_throttle_set(pct_initial);
535 } else {
536 /* Throttling already on, just increase the rate */
cbbf8182
KZ
537 if (!pct_tailslow) {
538 throttle_inc = pct_increment;
539 } else {
540 /* Compute the ideal CPU percentage used by Guest, which may
541 * make the dirty rate match the dirty rate threshold. */
542 cpu_now = 100 - throttle_now;
543 cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
544 bytes_dirty_period);
545 throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
546 }
547 cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
070afca2
JH
548 }
549}
550
91fe9a8d
RL
551void mig_throttle_counter_reset(void)
552{
553 RAMState *rs = ram_state;
554
555 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
556 rs->num_dirty_pages_period = 0;
897fd8bd 557 rs->bytes_xfer_prev = migration_transferred_bytes();
91fe9a8d
RL
558}
559
3d0684b2
JQ
560/**
561 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
562 *
563 * @current_addr: address for the zero page
564 *
565 * Update the xbzrle cache to reflect a page that's been sent as all 0.
56e93d26
JQ
566 * The important thing is that a stale (not-yet-0'd) page be replaced
567 * by the new data.
568 * As a bonus, if the page wasn't in the cache it gets added so that
3d0684b2 569 * when a small write is made into the 0'd page it gets XBZRLE sent.
56e93d26 570 */
8f47d4ee 571static void xbzrle_cache_zero_page(ram_addr_t current_addr)
56e93d26 572{
56e93d26
JQ
573 /* We don't care if this fails to allocate a new cache page
574 * as long as it updated an old one */
c00e0928 575 cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
aff3f660 576 stat64_get(&mig_stats.dirty_sync_count));
56e93d26
JQ
577}
578
579#define ENCODING_FLAG_XBZRLE 0x1
580
581/**
582 * save_xbzrle_page: compress and send current page
583 *
584 * Returns: 1 means that we wrote the page
585 * 0 means that page is identical to the one already sent
586 * -1 means that xbzrle would be longer than normal
587 *
5a987738 588 * @rs: current RAM state
ec6f3ab9 589 * @pss: current PSS channel
3d0684b2
JQ
590 * @current_data: pointer to the address of the page contents
591 * @current_addr: addr of the page
56e93d26
JQ
592 * @block: block that contains the page we want to send
593 * @offset: offset inside the block for the page
56e93d26 594 */
ec6f3ab9 595static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
61717ea9
PX
596 uint8_t **current_data, ram_addr_t current_addr,
597 RAMBlock *block, ram_addr_t offset)
56e93d26
JQ
598{
599 int encoded_len = 0, bytes_xbzrle;
600 uint8_t *prev_cached_page;
ec6f3ab9 601 QEMUFile *file = pss->pss_channel;
aff3f660 602 uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
56e93d26 603
536b5a4e 604 if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) {
9360447d 605 xbzrle_counters.cache_miss++;
05931ec5 606 if (!rs->last_stage) {
56e93d26 607 if (cache_insert(XBZRLE.cache, current_addr, *current_data,
536b5a4e 608 generation) == -1) {
56e93d26
JQ
609 return -1;
610 } else {
611 /* update *current_data when the page has been
612 inserted into cache */
613 *current_data = get_cached_data(XBZRLE.cache, current_addr);
614 }
615 }
616 return -1;
617 }
618
e460a4b1
WW
619 /*
620 * Reaching here means the page has hit the xbzrle cache, no matter what
621 * encoding result it is (normal encoding, overflow or skipping the page),
3a4452d8 622 * count the page as encoded. This is used to calculate the encoding rate.
e460a4b1
WW
623 *
624 * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
625 * 2nd page turns out to be skipped (i.e. no new bytes written to the
626 * page), the overall encoding rate will be 8KB / 2KB = 4, which has the
627 * skipped page included. In this way, the encoding rate can tell if the
628 * guest page is good for xbzrle encoding.
629 */
630 xbzrle_counters.pages++;
56e93d26
JQ
631 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
632
633 /* save current buffer into memory */
634 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
635
636 /* XBZRLE encoding (if there is no overflow) */
7ba7db9f
RH
637 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
638 TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
639 TARGET_PAGE_SIZE);
ca353803
WY
640
641 /*
642 * Update the cache contents, so that it corresponds to the data
643 * sent, in all cases except where we skip the page.
644 */
05931ec5 645 if (!rs->last_stage && encoded_len != 0) {
ca353803
WY
646 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
647 /*
648 * In the case where we couldn't compress, ensure that the caller
649 * sends the data from the cache, since the guest might have
650 * changed the RAM since we copied it.
651 */
652 *current_data = prev_cached_page;
653 }
654
56e93d26 655 if (encoded_len == 0) {
55c4446b 656 trace_save_xbzrle_page_skipping();
56e93d26
JQ
657 return 0;
658 } else if (encoded_len == -1) {
55c4446b 659 trace_save_xbzrle_page_overflow();
9360447d 660 xbzrle_counters.overflow++;
e460a4b1 661 xbzrle_counters.bytes += TARGET_PAGE_SIZE;
56e93d26
JQ
662 return -1;
663 }
664
56e93d26 665 /* Send XBZRLE based compressed page */
37502df3 666 bytes_xbzrle = save_page_header(pss, pss->pss_channel, block,
204b88b8 667 offset | RAM_SAVE_FLAG_XBZRLE);
61717ea9
PX
668 qemu_put_byte(file, ENCODING_FLAG_XBZRLE);
669 qemu_put_be16(file, encoded_len);
670 qemu_put_buffer(file, XBZRLE.encoded_buf, encoded_len);
56e93d26 671 bytes_xbzrle += encoded_len + 1 + 2;
e460a4b1
WW
672 /*
673 * Like compressed_size (please see update_compress_thread_counts),
674 * the xbzrle encoded bytes don't count the 8 byte header with
675 * RAM_SAVE_FLAG_CONTINUE.
676 */
677 xbzrle_counters.bytes += bytes_xbzrle - 8;
4c2d0f6d 678 ram_transferred_add(bytes_xbzrle);
56e93d26
JQ
679
680 return 1;
681}
682
3d0684b2 683/**
d9e474ea 684 * pss_find_next_dirty: find the next dirty page of current ramblock
f3f491fc 685 *
d9e474ea
PX
686 * This function updates pss->page to point to the next dirty page index
687 * within the ramblock to migrate, or the end of ramblock when nothing
688 * found. Note that when pss->host_page_sending==true it means we're
689 * during sending a host page, so we won't look for dirty page that is
690 * outside the host page boundary.
3d0684b2 691 *
d9e474ea 692 * @pss: the current page search status
f3f491fc 693 */
d9e474ea 694static void pss_find_next_dirty(PageSearchStatus *pss)
56e93d26 695{
d9e474ea 696 RAMBlock *rb = pss->block;
6b6712ef
JQ
697 unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
698 unsigned long *bitmap = rb->bmap;
56e93d26 699
f161c88a 700 if (migrate_ram_is_ignored(rb)) {
d9e474ea
PX
701 /* Points directly to the end, so we know no dirty page */
702 pss->page = size;
703 return;
704 }
705
706 /*
707 * If during sending a host page, only look for dirty pages within the
708 * current host page being send.
709 */
710 if (pss->host_page_sending) {
711 assert(pss->host_page_end);
712 size = MIN(size, pss->host_page_end);
b895de50
CLG
713 }
714
d9e474ea 715 pss->page = find_next_bit(bitmap, size, pss->page);
56e93d26
JQ
716}
717
1230a25f 718static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb,
3143577d
WW
719 unsigned long page)
720{
721 uint8_t shift;
722 hwaddr size, start;
723
724 if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) {
725 return;
726 }
727
728 shift = rb->clear_bmap_shift;
729 /*
730 * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
731 * can make things easier sometimes since then start address
732 * of the small chunk will always be 64 pages aligned so the
733 * bitmap will always be aligned to unsigned long. We should
734 * even be able to remove this restriction but I'm simply
735 * keeping it.
736 */
737 assert(shift >= 6);
738
739 size = 1ULL << (TARGET_PAGE_BITS + shift);
7648297d 740 start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size);
3143577d
WW
741 trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
742 memory_region_clear_dirty_bitmap(rb->mr, start, size);
743}
744
745static void
1230a25f 746migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb,
3143577d
WW
747 unsigned long start,
748 unsigned long npages)
749{
750 unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift;
751 unsigned long chunk_start = QEMU_ALIGN_DOWN(start, chunk_pages);
752 unsigned long chunk_end = QEMU_ALIGN_UP(start + npages, chunk_pages);
753
754 /*
755 * Clear pages from start to start + npages - 1, so the end boundary is
756 * exclusive.
757 */
758 for (i = chunk_start; i < chunk_end; i += chunk_pages) {
1230a25f 759 migration_clear_memory_region_dirty_bitmap(rb, i);
3143577d
WW
760 }
761}
762
a6a83cef
RL
763/*
764 * colo_bitmap_find_diry:find contiguous dirty pages from start
765 *
766 * Returns the page offset within memory region of the start of the contiguout
767 * dirty page
768 *
769 * @rs: current RAM state
770 * @rb: RAMBlock where to search for dirty pages
771 * @start: page where we start the search
772 * @num: the number of contiguous dirty pages
773 */
774static inline
775unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
776 unsigned long start, unsigned long *num)
777{
778 unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
779 unsigned long *bitmap = rb->bmap;
780 unsigned long first, next;
781
782 *num = 0;
783
f161c88a 784 if (migrate_ram_is_ignored(rb)) {
a6a83cef
RL
785 return size;
786 }
787
788 first = find_next_bit(bitmap, size, start);
789 if (first >= size) {
790 return first;
791 }
792 next = find_next_zero_bit(bitmap, size, first + 1);
793 assert(next >= first);
794 *num = next - first;
795 return first;
796}
797
06b10688 798static inline bool migration_bitmap_clear_dirty(RAMState *rs,
f20e2865
JQ
799 RAMBlock *rb,
800 unsigned long page)
a82d593b
DDAG
801{
802 bool ret;
a82d593b 803
002cad6b
PX
804 /*
805 * Clear dirty bitmap if needed. This _must_ be called before we
806 * send any of the page in the chunk because we need to make sure
807 * we can capture further page content changes when we sync dirty
808 * log the next time. So as long as we are going to send any of
809 * the page in the chunk we clear the remote dirty bitmap for all.
810 * Clearing it earlier won't be a problem, but too late will.
811 */
1230a25f 812 migration_clear_memory_region_dirty_bitmap(rb, page);
002cad6b 813
6b6712ef 814 ret = test_and_clear_bit(page, rb->bmap);
a82d593b 815 if (ret) {
0d8ec885 816 rs->migration_dirty_pages--;
a82d593b 817 }
386a907b 818
a82d593b
DDAG
819 return ret;
820}
821
be39b4cd
DH
822static void dirty_bitmap_clear_section(MemoryRegionSection *section,
823 void *opaque)
824{
825 const hwaddr offset = section->offset_within_region;
826 const hwaddr size = int128_get64(section->size);
827 const unsigned long start = offset >> TARGET_PAGE_BITS;
828 const unsigned long npages = size >> TARGET_PAGE_BITS;
829 RAMBlock *rb = section->mr->ram_block;
830 uint64_t *cleared_bits = opaque;
831
832 /*
833 * We don't grab ram_state->bitmap_mutex because we expect to run
834 * only when starting migration or during postcopy recovery where
835 * we don't have concurrent access.
836 */
837 if (!migration_in_postcopy() && !migrate_background_snapshot()) {
838 migration_clear_memory_region_dirty_bitmap_range(rb, start, npages);
839 }
840 *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages);
841 bitmap_clear(rb->bmap, start, npages);
842}
843
844/*
845 * Exclude all dirty pages from migration that fall into a discarded range as
846 * managed by a RamDiscardManager responsible for the mapped memory region of
847 * the RAMBlock. Clear the corresponding bits in the dirty bitmaps.
848 *
849 * Discarded pages ("logically unplugged") have undefined content and must
850 * not get migrated, because even reading these pages for migration might
851 * result in undesired behavior.
852 *
853 * Returns the number of cleared bits in the RAMBlock dirty bitmap.
854 *
855 * Note: The result is only stable while migrating (precopy/postcopy).
856 */
857static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb)
858{
859 uint64_t cleared_bits = 0;
860
861 if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) {
862 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
863 MemoryRegionSection section = {
864 .mr = rb->mr,
865 .offset_within_region = 0,
866 .size = int128_make64(qemu_ram_get_used_length(rb)),
867 };
868
869 ram_discard_manager_replay_discarded(rdm, &section,
870 dirty_bitmap_clear_section,
871 &cleared_bits);
872 }
873 return cleared_bits;
874}
875
9470c5e0
DH
876/*
877 * Check if a host-page aligned page falls into a discarded range as managed by
878 * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
879 *
880 * Note: The result is only stable while migrating (precopy/postcopy).
881 */
882bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
883{
884 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
885 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
886 MemoryRegionSection section = {
887 .mr = rb->mr,
888 .offset_within_region = start,
889 .size = int128_make64(qemu_ram_pagesize(rb)),
890 };
891
892 return !ram_discard_manager_is_populated(rdm, &section);
893 }
894 return false;
895}
896
267691b6 897/* Called with RCU critical section */
7a3e9571 898static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
56e93d26 899{
fb613580
KZ
900 uint64_t new_dirty_pages =
901 cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);
902
903 rs->migration_dirty_pages += new_dirty_pages;
904 rs->num_dirty_pages_period += new_dirty_pages;
56e93d26
JQ
905}
906
3d0684b2
JQ
907/**
908 * ram_pagesize_summary: calculate all the pagesizes of a VM
909 *
910 * Returns a summary bitmap of the page sizes of all RAMBlocks
911 *
912 * For VMs with just normal pages this is equivalent to the host page
913 * size. If it's got some huge pages then it's the OR of all the
914 * different page sizes.
e8ca1db2
DDAG
915 */
916uint64_t ram_pagesize_summary(void)
917{
918 RAMBlock *block;
919 uint64_t summary = 0;
920
fbd162e6 921 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
e8ca1db2
DDAG
922 summary |= block->page_size;
923 }
924
925 return summary;
926}
927
aecbfe9c
XG
928uint64_t ram_get_total_transferred_pages(void)
929{
aff3f660
JQ
930 return stat64_get(&mig_stats.normal_pages) +
931 stat64_get(&mig_stats.zero_pages) +
8258f2fa 932 compress_ram_pages() + xbzrle_counters.pages;
aecbfe9c
XG
933}
934
b734035b
XG
935static void migration_update_rates(RAMState *rs, int64_t end_time)
936{
be8b02ed 937 uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
b734035b
XG
938
939 /* calculate period counters */
aff3f660 940 stat64_set(&mig_stats.dirty_pages_rate,
72f8e587
JQ
941 rs->num_dirty_pages_period * 1000 /
942 (end_time - rs->time_last_bitmap_sync));
b734035b 943
be8b02ed 944 if (!page_count) {
b734035b
XG
945 return;
946 }
947
87dca0c9 948 if (migrate_xbzrle()) {
e460a4b1
WW
949 double encoded_size, unencoded_size;
950
b734035b 951 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
be8b02ed 952 rs->xbzrle_cache_miss_prev) / page_count;
b734035b 953 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
e460a4b1
WW
954 unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) *
955 TARGET_PAGE_SIZE;
956 encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev;
92271402 957 if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) {
e460a4b1 958 xbzrle_counters.encoding_rate = 0;
e460a4b1
WW
959 } else {
960 xbzrle_counters.encoding_rate = unencoded_size / encoded_size;
961 }
962 rs->xbzrle_pages_prev = xbzrle_counters.pages;
963 rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
b734035b 964 }
fb36fb27 965 compress_update_rates(page_count);
b734035b
XG
966}
967
acac51ba
HH
968/*
969 * Enable dirty-limit to throttle down the guest
970 */
971static void migration_dirty_limit_guest(void)
972{
973 /*
974 * dirty page rate quota for all vCPUs fetched from
975 * migration parameter 'vcpu_dirty_limit'
976 */
977 static int64_t quota_dirtyrate;
978 MigrationState *s = migrate_get_current();
979
980 /*
981 * If dirty limit already enabled and migration parameter
982 * vcpu-dirty-limit untouched.
983 */
984 if (dirtylimit_in_service() &&
985 quota_dirtyrate == s->parameters.vcpu_dirty_limit) {
986 return;
987 }
988
989 quota_dirtyrate = s->parameters.vcpu_dirty_limit;
990
991 /*
992 * Set all vCPU a quota dirtyrate, note that the second
993 * parameter will be ignored if setting all vCPU for the vm
994 */
995 qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL);
996 trace_migration_dirty_limit_guest(quota_dirtyrate);
997}
998
dc14a470
KZ
999static void migration_trigger_throttle(RAMState *rs)
1000{
6499efdb 1001 uint64_t threshold = migrate_throttle_trigger_threshold();
23b7576d 1002 uint64_t bytes_xfer_period =
897fd8bd 1003 migration_transferred_bytes() - rs->bytes_xfer_prev;
dc14a470
KZ
1004 uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
1005 uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
1006
1007 /* During block migration the auto-converge logic incorrectly detects
1008 * that ram migration makes no progress. Avoid this by disabling the
1009 * throttling logic during the bulk phase of block migration. */
bb9993c6
HH
1010 if (blk_mig_bulk_active()) {
1011 return;
1012 }
1013
310ad562
HH
1014 /*
1015 * The following detection logic can be refined later. For now:
1016 * Check to see if the ratio between dirtied bytes and the approx.
1017 * amount of bytes that just got transferred since the last time
1018 * we were in this routine reaches the threshold. If that happens
1019 * twice, start or increase throttling.
1020 */
1021 if ((bytes_dirty_period > bytes_dirty_threshold) &&
1022 (++rs->dirty_rate_high_cnt >= 2)) {
1023 rs->dirty_rate_high_cnt = 0;
1024 if (migrate_auto_converge()) {
dc14a470 1025 trace_migration_throttle();
cbbf8182
KZ
1026 mig_throttle_guest_down(bytes_dirty_period,
1027 bytes_dirty_threshold);
acac51ba
HH
1028 } else if (migrate_dirty_limit()) {
1029 migration_dirty_limit_guest();
dc14a470
KZ
1030 }
1031 }
1032}
1033
1e493be5 1034static void migration_bitmap_sync(RAMState *rs, bool last_stage)
56e93d26
JQ
1035{
1036 RAMBlock *block;
56e93d26 1037 int64_t end_time;
56e93d26 1038
aff3f660 1039 stat64_add(&mig_stats.dirty_sync_count, 1);
56e93d26 1040
f664da80
JQ
1041 if (!rs->time_last_bitmap_sync) {
1042 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
56e93d26
JQ
1043 }
1044
1045 trace_migration_bitmap_sync_start();
1e493be5 1046 memory_global_dirty_log_sync(last_stage);
56e93d26 1047
108cfae0 1048 qemu_mutex_lock(&rs->bitmap_mutex);
89ac5a1d
DDAG
1049 WITH_RCU_READ_LOCK_GUARD() {
1050 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1051 ramblock_sync_dirty_bitmap(rs, block);
1052 }
aff3f660 1053 stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining());
56e93d26 1054 }
108cfae0 1055 qemu_mutex_unlock(&rs->bitmap_mutex);
56e93d26 1056
9458a9a1 1057 memory_global_after_dirty_log_sync();
a66cd90c 1058 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1ffb5dfd 1059
56e93d26
JQ
1060 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1061
1062 /* more than 1 second = 1000 millisecons */
f664da80 1063 if (end_time > rs->time_last_bitmap_sync + 1000) {
dc14a470 1064 migration_trigger_throttle(rs);
070afca2 1065
b734035b
XG
1066 migration_update_rates(rs, end_time);
1067
be8b02ed 1068 rs->target_page_count_prev = rs->target_page_count;
d693c6f1
FF
1069
1070 /* reset period counters */
f664da80 1071 rs->time_last_bitmap_sync = end_time;
a66cd90c 1072 rs->num_dirty_pages_period = 0;
897fd8bd 1073 rs->bytes_xfer_prev = migration_transferred_bytes();
56e93d26 1074 }
b890902c 1075 if (migrate_events()) {
aff3f660 1076 uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
536b5a4e 1077 qapi_event_send_migration_pass(generation);
4addcd4f 1078 }
56e93d26
JQ
1079}
1080
1e493be5 1081static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
bd227060
WW
1082{
1083 Error *local_err = NULL;
1084
1085 /*
1086 * The current notifier usage is just an optimization to migration, so we
1087 * don't stop the normal migration process in the error case.
1088 */
1089 if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
1090 error_report_err(local_err);
b4a1733c 1091 local_err = NULL;
bd227060
WW
1092 }
1093
1e493be5 1094 migration_bitmap_sync(rs, last_stage);
bd227060
WW
1095
1096 if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
1097 error_report_err(local_err);
1098 }
1099}
1100
a4dbaf8e 1101void ram_release_page(const char *rbname, uint64_t offset)
47fe16ff
JQ
1102{
1103 if (!migrate_release_ram() || !migration_in_postcopy()) {
1104 return;
1105 }
1106
1107 ram_discard_range(rbname, offset, TARGET_PAGE_SIZE);
1108}
1109
56e93d26 1110/**
3d0684b2 1111 * save_zero_page: send the zero page to the stream
56e93d26 1112 *
3d0684b2 1113 * Returns the number of pages written.
56e93d26 1114 *
ccc09db8 1115 * @rs: current RAM state
ec6f3ab9 1116 * @pss: current PSS channel
56e93d26 1117 * @offset: offset inside the block for the page
56e93d26 1118 */
e8e4e7ac 1119static int save_zero_page(RAMState *rs, PageSearchStatus *pss,
61717ea9 1120 ram_addr_t offset)
56e93d26 1121{
e8e4e7ac 1122 uint8_t *p = pss->block->host + offset;
8697eb85
FR
1123 QEMUFile *file = pss->pss_channel;
1124 int len = 0;
56e93d26 1125
8697eb85
FR
1126 if (!buffer_is_zero(p, TARGET_PAGE_SIZE)) {
1127 return 0;
56e93d26 1128 }
ccc09db8 1129
e8e4e7ac 1130 len += save_page_header(pss, file, pss->block, offset | RAM_SAVE_FLAG_ZERO);
8697eb85
FR
1131 qemu_put_byte(file, 0);
1132 len += 1;
e8e4e7ac 1133 ram_release_page(pss->block->idstr, offset);
8697eb85 1134
ccc09db8
FR
1135 stat64_add(&mig_stats.zero_pages, 1);
1136 ram_transferred_add(len);
1137
1138 /*
1139 * Must let xbzrle know, otherwise a previous (now 0'd) cached
1140 * page would be stale.
1141 */
1142 if (rs->xbzrle_started) {
1143 XBZRLE_cache_lock();
e8e4e7ac 1144 xbzrle_cache_zero_page(pss->block->offset + offset);
ccc09db8
FR
1145 XBZRLE_cache_unlock();
1146 }
1147
8697eb85 1148 return len;
56e93d26
JQ
1149}
1150
059ff0fb
XG
1151/*
1152 * @pages: the number of pages written by the control path,
1153 * < 0 - error
1154 * > 0 - number of pages written
1155 *
1156 * Return true if the pages has been saved, otherwise false is returned.
1157 */
944853c2 1158static bool control_save_page(PageSearchStatus *pss,
61717ea9 1159 ram_addr_t offset, int *pages)
059ff0fb 1160{
059ff0fb
XG
1161 int ret;
1162
944853c2 1163 ret = rdma_control_save_page(pss->pss_channel, pss->block->offset, offset,
e493008d 1164 TARGET_PAGE_SIZE);
059ff0fb
XG
1165 if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1166 return false;
1167 }
1168
059ff0fb 1169 if (ret == RAM_SAVE_CONTROL_DELAYED) {
9c53d369 1170 *pages = 1;
059ff0fb
XG
1171 return true;
1172 }
9c53d369 1173 *pages = ret;
059ff0fb
XG
1174 return true;
1175}
1176
65dacaa0
XG
1177/*
1178 * directly send the page to the stream
1179 *
1180 * Returns the number of pages written.
1181 *
ec6f3ab9 1182 * @pss: current PSS channel
65dacaa0
XG
1183 * @block: block that contains the page we want to send
1184 * @offset: offset inside the block for the page
1185 * @buf: the page to be sent
1186 * @async: send to page asyncly
1187 */
ec6f3ab9 1188static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
61717ea9 1189 ram_addr_t offset, uint8_t *buf, bool async)
65dacaa0 1190{
ec6f3ab9
PX
1191 QEMUFile *file = pss->pss_channel;
1192
37502df3 1193 ram_transferred_add(save_page_header(pss, pss->pss_channel, block,
4c2d0f6d 1194 offset | RAM_SAVE_FLAG_PAGE));
65dacaa0 1195 if (async) {
61717ea9 1196 qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
f912ec5b 1197 migrate_release_ram() &&
65dacaa0
XG
1198 migration_in_postcopy());
1199 } else {
61717ea9 1200 qemu_put_buffer(file, buf, TARGET_PAGE_SIZE);
65dacaa0 1201 }
4c2d0f6d 1202 ram_transferred_add(TARGET_PAGE_SIZE);
aff3f660 1203 stat64_add(&mig_stats.normal_pages, 1);
65dacaa0
XG
1204 return 1;
1205}
1206
56e93d26 1207/**
3d0684b2 1208 * ram_save_page: send the given page to the stream
56e93d26 1209 *
3d0684b2 1210 * Returns the number of pages written.
3fd3c4b3
DDAG
1211 * < 0 - error
1212 * >=0 - Number of pages written - this might legally be 0
1213 * if xbzrle noticed the page was the same.
56e93d26 1214 *
6f37bb8b 1215 * @rs: current RAM state
56e93d26
JQ
1216 * @block: block that contains the page we want to send
1217 * @offset: offset inside the block for the page
56e93d26 1218 */
05931ec5 1219static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
56e93d26
JQ
1220{
1221 int pages = -1;
56e93d26 1222 uint8_t *p;
56e93d26 1223 bool send_async = true;
a08f6890 1224 RAMBlock *block = pss->block;
8bba004c 1225 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
059ff0fb 1226 ram_addr_t current_addr = block->offset + offset;
56e93d26 1227
2f68e399 1228 p = block->host + offset;
1db9d8e5 1229 trace_ram_save_page(block->idstr, (uint64_t)offset, p);
56e93d26 1230
56e93d26 1231 XBZRLE_cache_lock();
f3095cc8 1232 if (rs->xbzrle_started && !migration_in_postcopy()) {
ec6f3ab9 1233 pages = save_xbzrle_page(rs, pss, &p, current_addr,
61717ea9 1234 block, offset);
05931ec5 1235 if (!rs->last_stage) {
059ff0fb
XG
1236 /* Can't send this cached data async, since the cache page
1237 * might get updated before it gets to the wire
56e93d26 1238 */
059ff0fb 1239 send_async = false;
56e93d26
JQ
1240 }
1241 }
1242
1243 /* XBZRLE overflow or normal page */
1244 if (pages == -1) {
ec6f3ab9 1245 pages = save_normal_page(pss, block, offset, p, send_async);
56e93d26
JQ
1246 }
1247
1248 XBZRLE_cache_unlock();
1249
1250 return pages;
1251}
1252
61717ea9 1253static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block,
b9ee2f7d
JQ
1254 ram_addr_t offset)
1255{
61717ea9 1256 if (multifd_queue_page(file, block, offset) < 0) {
713f762a
IR
1257 return -1;
1258 }
aff3f660 1259 stat64_add(&mig_stats.normal_pages, 1);
b9ee2f7d
JQ
1260
1261 return 1;
1262}
1263
742ec5f3 1264int compress_send_queued_data(CompressParam *param)
56e93d26 1265{
3e81763e 1266 PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY];
eaa238ab 1267 MigrationState *ms = migrate_get_current();
3e81763e
LS
1268 QEMUFile *file = ms->to_dst_file;
1269 int len = 0;
1270
1271 RAMBlock *block = param->block;
1272 ram_addr_t offset = param->offset;
1273
1274 if (param->result == RES_NONE) {
1275 return 0;
1276 }
1277
1278 assert(block == pss->last_sent_block);
1279
1280 if (param->result == RES_ZEROPAGE) {
4024cc85 1281 assert(qemu_file_buffer_empty(param->file));
3e81763e
LS
1282 len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO);
1283 qemu_put_byte(file, 0);
1284 len += 1;
1285 ram_release_page(block->idstr, offset);
1286 } else if (param->result == RES_COMPRESS) {
4024cc85 1287 assert(!qemu_file_buffer_empty(param->file));
3e81763e
LS
1288 len += save_page_header(pss, file, block,
1289 offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
1290 len += qemu_put_qemu_file(file, param->file);
1291 } else {
1292 abort();
1293 }
1294
680628d2
LS
1295 update_compress_thread_counts(param, len);
1296
3e81763e
LS
1297 return len;
1298}
1299
31e2ac74
JQ
1300#define PAGE_ALL_CLEAN 0
1301#define PAGE_TRY_AGAIN 1
1302#define PAGE_DIRTY_FOUND 2
3d0684b2
JQ
1303/**
1304 * find_dirty_block: find the next dirty page and update any state
1305 * associated with the search process.
b9e60928 1306 *
31e2ac74 1307 * Returns:
294e5a40 1308 * <0: An error happened
31e2ac74
JQ
1309 * PAGE_ALL_CLEAN: no dirty page found, give up
1310 * PAGE_TRY_AGAIN: no dirty page found, retry for next block
1311 * PAGE_DIRTY_FOUND: dirty page found
b9e60928 1312 *
6f37bb8b 1313 * @rs: current RAM state
3d0684b2
JQ
1314 * @pss: data about the state of the current dirty page scan
1315 * @again: set to false if the search has scanned the whole of RAM
b9e60928 1316 */
31e2ac74 1317static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
b9e60928 1318{
d9e474ea
PX
1319 /* Update pss->page for the next dirty bit in ramblock */
1320 pss_find_next_dirty(pss);
1321
6f37bb8b 1322 if (pss->complete_round && pss->block == rs->last_seen_block &&
a935e30f 1323 pss->page >= rs->last_page) {
b9e60928
DDAG
1324 /*
1325 * We've been once around the RAM and haven't found anything.
1326 * Give up.
1327 */
31e2ac74 1328 return PAGE_ALL_CLEAN;
b9e60928 1329 }
542147f4
DH
1330 if (!offset_in_ramblock(pss->block,
1331 ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) {
b9e60928 1332 /* Didn't find anything in this RAM Block */
a935e30f 1333 pss->page = 0;
b9e60928
DDAG
1334 pss->block = QLIST_NEXT_RCU(pss->block, next);
1335 if (!pss->block) {
d4f34485
JQ
1336 if (migrate_multifd() &&
1337 !migrate_multifd_flush_after_each_section()) {
294e5a40
JQ
1338 QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
1339 int ret = multifd_send_sync_main(f);
1340 if (ret < 0) {
1341 return ret;
1342 }
1343 qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
1344 qemu_fflush(f);
1345 }
48df9d80
XG
1346 /*
1347 * If memory migration starts over, we will meet a dirtied page
1348 * which may still exists in compression threads's ring, so we
1349 * should flush the compressed data to make sure the new page
1350 * is not overwritten by the old one in the destination.
1351 *
1352 * Also If xbzrle is on, stop using the data compression at this
1353 * point. In theory, xbzrle can do better than compression.
1354 */
8020bc9a 1355 compress_flush_data();
48df9d80 1356
b9e60928
DDAG
1357 /* Hit the end of the list */
1358 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1359 /* Flag that we've looped */
1360 pss->complete_round = true;
1a373522 1361 /* After the first round, enable XBZRLE. */
87dca0c9 1362 if (migrate_xbzrle()) {
f3095cc8 1363 rs->xbzrle_started = true;
1a373522 1364 }
b9e60928
DDAG
1365 }
1366 /* Didn't find anything this time, but try again on the new block */
31e2ac74 1367 return PAGE_TRY_AGAIN;
b9e60928 1368 } else {
31e2ac74
JQ
1369 /* We've found something */
1370 return PAGE_DIRTY_FOUND;
b9e60928
DDAG
1371 }
1372}
1373
3d0684b2
JQ
1374/**
1375 * unqueue_page: gets a page of the queue
1376 *
a82d593b 1377 * Helper for 'get_queued_page' - gets a page off the queue
a82d593b 1378 *
3d0684b2
JQ
1379 * Returns the block of the page (or NULL if none available)
1380 *
ec481c6c 1381 * @rs: current RAM state
3d0684b2 1382 * @offset: used to return the offset within the RAMBlock
a82d593b 1383 */
f20e2865 1384static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
a82d593b 1385{
a1fe28df 1386 struct RAMSrcPageRequest *entry;
a82d593b
DDAG
1387 RAMBlock *block = NULL;
1388
a1fe28df 1389 if (!postcopy_has_request(rs)) {
ae526e32
XG
1390 return NULL;
1391 }
1392
6e8a355d 1393 QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
a1fe28df
PX
1394
1395 /*
1396 * This should _never_ change even after we take the lock, because no one
1397 * should be taking anything off the request list other than us.
1398 */
1399 assert(postcopy_has_request(rs));
1400
1401 entry = QSIMPLEQ_FIRST(&rs->src_page_requests);
1402 block = entry->rb;
1403 *offset = entry->offset;
1404
777f53c7
TH
1405 if (entry->len > TARGET_PAGE_SIZE) {
1406 entry->len -= TARGET_PAGE_SIZE;
1407 entry->offset += TARGET_PAGE_SIZE;
a1fe28df
PX
1408 } else {
1409 memory_region_unref(block->mr);
1410 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
1411 g_free(entry);
1412 migration_consume_urgent_request();
a82d593b 1413 }
a82d593b
DDAG
1414
1415 return block;
1416}
1417
278e2f55
AG
1418#if defined(__linux__)
1419/**
1420 * poll_fault_page: try to get next UFFD write fault page and, if pending fault
1421 * is found, return RAM block pointer and page offset
1422 *
1423 * Returns pointer to the RAMBlock containing faulting page,
1424 * NULL if no write faults are pending
1425 *
1426 * @rs: current RAM state
1427 * @offset: page offset from the beginning of the block
1428 */
1429static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
1430{
1431 struct uffd_msg uffd_msg;
1432 void *page_address;
82ea3e3b 1433 RAMBlock *block;
278e2f55
AG
1434 int res;
1435
1436 if (!migrate_background_snapshot()) {
1437 return NULL;
1438 }
1439
1440 res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1);
1441 if (res <= 0) {
1442 return NULL;
1443 }
1444
1445 page_address = (void *)(uintptr_t) uffd_msg.arg.pagefault.address;
82ea3e3b
AG
1446 block = qemu_ram_block_from_host(page_address, false, offset);
1447 assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0);
1448 return block;
278e2f55
AG
1449}
1450
1451/**
1452 * ram_save_release_protection: release UFFD write protection after
1453 * a range of pages has been saved
1454 *
1455 * @rs: current RAM state
1456 * @pss: page-search-status structure
1457 * @start_page: index of the first page in the range relative to pss->block
1458 *
1459 * Returns 0 on success, negative value in case of an error
1460*/
1461static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
1462 unsigned long start_page)
1463{
1464 int res = 0;
1465
1466 /* Check if page is from UFFD-managed region. */
1467 if (pss->block->flags & RAM_UF_WRITEPROTECT) {
1468 void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS);
258f5c98 1469 uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS;
278e2f55
AG
1470
1471 /* Flush async buffers before un-protect. */
61717ea9 1472 qemu_fflush(pss->pss_channel);
278e2f55
AG
1473 /* Un-protect memory range. */
1474 res = uffd_change_protection(rs->uffdio_fd, page_address, run_length,
1475 false, false);
1476 }
1477
1478 return res;
1479}
1480
1481/* ram_write_tracking_available: check if kernel supports required UFFD features
1482 *
1483 * Returns true if supports, false otherwise
1484 */
1485bool ram_write_tracking_available(void)
1486{
1487 uint64_t uffd_features;
1488 int res;
1489
1490 res = uffd_query_features(&uffd_features);
1491 return (res == 0 &&
1492 (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP) != 0);
1493}
1494
1495/* ram_write_tracking_compatible: check if guest configuration is
1496 * compatible with 'write-tracking'
1497 *
1498 * Returns true if compatible, false otherwise
1499 */
1500bool ram_write_tracking_compatible(void)
1501{
1502 const uint64_t uffd_ioctls_mask = BIT(_UFFDIO_WRITEPROTECT);
1503 int uffd_fd;
82ea3e3b 1504 RAMBlock *block;
278e2f55
AG
1505 bool ret = false;
1506
1507 /* Open UFFD file descriptor */
1508 uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, false);
1509 if (uffd_fd < 0) {
1510 return false;
1511 }
1512
1513 RCU_READ_LOCK_GUARD();
1514
82ea3e3b 1515 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
278e2f55
AG
1516 uint64_t uffd_ioctls;
1517
1518 /* Nothing to do with read-only and MMIO-writable regions */
82ea3e3b 1519 if (block->mr->readonly || block->mr->rom_device) {
278e2f55
AG
1520 continue;
1521 }
1522 /* Try to register block memory via UFFD-IO to track writes */
82ea3e3b 1523 if (uffd_register_memory(uffd_fd, block->host, block->max_length,
278e2f55
AG
1524 UFFDIO_REGISTER_MODE_WP, &uffd_ioctls)) {
1525 goto out;
1526 }
1527 if ((uffd_ioctls & uffd_ioctls_mask) != uffd_ioctls_mask) {
1528 goto out;
1529 }
1530 }
1531 ret = true;
1532
1533out:
1534 uffd_close_fd(uffd_fd);
1535 return ret;
1536}
1537
f7b9dcfb
DH
1538static inline void populate_read_range(RAMBlock *block, ram_addr_t offset,
1539 ram_addr_t size)
1540{
5f19a449
DH
1541 const ram_addr_t end = offset + size;
1542
f7b9dcfb
DH
1543 /*
1544 * We read one byte of each page; this will preallocate page tables if
1545 * required and populate the shared zeropage on MAP_PRIVATE anonymous memory
1546 * where no page was populated yet. This might require adaption when
1547 * supporting other mappings, like shmem.
1548 */
5f19a449 1549 for (; offset < end; offset += block->page_size) {
f7b9dcfb
DH
1550 char tmp = *((char *)block->host + offset);
1551
1552 /* Don't optimize the read out */
1553 asm volatile("" : "+r" (tmp));
1554 }
1555}
1556
6fee3a1f
DH
1557static inline int populate_read_section(MemoryRegionSection *section,
1558 void *opaque)
1559{
1560 const hwaddr size = int128_get64(section->size);
1561 hwaddr offset = section->offset_within_region;
1562 RAMBlock *block = section->mr->ram_block;
1563
1564 populate_read_range(block, offset, size);
1565 return 0;
1566}
1567
eeccb99c 1568/*
f7b9dcfb
DH
1569 * ram_block_populate_read: preallocate page tables and populate pages in the
1570 * RAM block by reading a byte of each page.
eeccb99c
AG
1571 *
1572 * Since it's solely used for userfault_fd WP feature, here we just
1573 * hardcode page size to qemu_real_host_page_size.
1574 *
82ea3e3b 1575 * @block: RAM block to populate
eeccb99c 1576 */
6fee3a1f 1577static void ram_block_populate_read(RAMBlock *rb)
eeccb99c 1578{
6fee3a1f
DH
1579 /*
1580 * Skip populating all pages that fall into a discarded range as managed by
1581 * a RamDiscardManager responsible for the mapped memory region of the
1582 * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock
1583 * must not get populated automatically. We don't have to track
1584 * modifications via userfaultfd WP reliably, because these pages will
1585 * not be part of the migration stream either way -- see
1586 * ramblock_dirty_bitmap_exclude_discarded_pages().
1587 *
1588 * Note: The result is only stable while migrating (precopy/postcopy).
1589 */
1590 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
1591 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
1592 MemoryRegionSection section = {
1593 .mr = rb->mr,
1594 .offset_within_region = 0,
1595 .size = rb->mr->size,
1596 };
1597
1598 ram_discard_manager_replay_populated(rdm, &section,
1599 populate_read_section, NULL);
1600 } else {
1601 populate_read_range(rb, 0, rb->used_length);
1602 }
eeccb99c
AG
1603}
1604
1605/*
1606 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
1607 */
1608void ram_write_tracking_prepare(void)
1609{
82ea3e3b 1610 RAMBlock *block;
eeccb99c
AG
1611
1612 RCU_READ_LOCK_GUARD();
1613
82ea3e3b 1614 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
eeccb99c 1615 /* Nothing to do with read-only and MMIO-writable regions */
82ea3e3b 1616 if (block->mr->readonly || block->mr->rom_device) {
eeccb99c
AG
1617 continue;
1618 }
1619
1620 /*
1621 * Populate pages of the RAM block before enabling userfault_fd
1622 * write protection.
1623 *
1624 * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with
1625 * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip
1626 * pages with pte_none() entries in page table.
1627 */
f7b9dcfb 1628 ram_block_populate_read(block);
eeccb99c
AG
1629 }
1630}
1631
e41c5770
DH
1632static inline int uffd_protect_section(MemoryRegionSection *section,
1633 void *opaque)
1634{
1635 const hwaddr size = int128_get64(section->size);
1636 const hwaddr offset = section->offset_within_region;
1637 RAMBlock *rb = section->mr->ram_block;
1638 int uffd_fd = (uintptr_t)opaque;
1639
1640 return uffd_change_protection(uffd_fd, rb->host + offset, size, true,
1641 false);
1642}
1643
1644static int ram_block_uffd_protect(RAMBlock *rb, int uffd_fd)
1645{
1646 assert(rb->flags & RAM_UF_WRITEPROTECT);
1647
1648 /* See ram_block_populate_read() */
1649 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
1650 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
1651 MemoryRegionSection section = {
1652 .mr = rb->mr,
1653 .offset_within_region = 0,
1654 .size = rb->mr->size,
1655 };
1656
1657 return ram_discard_manager_replay_populated(rdm, &section,
1658 uffd_protect_section,
1659 (void *)(uintptr_t)uffd_fd);
1660 }
1661 return uffd_change_protection(uffd_fd, rb->host,
1662 rb->used_length, true, false);
1663}
1664
278e2f55
AG
1665/*
1666 * ram_write_tracking_start: start UFFD-WP memory tracking
1667 *
1668 * Returns 0 for success or negative value in case of error
1669 */
1670int ram_write_tracking_start(void)
1671{
1672 int uffd_fd;
1673 RAMState *rs = ram_state;
82ea3e3b 1674 RAMBlock *block;
278e2f55
AG
1675
1676 /* Open UFFD file descriptor */
1677 uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, true);
1678 if (uffd_fd < 0) {
1679 return uffd_fd;
1680 }
1681 rs->uffdio_fd = uffd_fd;
1682
1683 RCU_READ_LOCK_GUARD();
1684
82ea3e3b 1685 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
278e2f55 1686 /* Nothing to do with read-only and MMIO-writable regions */
82ea3e3b 1687 if (block->mr->readonly || block->mr->rom_device) {
278e2f55
AG
1688 continue;
1689 }
1690
1691 /* Register block memory with UFFD to track writes */
82ea3e3b
AG
1692 if (uffd_register_memory(rs->uffdio_fd, block->host,
1693 block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) {
278e2f55
AG
1694 goto fail;
1695 }
72ef3a37
DH
1696 block->flags |= RAM_UF_WRITEPROTECT;
1697 memory_region_ref(block->mr);
1698
278e2f55 1699 /* Apply UFFD write protection to the block memory range */
e41c5770 1700 if (ram_block_uffd_protect(block, uffd_fd)) {
278e2f55
AG
1701 goto fail;
1702 }
278e2f55 1703
82ea3e3b
AG
1704 trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size,
1705 block->host, block->max_length);
278e2f55
AG
1706 }
1707
1708 return 0;
1709
1710fail:
1711 error_report("ram_write_tracking_start() failed: restoring initial memory state");
1712
82ea3e3b
AG
1713 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1714 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
278e2f55
AG
1715 continue;
1716 }
82ea3e3b 1717 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
278e2f55 1718 /* Cleanup flags and remove reference */
82ea3e3b
AG
1719 block->flags &= ~RAM_UF_WRITEPROTECT;
1720 memory_region_unref(block->mr);
278e2f55
AG
1721 }
1722
1723 uffd_close_fd(uffd_fd);
1724 rs->uffdio_fd = -1;
1725 return -1;
1726}
1727
1728/**
1729 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
1730 */
1731void ram_write_tracking_stop(void)
1732{
1733 RAMState *rs = ram_state;
82ea3e3b 1734 RAMBlock *block;
278e2f55
AG
1735
1736 RCU_READ_LOCK_GUARD();
1737
82ea3e3b
AG
1738 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1739 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
278e2f55
AG
1740 continue;
1741 }
82ea3e3b 1742 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
278e2f55 1743
82ea3e3b
AG
1744 trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size,
1745 block->host, block->max_length);
278e2f55
AG
1746
1747 /* Cleanup flags and remove reference */
82ea3e3b
AG
1748 block->flags &= ~RAM_UF_WRITEPROTECT;
1749 memory_region_unref(block->mr);
278e2f55
AG
1750 }
1751
1752 /* Finally close UFFD file descriptor */
1753 uffd_close_fd(rs->uffdio_fd);
1754 rs->uffdio_fd = -1;
1755}
1756
1757#else
1758/* No target OS support, stubs just fail or ignore */
1759
1760static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
1761{
1762 (void) rs;
1763 (void) offset;
1764
1765 return NULL;
1766}
1767
1768static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
1769 unsigned long start_page)
1770{
1771 (void) rs;
1772 (void) pss;
1773 (void) start_page;
1774
1775 return 0;
1776}
1777
1778bool ram_write_tracking_available(void)
1779{
1780 return false;
1781}
1782
1783bool ram_write_tracking_compatible(void)
1784{
1785 assert(0);
1786 return false;
1787}
1788
1789int ram_write_tracking_start(void)
1790{
1791 assert(0);
1792 return -1;
1793}
1794
1795void ram_write_tracking_stop(void)
1796{
1797 assert(0);
1798}
1799#endif /* defined(__linux__) */
1800
3d0684b2 1801/**
ff1543af 1802 * get_queued_page: unqueue a page from the postcopy requests
3d0684b2
JQ
1803 *
1804 * Skips pages that are already sent (!dirty)
a82d593b 1805 *
a5f7b1a6 1806 * Returns true if a queued page is found
a82d593b 1807 *
6f37bb8b 1808 * @rs: current RAM state
3d0684b2 1809 * @pss: data about the state of the current dirty page scan
a82d593b 1810 */
f20e2865 1811static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
a82d593b
DDAG
1812{
1813 RAMBlock *block;
1814 ram_addr_t offset;
777f53c7
TH
1815 bool dirty;
1816
1817 do {
1818 block = unqueue_page(rs, &offset);
1819 /*
1820 * We're sending this page, and since it's postcopy nothing else
1821 * will dirty it, and we must make sure it doesn't get sent again
1822 * even if this queue request was received after the background
1823 * search already sent it.
1824 */
1825 if (block) {
1826 unsigned long page;
1827
1828 page = offset >> TARGET_PAGE_BITS;
1829 dirty = test_bit(page, block->bmap);
1830 if (!dirty) {
1831 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
1832 page);
1833 } else {
1834 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
1835 }
1836 }
a82d593b 1837
777f53c7 1838 } while (block && !dirty);
a82d593b 1839
b062106d 1840 if (!block) {
278e2f55
AG
1841 /*
1842 * Poll write faults too if background snapshot is enabled; that's
1843 * when we have vcpus got blocked by the write protected pages.
1844 */
1845 block = poll_fault_page(rs, &offset);
1846 }
1847
a82d593b 1848 if (block) {
a82d593b
DDAG
1849 /*
1850 * We want the background search to continue from the queued page
1851 * since the guest is likely to want other pages near to the page
1852 * it just requested.
1853 */
1854 pss->block = block;
a935e30f 1855 pss->page = offset >> TARGET_PAGE_BITS;
422314e7
WY
1856
1857 /*
1858 * This unqueued page would break the "one round" check, even is
1859 * really rare.
1860 */
1861 pss->complete_round = false;
a82d593b
DDAG
1862 }
1863
1864 return !!block;
1865}
1866
6c595cde 1867/**
5e58f968
JQ
1868 * migration_page_queue_free: drop any remaining pages in the ram
1869 * request queue
6c595cde 1870 *
3d0684b2
JQ
1871 * It should be empty at the end anyway, but in error cases there may
1872 * be some left. in case that there is any page left, we drop it.
1873 *
6c595cde 1874 */
83c13382 1875static void migration_page_queue_free(RAMState *rs)
6c595cde 1876{
ec481c6c 1877 struct RAMSrcPageRequest *mspr, *next_mspr;
6c595cde
DDAG
1878 /* This queue generally should be empty - but in the case of a failed
1879 * migration might have some droppings in.
1880 */
89ac5a1d 1881 RCU_READ_LOCK_GUARD();
ec481c6c 1882 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
6c595cde 1883 memory_region_unref(mspr->rb->mr);
ec481c6c 1884 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
6c595cde
DDAG
1885 g_free(mspr);
1886 }
6c595cde
DDAG
1887}
1888
1889/**
3d0684b2
JQ
1890 * ram_save_queue_pages: queue the page for transmission
1891 *
1892 * A request from postcopy destination for example.
1893 *
1894 * Returns zero on success or negative on error
1895 *
3d0684b2
JQ
1896 * @rbname: Name of the RAMBLock of the request. NULL means the
1897 * same that last one.
1898 * @start: starting address from the start of the RAMBlock
1899 * @len: length (in bytes) to send
6c595cde 1900 */
7aa6070d
PX
1901int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len,
1902 Error **errp)
6c595cde
DDAG
1903{
1904 RAMBlock *ramblock;
53518d94 1905 RAMState *rs = ram_state;
6c595cde 1906
aff3f660 1907 stat64_add(&mig_stats.postcopy_requests, 1);
89ac5a1d
DDAG
1908 RCU_READ_LOCK_GUARD();
1909
6c595cde
DDAG
1910 if (!rbname) {
1911 /* Reuse last RAMBlock */
68a098f3 1912 ramblock = rs->last_req_rb;
6c595cde
DDAG
1913
1914 if (!ramblock) {
1915 /*
1916 * Shouldn't happen, we can't reuse the last RAMBlock if
1917 * it's the 1st request.
1918 */
7aa6070d 1919 error_setg(errp, "MIG_RP_MSG_REQ_PAGES has no previous block");
03acb4e9 1920 return -1;
6c595cde
DDAG
1921 }
1922 } else {
1923 ramblock = qemu_ram_block_by_name(rbname);
1924
1925 if (!ramblock) {
1926 /* We shouldn't be asked for a non-existent RAMBlock */
7aa6070d 1927 error_setg(errp, "MIG_RP_MSG_REQ_PAGES has no block '%s'", rbname);
03acb4e9 1928 return -1;
6c595cde 1929 }
68a098f3 1930 rs->last_req_rb = ramblock;
6c595cde
DDAG
1931 }
1932 trace_ram_save_queue_pages(ramblock->idstr, start, len);
542147f4 1933 if (!offset_in_ramblock(ramblock, start + len - 1)) {
7aa6070d
PX
1934 error_setg(errp, "MIG_RP_MSG_REQ_PAGES request overrun, "
1935 "start=" RAM_ADDR_FMT " len="
1936 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
1937 start, len, ramblock->used_length);
03acb4e9 1938 return -1;
6c595cde
DDAG
1939 }
1940
93589827
PX
1941 /*
1942 * When with postcopy preempt, we send back the page directly in the
1943 * rp-return thread.
1944 */
1945 if (postcopy_preempt_active()) {
1946 ram_addr_t page_start = start >> TARGET_PAGE_BITS;
1947 size_t page_size = qemu_ram_pagesize(ramblock);
1948 PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY];
1949 int ret = 0;
1950
1951 qemu_mutex_lock(&rs->bitmap_mutex);
1952
1953 pss_init(pss, ramblock, page_start);
1954 /*
1955 * Always use the preempt channel, and make sure it's there. It's
1956 * safe to access without lock, because when rp-thread is running
1957 * we should be the only one who operates on the qemufile
1958 */
1959 pss->pss_channel = migrate_get_current()->postcopy_qemufile_src;
93589827
PX
1960 assert(pss->pss_channel);
1961
1962 /*
1963 * It must be either one or multiple of host page size. Just
1964 * assert; if something wrong we're mostly split brain anyway.
1965 */
1966 assert(len % page_size == 0);
1967 while (len) {
1968 if (ram_save_host_page_urgent(pss)) {
7aa6070d
PX
1969 error_setg(errp, "ram_save_host_page_urgent() failed: "
1970 "ramblock=%s, start_addr=0x"RAM_ADDR_FMT,
1971 ramblock->idstr, start);
93589827
PX
1972 ret = -1;
1973 break;
1974 }
1975 /*
1976 * NOTE: after ram_save_host_page_urgent() succeeded, pss->page
1977 * will automatically be moved and point to the next host page
1978 * we're going to send, so no need to update here.
1979 *
1980 * Normally QEMU never sends >1 host page in requests, so
1981 * logically we don't even need that as the loop should only
1982 * run once, but just to be consistent.
1983 */
1984 len -= page_size;
1985 };
1986 qemu_mutex_unlock(&rs->bitmap_mutex);
1987
1988 return ret;
1989 }
1990
ec481c6c 1991 struct RAMSrcPageRequest *new_entry =
b21e2380 1992 g_new0(struct RAMSrcPageRequest, 1);
6c595cde
DDAG
1993 new_entry->rb = ramblock;
1994 new_entry->offset = start;
1995 new_entry->len = len;
1996
1997 memory_region_ref(ramblock->mr);
ec481c6c
JQ
1998 qemu_mutex_lock(&rs->src_page_req_mutex);
1999 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
e03a34f8 2000 migration_make_urgent_request();
ec481c6c 2001 qemu_mutex_unlock(&rs->src_page_req_mutex);
6c595cde
DDAG
2002
2003 return 0;
6c595cde
DDAG
2004}
2005
5e5fdcff
XG
2006/*
2007 * try to compress the page before posting it out, return true if the page
2008 * has been properly handled by compression, otherwise needs other
2009 * paths to handle it
2010 */
ec6f3ab9 2011static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
d637a182 2012 ram_addr_t offset)
5e5fdcff 2013{
4e400f90 2014 if (!migrate_compress()) {
5e5fdcff
XG
2015 return false;
2016 }
2017
2018 /*
2019 * When starting the process of a new block, the first page of
2020 * the block should be sent out before other pages in the same
2021 * block, and all the pages in last block should have been sent
2022 * out, keeping this order is important, because the 'cont' flag
2023 * is used to avoid resending the block name.
2024 *
2025 * We post the fist page as normal page as compression will take
2026 * much CPU resource.
2027 */
d637a182 2028 if (pss->block != pss->last_sent_block) {
8020bc9a 2029 compress_flush_data();
5e5fdcff
XG
2030 return false;
2031 }
2032
250b1d7e 2033 return compress_page_with_multi_thread(pss->block, offset,
742ec5f3 2034 compress_send_queued_data);
5e5fdcff
XG
2035}
2036
a82d593b 2037/**
4010ba38 2038 * ram_save_target_page_legacy: save one target page
a82d593b 2039 *
3d0684b2 2040 * Returns the number of pages written
a82d593b 2041 *
6f37bb8b 2042 * @rs: current RAM state
3d0684b2 2043 * @pss: data about the page we want to send
a82d593b 2044 */
4010ba38 2045static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
a82d593b 2046{
a8ec91f9 2047 RAMBlock *block = pss->block;
8bba004c 2048 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
a8ec91f9
XG
2049 int res;
2050
944853c2 2051 if (control_save_page(pss, offset, &res)) {
a8ec91f9
XG
2052 return res;
2053 }
2054
d637a182 2055 if (save_compress_page(rs, pss, offset)) {
5e5fdcff 2056 return 1;
d7400a34
XG
2057 }
2058
e8e4e7ac 2059 if (save_zero_page(rs, pss, offset)) {
8697eb85 2060 return 1;
d7400a34
XG
2061 }
2062
da3f56cb 2063 /*
6f39c90b
PX
2064 * Do not use multifd in postcopy as one whole host page should be
2065 * placed. Meanwhile postcopy requires atomic update of pages, so even
2066 * if host page size == guest page size the dest guest during run may
2067 * still see partially copied pages which is data corruption.
da3f56cb 2068 */
51b07548 2069 if (migrate_multifd() && !migration_in_postcopy()) {
61717ea9 2070 return ram_save_multifd_page(pss->pss_channel, block, offset);
a82d593b
DDAG
2071 }
2072
05931ec5 2073 return ram_save_page(rs, pss);
a82d593b
DDAG
2074}
2075
d9e474ea
PX
2076/* Should be called before sending a host page */
2077static void pss_host_page_prepare(PageSearchStatus *pss)
2078{
2079 /* How many guest pages are there in one host page? */
2080 size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
2081
2082 pss->host_page_sending = true;
301d7ffe
PX
2083 if (guest_pfns <= 1) {
2084 /*
2085 * This covers both when guest psize == host psize, or when guest
2086 * has larger psize than the host (guest_pfns==0).
2087 *
2088 * For the latter, we always send one whole guest page per
2089 * iteration of the host page (example: an Alpha VM on x86 host
2090 * will have guest psize 8K while host psize 4K).
2091 */
2092 pss->host_page_start = pss->page;
2093 pss->host_page_end = pss->page + 1;
2094 } else {
2095 /*
2096 * The host page spans over multiple guest pages, we send them
2097 * within the same host page iteration.
2098 */
2099 pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns);
2100 pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns);
2101 }
d9e474ea
PX
2102}
2103
2104/*
2105 * Whether the page pointed by PSS is within the host page being sent.
2106 * Must be called after a previous pss_host_page_prepare().
2107 */
2108static bool pss_within_range(PageSearchStatus *pss)
2109{
2110 ram_addr_t ram_addr;
2111
2112 assert(pss->host_page_sending);
2113
2114 /* Over host-page boundary? */
2115 if (pss->page >= pss->host_page_end) {
2116 return false;
2117 }
2118
2119 ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
2120
2121 return offset_in_ramblock(pss->block, ram_addr);
2122}
2123
2124static void pss_host_page_finish(PageSearchStatus *pss)
2125{
2126 pss->host_page_sending = false;
2127 /* This is not needed, but just to reset it */
2128 pss->host_page_start = pss->host_page_end = 0;
2129}
2130
93589827
PX
2131/*
2132 * Send an urgent host page specified by `pss'. Need to be called with
2133 * bitmap_mutex held.
2134 *
2135 * Returns 0 if save host page succeeded, false otherwise.
2136 */
2137static int ram_save_host_page_urgent(PageSearchStatus *pss)
2138{
2139 bool page_dirty, sent = false;
2140 RAMState *rs = ram_state;
2141 int ret = 0;
2142
2143 trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page);
2144 pss_host_page_prepare(pss);
2145
2146 /*
2147 * If precopy is sending the same page, let it be done in precopy, or
2148 * we could send the same page in two channels and none of them will
2149 * receive the whole page.
2150 */
2151 if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) {
2152 trace_postcopy_preempt_hit(pss->block->idstr,
2153 pss->page << TARGET_PAGE_BITS);
2154 return 0;
2155 }
2156
2157 do {
2158 page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);
2159
2160 if (page_dirty) {
2161 /* Be strict to return code; it must be 1, or what else? */
4010ba38 2162 if (migration_ops->ram_save_target_page(rs, pss) != 1) {
93589827
PX
2163 error_report_once("%s: ram_save_target_page failed", __func__);
2164 ret = -1;
2165 goto out;
2166 }
2167 sent = true;
2168 }
2169 pss_find_next_dirty(pss);
2170 } while (pss_within_range(pss));
2171out:
2172 pss_host_page_finish(pss);
2173 /* For urgent requests, flush immediately if sent */
2174 if (sent) {
2175 qemu_fflush(pss->pss_channel);
2176 }
2177 return ret;
2178}
2179
a82d593b 2180/**
3d0684b2 2181 * ram_save_host_page: save a whole host page
a82d593b 2182 *
3d0684b2
JQ
2183 * Starting at *offset send pages up to the end of the current host
2184 * page. It's valid for the initial offset to point into the middle of
2185 * a host page in which case the remainder of the hostpage is sent.
2186 * Only dirty target pages are sent. Note that the host page size may
2187 * be a huge page for this block.
f3321554 2188 *
1eb3fc0a
DDAG
2189 * The saving stops at the boundary of the used_length of the block
2190 * if the RAMBlock isn't a multiple of the host page size.
a82d593b 2191 *
f3321554
PX
2192 * The caller must be with ram_state.bitmap_mutex held to call this
2193 * function. Note that this function can temporarily release the lock, but
2194 * when the function is returned it'll make sure the lock is still held.
2195 *
3d0684b2
JQ
2196 * Returns the number of pages written or negative on error
2197 *
6f37bb8b 2198 * @rs: current RAM state
3d0684b2 2199 * @pss: data about the page we want to send
a82d593b 2200 */
05931ec5 2201static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
a82d593b 2202{
f3321554 2203 bool page_dirty, preempt_active = postcopy_preempt_active();
a82d593b 2204 int tmppages, pages = 0;
a935e30f
JQ
2205 size_t pagesize_bits =
2206 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
278e2f55
AG
2207 unsigned long start_page = pss->page;
2208 int res;
4c011c37 2209
f161c88a 2210 if (migrate_ram_is_ignored(pss->block)) {
b895de50
CLG
2211 error_report("block %s should not be migrated !", pss->block->idstr);
2212 return 0;
2213 }
2214
d9e474ea
PX
2215 /* Update host page boundary information */
2216 pss_host_page_prepare(pss);
2217
a82d593b 2218 do {
f3321554 2219 page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);
a82d593b 2220
f3321554
PX
2221 /* Check the pages is dirty and if it is send it */
2222 if (page_dirty) {
ba1b7c81 2223 /*
f3321554
PX
2224 * Properly yield the lock only in postcopy preempt mode
2225 * because both migration thread and rp-return thread can
2226 * operate on the bitmaps.
ba1b7c81 2227 */
f3321554
PX
2228 if (preempt_active) {
2229 qemu_mutex_unlock(&rs->bitmap_mutex);
ba1b7c81 2230 }
4010ba38 2231 tmppages = migration_ops->ram_save_target_page(rs, pss);
f3321554
PX
2232 if (tmppages >= 0) {
2233 pages += tmppages;
2234 /*
2235 * Allow rate limiting to happen in the middle of huge pages if
2236 * something is sent in the current iteration.
2237 */
2238 if (pagesize_bits > 1 && tmppages > 0) {
2239 migration_rate_limit();
2240 }
2241 }
2242 if (preempt_active) {
2243 qemu_mutex_lock(&rs->bitmap_mutex);
2244 }
2245 } else {
2246 tmppages = 0;
23feba90 2247 }
f3321554
PX
2248
2249 if (tmppages < 0) {
d9e474ea 2250 pss_host_page_finish(pss);
f3321554
PX
2251 return tmppages;
2252 }
2253
d9e474ea
PX
2254 pss_find_next_dirty(pss);
2255 } while (pss_within_range(pss));
2256
2257 pss_host_page_finish(pss);
278e2f55
AG
2258
2259 res = ram_save_release_protection(rs, pss, start_page);
2260 return (res < 0 ? res : pages);
a82d593b 2261}
6c595cde 2262
56e93d26 2263/**
3d0684b2 2264 * ram_find_and_save_block: finds a dirty page and sends it to f
56e93d26
JQ
2265 *
2266 * Called within an RCU critical section.
2267 *
e8f3735f
XG
2268 * Returns the number of pages written where zero means no dirty pages,
2269 * or negative on error
56e93d26 2270 *
6f37bb8b 2271 * @rs: current RAM state
a82d593b
DDAG
2272 *
2273 * On systems where host-page-size > target-page-size it will send all the
2274 * pages in a host page that are dirty.
56e93d26 2275 */
05931ec5 2276static int ram_find_and_save_block(RAMState *rs)
56e93d26 2277{
f1668764 2278 PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
56e93d26 2279 int pages = 0;
56e93d26 2280
0827b9e9 2281 /* No dirty page as there is zero RAM */
8d80e195 2282 if (!rs->ram_bytes_total) {
0827b9e9
AA
2283 return pages;
2284 }
2285
4934a5dd
PX
2286 /*
2287 * Always keep last_seen_block/last_page valid during this procedure,
2288 * because find_dirty_block() relies on these values (e.g., we compare
2289 * last_seen_block with pss.block to see whether we searched all the
2290 * ramblocks) to detect the completion of migration. Having NULL value
2291 * of last_seen_block can conditionally cause below loop to run forever.
2292 */
2293 if (!rs->last_seen_block) {
2294 rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks);
2295 rs->last_page = 0;
2296 }
2297
f1668764 2298 pss_init(pss, rs->last_seen_block, rs->last_page);
b8fb8cb7 2299
31e2ac74 2300 while (true){
51efd36f 2301 if (!get_queued_page(rs, pss)) {
b062106d 2302 /* priority queue empty, so just search for something dirty */
31e2ac74
JQ
2303 int res = find_dirty_block(rs, pss);
2304 if (res != PAGE_DIRTY_FOUND) {
2305 if (res == PAGE_ALL_CLEAN) {
51efd36f 2306 break;
31e2ac74
JQ
2307 } else if (res == PAGE_TRY_AGAIN) {
2308 continue;
294e5a40
JQ
2309 } else if (res < 0) {
2310 pages = res;
2311 break;
51efd36f
JQ
2312 }
2313 }
56e93d26 2314 }
51efd36f 2315 pages = ram_save_host_page(rs, pss);
31e2ac74
JQ
2316 if (pages) {
2317 break;
2318 }
2319 }
56e93d26 2320
f1668764
PX
2321 rs->last_seen_block = pss->block;
2322 rs->last_page = pss->page;
56e93d26
JQ
2323
2324 return pages;
2325}
2326
8008a272 2327static uint64_t ram_bytes_total_with_ignored(void)
56e93d26
JQ
2328{
2329 RAMBlock *block;
2330 uint64_t total = 0;
2331
89ac5a1d
DDAG
2332 RCU_READ_LOCK_GUARD();
2333
8008a272
JQ
2334 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2335 total += block->used_length;
99e15582 2336 }
56e93d26
JQ
2337 return total;
2338}
2339
fbd162e6
YK
2340uint64_t ram_bytes_total(void)
2341{
8008a272
JQ
2342 RAMBlock *block;
2343 uint64_t total = 0;
2344
2345 RCU_READ_LOCK_GUARD();
2346
2347 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2348 total += block->used_length;
2349 }
2350 return total;
fbd162e6
YK
2351}
2352
f265e0e4 2353static void xbzrle_load_setup(void)
56e93d26 2354{
f265e0e4 2355 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
56e93d26
JQ
2356}
2357
f265e0e4
JQ
2358static void xbzrle_load_cleanup(void)
2359{
2360 g_free(XBZRLE.decoded_buf);
2361 XBZRLE.decoded_buf = NULL;
2362}
2363
7d7c96be
PX
2364static void ram_state_cleanup(RAMState **rsp)
2365{
b9ccaf6d
DDAG
2366 if (*rsp) {
2367 migration_page_queue_free(*rsp);
2368 qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2369 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2370 g_free(*rsp);
2371 *rsp = NULL;
2372 }
7d7c96be
PX
2373}
2374
84593a08
PX
2375static void xbzrle_cleanup(void)
2376{
2377 XBZRLE_cache_lock();
2378 if (XBZRLE.cache) {
2379 cache_fini(XBZRLE.cache);
2380 g_free(XBZRLE.encoded_buf);
2381 g_free(XBZRLE.current_buf);
2382 g_free(XBZRLE.zero_target_page);
2383 XBZRLE.cache = NULL;
2384 XBZRLE.encoded_buf = NULL;
2385 XBZRLE.current_buf = NULL;
2386 XBZRLE.zero_target_page = NULL;
2387 }
2388 XBZRLE_cache_unlock();
2389}
2390
f265e0e4 2391static void ram_save_cleanup(void *opaque)
56e93d26 2392{
53518d94 2393 RAMState **rsp = opaque;
6b6712ef 2394 RAMBlock *block;
eb859c53 2395
278e2f55
AG
2396 /* We don't use dirty log with background snapshots */
2397 if (!migrate_background_snapshot()) {
2398 /* caller have hold iothread lock or is in a bh, so there is
2399 * no writing race against the migration bitmap
2400 */
63b41db4
HH
2401 if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
2402 /*
2403 * do not stop dirty log without starting it, since
2404 * memory_global_dirty_log_stop will assert that
2405 * memory_global_dirty_log_start/stop used in pairs
2406 */
2407 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
2408 }
278e2f55 2409 }
6b6712ef 2410
fbd162e6 2411 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
002cad6b
PX
2412 g_free(block->clear_bmap);
2413 block->clear_bmap = NULL;
6b6712ef
JQ
2414 g_free(block->bmap);
2415 block->bmap = NULL;
56e93d26
JQ
2416 }
2417
84593a08 2418 xbzrle_cleanup();
f0afa331 2419 compress_threads_save_cleanup();
7d7c96be 2420 ram_state_cleanup(rsp);
4010ba38
JQ
2421 g_free(migration_ops);
2422 migration_ops = NULL;
56e93d26
JQ
2423}
2424
6f37bb8b 2425static void ram_state_reset(RAMState *rs)
56e93d26 2426{
ec6f3ab9
PX
2427 int i;
2428
2429 for (i = 0; i < RAM_CHANNEL_MAX; i++) {
2430 rs->pss[i].last_sent_block = NULL;
2431 }
2432
6f37bb8b 2433 rs->last_seen_block = NULL;
269ace29 2434 rs->last_page = 0;
6f37bb8b 2435 rs->last_version = ram_list.version;
f3095cc8 2436 rs->xbzrle_started = false;
56e93d26
JQ
2437}
2438
2439#define MAX_WAIT 50 /* ms, half buffered_file limit */
2440
e0b266f0
DDAG
2441/* **** functions for postcopy ***** */
2442
ced1c616
PB
2443void ram_postcopy_migrated_memory_release(MigrationState *ms)
2444{
2445 struct RAMBlock *block;
ced1c616 2446
fbd162e6 2447 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
6b6712ef
JQ
2448 unsigned long *bitmap = block->bmap;
2449 unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2450 unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
ced1c616
PB
2451
2452 while (run_start < range) {
2453 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
8bba004c
AR
2454 ram_discard_range(block->idstr,
2455 ((ram_addr_t)run_start) << TARGET_PAGE_BITS,
2456 ((ram_addr_t)(run_end - run_start))
2457 << TARGET_PAGE_BITS);
ced1c616
PB
2458 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2459 }
2460 }
2461}
2462
3d0684b2
JQ
2463/**
2464 * postcopy_send_discard_bm_ram: discard a RAMBlock
2465 *
e0b266f0 2466 * Callback from postcopy_each_ram_send_discard for each RAMBlock
3d0684b2
JQ
2467 *
2468 * @ms: current migration state
89dab31b 2469 * @block: RAMBlock to discard
e0b266f0 2470 */
9e7d1223 2471static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
e0b266f0 2472{
6b6712ef 2473 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
e0b266f0 2474 unsigned long current;
1e7cf8c3 2475 unsigned long *bitmap = block->bmap;
e0b266f0 2476
6b6712ef 2477 for (current = 0; current < end; ) {
1e7cf8c3 2478 unsigned long one = find_next_bit(bitmap, end, current);
33a5cb62 2479 unsigned long zero, discard_length;
e0b266f0 2480
33a5cb62
WY
2481 if (one >= end) {
2482 break;
2483 }
e0b266f0 2484
1e7cf8c3 2485 zero = find_next_zero_bit(bitmap, end, one + 1);
33a5cb62
WY
2486
2487 if (zero >= end) {
2488 discard_length = end - one;
e0b266f0 2489 } else {
33a5cb62
WY
2490 discard_length = zero - one;
2491 }
810cf2bb 2492 postcopy_discard_send_range(ms, one, discard_length);
33a5cb62 2493 current = one + discard_length;
e0b266f0 2494 }
e0b266f0
DDAG
2495}
2496
f30c2e5b
PX
2497static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block);
2498
3d0684b2
JQ
2499/**
2500 * postcopy_each_ram_send_discard: discard all RAMBlocks
2501 *
e0b266f0
DDAG
2502 * Utility for the outgoing postcopy code.
2503 * Calls postcopy_send_discard_bm_ram for each RAMBlock
2504 * passing it bitmap indexes and name.
e0b266f0
DDAG
2505 * (qemu_ram_foreach_block ends up passing unscaled lengths
2506 * which would mean postcopy code would have to deal with target page)
3d0684b2
JQ
2507 *
2508 * @ms: current migration state
e0b266f0 2509 */
739fcc1b 2510static void postcopy_each_ram_send_discard(MigrationState *ms)
e0b266f0
DDAG
2511{
2512 struct RAMBlock *block;
e0b266f0 2513
fbd162e6 2514 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
810cf2bb 2515 postcopy_discard_send_init(ms, block->idstr);
e0b266f0 2516
f30c2e5b
PX
2517 /*
2518 * Deal with TPS != HPS and huge pages. It discard any partially sent
2519 * host-page size chunks, mark any partially dirty host-page size
2520 * chunks as all dirty. In this case the host-page is the host-page
2521 * for the particular RAMBlock, i.e. it might be a huge page.
2522 */
2523 postcopy_chunk_hostpages_pass(ms, block);
2524
e0b266f0
DDAG
2525 /*
2526 * Postcopy sends chunks of bitmap over the wire, but it
2527 * just needs indexes at this point, avoids it having
2528 * target page specific code.
2529 */
739fcc1b 2530 postcopy_send_discard_bm_ram(ms, block);
810cf2bb 2531 postcopy_discard_send_finish(ms);
e0b266f0 2532 }
e0b266f0
DDAG
2533}
2534
3d0684b2 2535/**
8324ef86 2536 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
3d0684b2
JQ
2537 *
2538 * Helper for postcopy_chunk_hostpages; it's called twice to
2539 * canonicalize the two bitmaps, that are similar, but one is
2540 * inverted.
99e314eb 2541 *
3d0684b2
JQ
2542 * Postcopy requires that all target pages in a hostpage are dirty or
2543 * clean, not a mix. This function canonicalizes the bitmaps.
99e314eb 2544 *
3d0684b2 2545 * @ms: current migration state
3d0684b2 2546 * @block: block that contains the page we want to canonicalize
99e314eb 2547 */
1e7cf8c3 2548static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
99e314eb 2549{
53518d94 2550 RAMState *rs = ram_state;
6b6712ef 2551 unsigned long *bitmap = block->bmap;
29c59172 2552 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
6b6712ef 2553 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
99e314eb
DDAG
2554 unsigned long run_start;
2555
29c59172
DDAG
2556 if (block->page_size == TARGET_PAGE_SIZE) {
2557 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2558 return;
2559 }
2560
1e7cf8c3
WY
2561 /* Find a dirty page */
2562 run_start = find_next_bit(bitmap, pages, 0);
99e314eb 2563
6b6712ef 2564 while (run_start < pages) {
99e314eb
DDAG
2565
2566 /*
2567 * If the start of this run of pages is in the middle of a host
2568 * page, then we need to fixup this host page.
2569 */
9dec3cc3 2570 if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
99e314eb 2571 /* Find the end of this run */
1e7cf8c3 2572 run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
99e314eb
DDAG
2573 /*
2574 * If the end isn't at the start of a host page, then the
2575 * run doesn't finish at the end of a host page
2576 * and we need to discard.
2577 */
99e314eb
DDAG
2578 }
2579
9dec3cc3 2580 if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
99e314eb 2581 unsigned long page;
dad45ab2
WY
2582 unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
2583 host_ratio);
2584 run_start = QEMU_ALIGN_UP(run_start, host_ratio);
99e314eb 2585
99e314eb
DDAG
2586 /* Clean up the bitmap */
2587 for (page = fixup_start_addr;
2588 page < fixup_start_addr + host_ratio; page++) {
99e314eb
DDAG
2589 /*
2590 * Remark them as dirty, updating the count for any pages
2591 * that weren't previously dirty.
2592 */
0d8ec885 2593 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
99e314eb
DDAG
2594 }
2595 }
2596
1e7cf8c3
WY
2597 /* Find the next dirty page for the next iteration */
2598 run_start = find_next_bit(bitmap, pages, run_start);
99e314eb
DDAG
2599 }
2600}
2601
3d0684b2
JQ
2602/**
2603 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2604 *
e0b266f0
DDAG
2605 * Transmit the set of pages to be discarded after precopy to the target
2606 * these are pages that:
2607 * a) Have been previously transmitted but are now dirty again
2608 * b) Pages that have never been transmitted, this ensures that
2609 * any pages on the destination that have been mapped by background
2610 * tasks get discarded (transparent huge pages is the specific concern)
2611 * Hopefully this is pretty sparse
3d0684b2
JQ
2612 *
2613 * @ms: current migration state
e0b266f0 2614 */
739fcc1b 2615void ram_postcopy_send_discard_bitmap(MigrationState *ms)
e0b266f0 2616{
53518d94 2617 RAMState *rs = ram_state;
e0b266f0 2618
89ac5a1d 2619 RCU_READ_LOCK_GUARD();
e0b266f0
DDAG
2620
2621 /* This should be our last sync, the src is now paused */
1e493be5 2622 migration_bitmap_sync(rs, false);
e0b266f0 2623
6b6712ef 2624 /* Easiest way to make sure we don't resume in the middle of a host-page */
ec6f3ab9 2625 rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
6b6712ef 2626 rs->last_seen_block = NULL;
6b6712ef 2627 rs->last_page = 0;
e0b266f0 2628
739fcc1b 2629 postcopy_each_ram_send_discard(ms);
e0b266f0 2630
739fcc1b 2631 trace_ram_postcopy_send_discard_bitmap();
e0b266f0
DDAG
2632}
2633
3d0684b2
JQ
2634/**
2635 * ram_discard_range: discard dirtied pages at the beginning of postcopy
e0b266f0 2636 *
3d0684b2 2637 * Returns zero on success
e0b266f0 2638 *
36449157
JQ
2639 * @rbname: name of the RAMBlock of the request. NULL means the
2640 * same that last one.
3d0684b2
JQ
2641 * @start: RAMBlock starting page
2642 * @length: RAMBlock size
e0b266f0 2643 */
aaa2064c 2644int ram_discard_range(const char *rbname, uint64_t start, size_t length)
e0b266f0 2645{
36449157 2646 trace_ram_discard_range(rbname, start, length);
d3a5038c 2647
89ac5a1d 2648 RCU_READ_LOCK_GUARD();
36449157 2649 RAMBlock *rb = qemu_ram_block_by_name(rbname);
e0b266f0
DDAG
2650
2651 if (!rb) {
36449157 2652 error_report("ram_discard_range: Failed to find block '%s'", rbname);
03acb4e9 2653 return -1;
e0b266f0
DDAG
2654 }
2655
814bb08f
PX
2656 /*
2657 * On source VM, we don't need to update the received bitmap since
2658 * we don't even have one.
2659 */
2660 if (rb->receivedmap) {
2661 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
2662 length >> qemu_target_page_bits());
2663 }
2664
03acb4e9 2665 return ram_block_discard_range(rb, start, length);
e0b266f0
DDAG
2666}
2667
84593a08
PX
2668/*
2669 * For every allocation, we will try not to crash the VM if the
2670 * allocation failed.
2671 */
2672static int xbzrle_init(void)
2673{
2674 Error *local_err = NULL;
2675
87dca0c9 2676 if (!migrate_xbzrle()) {
84593a08
PX
2677 return 0;
2678 }
2679
2680 XBZRLE_cache_lock();
2681
2682 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
2683 if (!XBZRLE.zero_target_page) {
2684 error_report("%s: Error allocating zero page", __func__);
2685 goto err_out;
2686 }
2687
2688 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
2689 TARGET_PAGE_SIZE, &local_err);
2690 if (!XBZRLE.cache) {
2691 error_report_err(local_err);
2692 goto free_zero_page;
2693 }
2694
2695 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
2696 if (!XBZRLE.encoded_buf) {
2697 error_report("%s: Error allocating encoded_buf", __func__);
2698 goto free_cache;
2699 }
2700
2701 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
2702 if (!XBZRLE.current_buf) {
2703 error_report("%s: Error allocating current_buf", __func__);
2704 goto free_encoded_buf;
2705 }
2706
2707 /* We are all good */
2708 XBZRLE_cache_unlock();
2709 return 0;
2710
2711free_encoded_buf:
2712 g_free(XBZRLE.encoded_buf);
2713 XBZRLE.encoded_buf = NULL;
2714free_cache:
2715 cache_fini(XBZRLE.cache);
2716 XBZRLE.cache = NULL;
2717free_zero_page:
2718 g_free(XBZRLE.zero_target_page);
2719 XBZRLE.zero_target_page = NULL;
2720err_out:
2721 XBZRLE_cache_unlock();
2722 return -ENOMEM;
2723}
2724
53518d94 2725static int ram_state_init(RAMState **rsp)
56e93d26 2726{
7d00ee6a
PX
2727 *rsp = g_try_new0(RAMState, 1);
2728
2729 if (!*rsp) {
2730 error_report("%s: Init ramstate fail", __func__);
2731 return -1;
2732 }
53518d94
JQ
2733
2734 qemu_mutex_init(&(*rsp)->bitmap_mutex);
2735 qemu_mutex_init(&(*rsp)->src_page_req_mutex);
2736 QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
8d80e195 2737 (*rsp)->ram_bytes_total = ram_bytes_total();
56e93d26 2738
7d00ee6a 2739 /*
40c4d4a8
IR
2740 * Count the total number of pages used by ram blocks not including any
2741 * gaps due to alignment or unplugs.
03158519 2742 * This must match with the initial values of dirty bitmap.
7d00ee6a 2743 */
8d80e195 2744 (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS;
7d00ee6a
PX
2745 ram_state_reset(*rsp);
2746
2747 return 0;
2748}
2749
d6eff5d7 2750static void ram_list_init_bitmaps(void)
7d00ee6a 2751{
002cad6b 2752 MigrationState *ms = migrate_get_current();
d6eff5d7
PX
2753 RAMBlock *block;
2754 unsigned long pages;
002cad6b 2755 uint8_t shift;
56e93d26 2756
0827b9e9
AA
2757 /* Skip setting bitmap if there is no RAM */
2758 if (ram_bytes_total()) {
002cad6b
PX
2759 shift = ms->clear_bitmap_shift;
2760 if (shift > CLEAR_BITMAP_SHIFT_MAX) {
2761 error_report("clear_bitmap_shift (%u) too big, using "
2762 "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
2763 shift = CLEAR_BITMAP_SHIFT_MAX;
2764 } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
2765 error_report("clear_bitmap_shift (%u) too small, using "
2766 "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
2767 shift = CLEAR_BITMAP_SHIFT_MIN;
2768 }
2769
fbd162e6 2770 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
d6eff5d7 2771 pages = block->max_length >> TARGET_PAGE_BITS;
03158519
WY
2772 /*
2773 * The initial dirty bitmap for migration must be set with all
2774 * ones to make sure we'll migrate every guest RAM page to
2775 * destination.
40c4d4a8
IR
2776 * Here we set RAMBlock.bmap all to 1 because when rebegin a
2777 * new migration after a failed migration, ram_list.
2778 * dirty_memory[DIRTY_MEMORY_MIGRATION] don't include the whole
2779 * guest memory.
03158519 2780 */
6b6712ef 2781 block->bmap = bitmap_new(pages);
40c4d4a8 2782 bitmap_set(block->bmap, 0, pages);
002cad6b
PX
2783 block->clear_bmap_shift = shift;
2784 block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
0827b9e9 2785 }
f3f491fc 2786 }
d6eff5d7
PX
2787}
2788
be39b4cd
DH
2789static void migration_bitmap_clear_discarded_pages(RAMState *rs)
2790{
2791 unsigned long pages;
2792 RAMBlock *rb;
2793
2794 RCU_READ_LOCK_GUARD();
2795
2796 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
2797 pages = ramblock_dirty_bitmap_clear_discarded_pages(rb);
2798 rs->migration_dirty_pages -= pages;
2799 }
2800}
2801
d6eff5d7
PX
2802static void ram_init_bitmaps(RAMState *rs)
2803{
d6eff5d7 2804 qemu_mutex_lock_ramlist();
f3f491fc 2805
89ac5a1d
DDAG
2806 WITH_RCU_READ_LOCK_GUARD() {
2807 ram_list_init_bitmaps();
278e2f55
AG
2808 /* We don't use dirty log with background snapshots */
2809 if (!migrate_background_snapshot()) {
63b41db4 2810 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
1e493be5 2811 migration_bitmap_sync_precopy(rs, false);
278e2f55 2812 }
89ac5a1d 2813 }
56e93d26 2814 qemu_mutex_unlock_ramlist();
be39b4cd
DH
2815
2816 /*
2817 * After an eventual first bitmap sync, fixup the initial bitmap
2818 * containing all 1s to exclude any discarded pages from migration.
2819 */
2820 migration_bitmap_clear_discarded_pages(rs);
d6eff5d7
PX
2821}
2822
2823static int ram_init_all(RAMState **rsp)
2824{
2825 if (ram_state_init(rsp)) {
2826 return -1;
2827 }
2828
2829 if (xbzrle_init()) {
2830 ram_state_cleanup(rsp);
2831 return -1;
2832 }
2833
2834 ram_init_bitmaps(*rsp);
a91246c9
HZ
2835
2836 return 0;
2837}
2838
08614f34
PX
2839static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
2840{
2841 RAMBlock *block;
2842 uint64_t pages = 0;
2843
2844 /*
2845 * Postcopy is not using xbzrle/compression, so no need for that.
2846 * Also, since source are already halted, we don't need to care
2847 * about dirty page logging as well.
2848 */
2849
fbd162e6 2850 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
08614f34
PX
2851 pages += bitmap_count_one(block->bmap,
2852 block->used_length >> TARGET_PAGE_BITS);
2853 }
2854
2855 /* This may not be aligned with current bitmaps. Recalculate. */
2856 rs->migration_dirty_pages = pages;
2857
1a373522 2858 ram_state_reset(rs);
08614f34
PX
2859
2860 /* Update RAMState cache of output QEMUFile */
7f401b80 2861 rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out;
08614f34
PX
2862
2863 trace_ram_state_resume_prepare(pages);
2864}
2865
6bcb05fc
WW
2866/*
2867 * This function clears bits of the free pages reported by the caller from the
2868 * migration dirty bitmap. @addr is the host address corresponding to the
2869 * start of the continuous guest free pages, and @len is the total bytes of
2870 * those pages.
2871 */
2872void qemu_guest_free_page_hint(void *addr, size_t len)
2873{
2874 RAMBlock *block;
2875 ram_addr_t offset;
2876 size_t used_len, start, npages;
2877 MigrationState *s = migrate_get_current();
2878
2879 /* This function is currently expected to be used during live migration */
2880 if (!migration_is_setup_or_active(s->state)) {
2881 return;
2882 }
2883
2884 for (; len > 0; len -= used_len, addr += used_len) {
2885 block = qemu_ram_block_from_host(addr, false, &offset);
2886 if (unlikely(!block || offset >= block->used_length)) {
2887 /*
2888 * The implementation might not support RAMBlock resize during
2889 * live migration, but it could happen in theory with future
2890 * updates. So we add a check here to capture that case.
2891 */
2892 error_report_once("%s unexpected error", __func__);
2893 return;
2894 }
2895
2896 if (len <= block->used_length - offset) {
2897 used_len = len;
2898 } else {
2899 used_len = block->used_length - offset;
2900 }
2901
2902 start = offset >> TARGET_PAGE_BITS;
2903 npages = used_len >> TARGET_PAGE_BITS;
2904
2905 qemu_mutex_lock(&ram_state->bitmap_mutex);
3143577d
WW
2906 /*
2907 * The skipped free pages are equavalent to be sent from clear_bmap's
2908 * perspective, so clear the bits from the memory region bitmap which
2909 * are initially set. Otherwise those skipped pages will be sent in
2910 * the next round after syncing from the memory region bitmap.
2911 */
1230a25f 2912 migration_clear_memory_region_dirty_bitmap_range(block, start, npages);
6bcb05fc
WW
2913 ram_state->migration_dirty_pages -=
2914 bitmap_count_one_with_offset(block->bmap, start, npages);
2915 bitmap_clear(block->bmap, start, npages);
2916 qemu_mutex_unlock(&ram_state->bitmap_mutex);
2917 }
2918}
2919
3d0684b2
JQ
2920/*
2921 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
a91246c9
HZ
2922 * long-running RCU critical section. When rcu-reclaims in the code
2923 * start to become numerous it will be necessary to reduce the
2924 * granularity of these critical sections.
2925 */
2926
3d0684b2
JQ
2927/**
2928 * ram_save_setup: Setup RAM for migration
2929 *
2930 * Returns zero to indicate success and negative for error
2931 *
2932 * @f: QEMUFile where to send the data
2933 * @opaque: RAMState pointer
2934 */
a91246c9
HZ
2935static int ram_save_setup(QEMUFile *f, void *opaque)
2936{
53518d94 2937 RAMState **rsp = opaque;
a91246c9 2938 RAMBlock *block;
33d70973 2939 int ret;
a91246c9 2940
dcaf446e
XG
2941 if (compress_threads_save_setup()) {
2942 return -1;
2943 }
2944
a91246c9
HZ
2945 /* migration has already setup the bitmap, reuse it. */
2946 if (!migration_in_colo_state()) {
7d00ee6a 2947 if (ram_init_all(rsp) != 0) {
dcaf446e 2948 compress_threads_save_cleanup();
a91246c9 2949 return -1;
53518d94 2950 }
a91246c9 2951 }
7f401b80 2952 (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f;
a91246c9 2953
0e6ebd48 2954 WITH_RCU_READ_LOCK_GUARD() {
8008a272
JQ
2955 qemu_put_be64(f, ram_bytes_total_with_ignored()
2956 | RAM_SAVE_FLAG_MEM_SIZE);
56e93d26 2957
0e6ebd48
DDAG
2958 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2959 qemu_put_byte(f, strlen(block->idstr));
2960 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
2961 qemu_put_be64(f, block->used_length);
2962 if (migrate_postcopy_ram() && block->page_size !=
2963 qemu_host_page_size) {
2964 qemu_put_be64(f, block->page_size);
2965 }
2966 if (migrate_ignore_shared()) {
2967 qemu_put_be64(f, block->mr->addr);
2968 }
fbd162e6 2969 }
56e93d26
JQ
2970 }
2971
b1b38387 2972 ret = rdma_registration_start(f, RAM_CONTROL_SETUP);
48408174
JQ
2973 if (ret < 0) {
2974 qemu_file_set_error(f, ret);
a2326705 2975 return ret;
48408174 2976 }
5f5b8858 2977
b1b38387 2978 ret = rdma_registration_stop(f, RAM_CONTROL_SETUP);
5f5b8858
JQ
2979 if (ret < 0) {
2980 qemu_file_set_error(f, ret);
a2326705 2981 return ret;
5f5b8858 2982 }
56e93d26 2983
4010ba38
JQ
2984 migration_ops = g_malloc0(sizeof(MigrationOps));
2985 migration_ops->ram_save_target_page = ram_save_target_page_legacy;
930e239d
FE
2986
2987 qemu_mutex_unlock_iothread();
8ebb6ecc 2988 ret = multifd_send_sync_main(f);
930e239d 2989 qemu_mutex_lock_iothread();
33d70973
LB
2990 if (ret < 0) {
2991 return ret;
2992 }
2993
d4f34485 2994 if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
294e5a40
JQ
2995 qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
2996 }
2997
56e93d26 2998 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
be07a0ed 2999 return qemu_fflush(f);
56e93d26
JQ
3000}
3001
3d0684b2
JQ
3002/**
3003 * ram_save_iterate: iterative stage for migration
3004 *
3005 * Returns zero to indicate success and negative for error
3006 *
3007 * @f: QEMUFile where to send the data
3008 * @opaque: RAMState pointer
3009 */
56e93d26
JQ
3010static int ram_save_iterate(QEMUFile *f, void *opaque)
3011{
53518d94
JQ
3012 RAMState **temp = opaque;
3013 RAMState *rs = *temp;
3d4095b2 3014 int ret = 0;
56e93d26
JQ
3015 int i;
3016 int64_t t0;
5c90308f 3017 int done = 0;
56e93d26 3018
b2557345
PL
3019 if (blk_mig_bulk_active()) {
3020 /* Avoid transferring ram during bulk phase of block migration as
3021 * the bulk phase will usually take a long time and transferring
3022 * ram updates during that time is pointless. */
3023 goto out;
3024 }
3025
63268c49
PX
3026 /*
3027 * We'll take this lock a little bit long, but it's okay for two reasons.
3028 * Firstly, the only possible other thread to take it is who calls
3029 * qemu_guest_free_page_hint(), which should be rare; secondly, see
3030 * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which
3031 * guarantees that we'll at least released it in a regular basis.
3032 */
0983125b
JQ
3033 WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) {
3034 WITH_RCU_READ_LOCK_GUARD() {
3035 if (ram_list.version != rs->last_version) {
3036 ram_state_reset(rs);
3037 }
56e93d26 3038
0983125b
JQ
3039 /* Read version before ram_list.blocks */
3040 smp_rmb();
56e93d26 3041
0983125b
JQ
3042 ret = rdma_registration_start(f, RAM_CONTROL_ROUND);
3043 if (ret < 0) {
3044 qemu_file_set_error(f, ret);
3045 goto out;
3046 }
56e93d26 3047
0983125b
JQ
3048 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
3049 i = 0;
3050 while ((ret = migration_rate_exceeded(f)) == 0 ||
3051 postcopy_has_request(rs)) {
3052 int pages;
e03a34f8 3053
0983125b
JQ
3054 if (qemu_file_get_error(f)) {
3055 break;
3056 }
e8f3735f 3057
0983125b
JQ
3058 pages = ram_find_and_save_block(rs);
3059 /* no more pages to sent */
3060 if (pages == 0) {
3061 done = 1;
3062 break;
3063 }
e8f3735f 3064
0983125b
JQ
3065 if (pages < 0) {
3066 qemu_file_set_error(f, pages);
3067 break;
3068 }
89ac5a1d 3069
0983125b 3070 rs->target_page_count += pages;
89ac5a1d 3071
0983125b
JQ
3072 /*
3073 * During postcopy, it is necessary to make sure one whole host
3074 * page is sent in one chunk.
3075 */
3076 if (migrate_postcopy_ram()) {
3077 compress_flush_data();
3078 }
644acf99 3079
0983125b
JQ
3080 /*
3081 * we want to check in the 1st loop, just in case it was the 1st
3082 * time and we had to sync the dirty bitmap.
3083 * qemu_clock_get_ns() is a bit expensive, so we only check each
3084 * some iterations
3085 */
3086 if ((i & 63) == 0) {
3087 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
3088 1000000;
3089 if (t1 > MAX_WAIT) {
3090 trace_ram_save_iterate_big_wait(t1, i);
3091 break;
3092 }
89ac5a1d 3093 }
0983125b 3094 i++;
89ac5a1d 3095 }
56e93d26 3096 }
56e93d26 3097 }
56e93d26
JQ
3098
3099 /*
3100 * Must occur before EOS (or any QEMUFile operation)
3101 * because of RDMA protocol.
3102 */
b1b38387 3103 ret = rdma_registration_stop(f, RAM_CONTROL_ROUND);
5f5b8858
JQ
3104 if (ret < 0) {
3105 qemu_file_set_error(f, ret);
3106 }
56e93d26 3107
b2557345 3108out:
b69a0227
JQ
3109 if (ret >= 0
3110 && migration_is_setup_or_active(migrate_get_current()->state)) {
d4f34485 3111 if (migrate_multifd() && migrate_multifd_flush_after_each_section()) {
b05292c2
JQ
3112 ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel);
3113 if (ret < 0) {
3114 return ret;
3115 }
33d70973
LB
3116 }
3117
3d4095b2 3118 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
4c2d0f6d 3119 ram_transferred_add(8);
be07a0ed 3120 ret = qemu_fflush(f);
3d4095b2 3121 }
56e93d26
JQ
3122 if (ret < 0) {
3123 return ret;
3124 }
3125
5c90308f 3126 return done;
56e93d26
JQ
3127}
3128
3d0684b2
JQ
3129/**
3130 * ram_save_complete: function called to send the remaining amount of ram
3131 *
e8f3735f 3132 * Returns zero to indicate success or negative on error
3d0684b2
JQ
3133 *
3134 * Called with iothread lock
3135 *
3136 * @f: QEMUFile where to send the data
3137 * @opaque: RAMState pointer
3138 */
56e93d26
JQ
3139static int ram_save_complete(QEMUFile *f, void *opaque)
3140{
53518d94
JQ
3141 RAMState **temp = opaque;
3142 RAMState *rs = *temp;
e8f3735f 3143 int ret = 0;
6f37bb8b 3144
05931ec5
JQ
3145 rs->last_stage = !migration_in_colo_state();
3146
89ac5a1d
DDAG
3147 WITH_RCU_READ_LOCK_GUARD() {
3148 if (!migration_in_postcopy()) {
1e493be5 3149 migration_bitmap_sync_precopy(rs, true);
89ac5a1d 3150 }
56e93d26 3151
b1b38387 3152 ret = rdma_registration_start(f, RAM_CONTROL_FINISH);
48408174
JQ
3153 if (ret < 0) {
3154 qemu_file_set_error(f, ret);
a2326705 3155 return ret;
48408174 3156 }
56e93d26 3157
89ac5a1d 3158 /* try transferring iterative blocks of memory */
56e93d26 3159
89ac5a1d 3160 /* flush all remaining blocks regardless of rate limiting */
c13221b5 3161 qemu_mutex_lock(&rs->bitmap_mutex);
89ac5a1d
DDAG
3162 while (true) {
3163 int pages;
56e93d26 3164
05931ec5 3165 pages = ram_find_and_save_block(rs);
89ac5a1d
DDAG
3166 /* no more blocks to sent */
3167 if (pages == 0) {
3168 break;
3169 }
3170 if (pages < 0) {
a2326705
PX
3171 qemu_mutex_unlock(&rs->bitmap_mutex);
3172 return pages;
89ac5a1d 3173 }
e8f3735f 3174 }
c13221b5 3175 qemu_mutex_unlock(&rs->bitmap_mutex);
56e93d26 3176
8020bc9a 3177 compress_flush_data();
5f5b8858 3178
a2326705
PX
3179 ret = rdma_registration_stop(f, RAM_CONTROL_FINISH);
3180 if (ret < 0) {
3181 qemu_file_set_error(f, ret);
3182 return ret;
5f5b8858 3183 }
89ac5a1d 3184 }
d09a6fde 3185
7f401b80 3186 ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel);
33d70973
LB
3187 if (ret < 0) {
3188 return ret;
3189 }
3190
d4f34485 3191 if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) {
294e5a40
JQ
3192 qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
3193 }
33d70973 3194 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
be07a0ed 3195 return qemu_fflush(f);
56e93d26
JQ
3196}
3197
24beea4e
JQ
3198static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy,
3199 uint64_t *can_postcopy)
56e93d26 3200{
53518d94
JQ
3201 RAMState **temp = opaque;
3202 RAMState *rs = *temp;
56e93d26 3203
c8df4a7a 3204 uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
56e93d26 3205
c8df4a7a
JQ
3206 if (migrate_postcopy_ram()) {
3207 /* We can do postcopy, and all the data is postcopiable */
24beea4e 3208 *can_postcopy += remaining_size;
c8df4a7a 3209 } else {
24beea4e 3210 *must_precopy += remaining_size;
c8df4a7a
JQ
3211 }
3212}
3213
24beea4e
JQ
3214static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
3215 uint64_t *can_postcopy)
c8df4a7a 3216{
28ef5339 3217 MigrationState *s = migrate_get_current();
c8df4a7a
JQ
3218 RAMState **temp = opaque;
3219 RAMState *rs = *temp;
3220
3221 uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3222
28ef5339 3223 if (!migration_in_postcopy() && remaining_size < s->threshold_size) {
56e93d26 3224 qemu_mutex_lock_iothread();
89ac5a1d 3225 WITH_RCU_READ_LOCK_GUARD() {
1e493be5 3226 migration_bitmap_sync_precopy(rs, false);
89ac5a1d 3227 }
56e93d26 3228 qemu_mutex_unlock_iothread();
9edabd4d 3229 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
56e93d26 3230 }
c31b098f 3231
86e1167e
VSO
3232 if (migrate_postcopy_ram()) {
3233 /* We can do postcopy, and all the data is postcopiable */
24beea4e 3234 *can_postcopy += remaining_size;
86e1167e 3235 } else {
24beea4e 3236 *must_precopy += remaining_size;
86e1167e 3237 }
56e93d26
JQ
3238}
3239
3240static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3241{
3242 unsigned int xh_len;
3243 int xh_flags;
063e760a 3244 uint8_t *loaded_data;
56e93d26 3245
56e93d26
JQ
3246 /* extract RLE header */
3247 xh_flags = qemu_get_byte(f);
3248 xh_len = qemu_get_be16(f);
3249
3250 if (xh_flags != ENCODING_FLAG_XBZRLE) {
3251 error_report("Failed to load XBZRLE page - wrong compression!");
3252 return -1;
3253 }
3254
3255 if (xh_len > TARGET_PAGE_SIZE) {
3256 error_report("Failed to load XBZRLE page - len overflow!");
3257 return -1;
3258 }
f265e0e4 3259 loaded_data = XBZRLE.decoded_buf;
56e93d26 3260 /* load data and decode */
f265e0e4 3261 /* it can change loaded_data to point to an internal buffer */
063e760a 3262 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
56e93d26
JQ
3263
3264 /* decode RLE */
063e760a 3265 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
56e93d26
JQ
3266 TARGET_PAGE_SIZE) == -1) {
3267 error_report("Failed to load XBZRLE page - decode error!");
3268 return -1;
3269 }
3270
3271 return 0;
3272}
3273
3d0684b2
JQ
3274/**
3275 * ram_block_from_stream: read a RAMBlock id from the migration stream
3276 *
3277 * Must be called from within a rcu critical section.
3278 *
56e93d26 3279 * Returns a pointer from within the RCU-protected ram_list.
a7180877 3280 *
755e8d7c 3281 * @mis: the migration incoming state pointer
3d0684b2
JQ
3282 * @f: QEMUFile where to read the data from
3283 * @flags: Page flags (mostly to see if it's a continuation of previous block)
c01b16ed 3284 * @channel: the channel we're using
a7180877 3285 */
755e8d7c 3286static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis,
c01b16ed
PX
3287 QEMUFile *f, int flags,
3288 int channel)
56e93d26 3289{
c01b16ed 3290 RAMBlock *block = mis->last_recv_block[channel];
56e93d26
JQ
3291 char id[256];
3292 uint8_t len;
3293
3294 if (flags & RAM_SAVE_FLAG_CONTINUE) {
4c4bad48 3295 if (!block) {
56e93d26
JQ
3296 error_report("Ack, bad migration stream!");
3297 return NULL;
3298 }
4c4bad48 3299 return block;
56e93d26
JQ
3300 }
3301
3302 len = qemu_get_byte(f);
3303 qemu_get_buffer(f, (uint8_t *)id, len);
3304 id[len] = 0;
3305
e3dd7493 3306 block = qemu_ram_block_by_name(id);
4c4bad48
HZ
3307 if (!block) {
3308 error_report("Can't find block %s", id);
3309 return NULL;
56e93d26
JQ
3310 }
3311
f161c88a 3312 if (migrate_ram_is_ignored(block)) {
b895de50
CLG
3313 error_report("block %s should not be migrated !", id);
3314 return NULL;
3315 }
3316
c01b16ed 3317 mis->last_recv_block[channel] = block;
755e8d7c 3318
4c4bad48
HZ
3319 return block;
3320}
3321
3322static inline void *host_from_ram_block_offset(RAMBlock *block,
3323 ram_addr_t offset)
3324{
3325 if (!offset_in_ramblock(block, offset)) {
3326 return NULL;
3327 }
3328
3329 return block->host + offset;
56e93d26
JQ
3330}
3331
6a23f639
DH
3332static void *host_page_from_ram_block_offset(RAMBlock *block,
3333 ram_addr_t offset)
3334{
3335 /* Note: Explicitly no check against offset_in_ramblock(). */
3336 return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset),
3337 block->page_size);
3338}
3339
3340static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block,
3341 ram_addr_t offset)
3342{
3343 return ((uintptr_t)block->host + offset) & (block->page_size - 1);
3344}
3345
871cfc54
LS
3346void colo_record_bitmap(RAMBlock *block, ram_addr_t *normal, uint32_t pages)
3347{
3348 qemu_mutex_lock(&ram_state->bitmap_mutex);
3349 for (int i = 0; i < pages; i++) {
3350 ram_addr_t offset = normal[i];
3351 ram_state->migration_dirty_pages += !test_and_set_bit(
3352 offset >> TARGET_PAGE_BITS,
3353 block->bmap);
3354 }
3355 qemu_mutex_unlock(&ram_state->bitmap_mutex);
3356}
3357
13af18f2 3358static inline void *colo_cache_from_block_offset(RAMBlock *block,
8af66371 3359 ram_addr_t offset, bool record_bitmap)
13af18f2
ZC
3360{
3361 if (!offset_in_ramblock(block, offset)) {
3362 return NULL;
3363 }
3364 if (!block->colo_cache) {
3365 error_report("%s: colo_cache is NULL in block :%s",
3366 __func__, block->idstr);
3367 return NULL;
3368 }
7d9acafa
ZC
3369
3370 /*
3371 * During colo checkpoint, we need bitmap of these migrated pages.
3372 * It help us to decide which pages in ram cache should be flushed
3373 * into VM's RAM later.
3374 */
871cfc54
LS
3375 if (record_bitmap) {
3376 colo_record_bitmap(block, &offset, 1);
7d9acafa 3377 }
13af18f2
ZC
3378 return block->colo_cache + offset;
3379}
3380
3d0684b2 3381/**
7091dabe 3382 * ram_handle_zero: handle the zero page case
3d0684b2 3383 *
56e93d26
JQ
3384 * If a page (or a whole RDMA chunk) has been
3385 * determined to be zero, then zap it.
3d0684b2
JQ
3386 *
3387 * @host: host address for the zero page
3388 * @ch: what the page is filled from. We only support zero
3389 * @size: size of the zero page
56e93d26 3390 */
7091dabe 3391void ram_handle_zero(void *host, uint64_t size)
56e93d26 3392{
7091dabe
JQ
3393 if (!buffer_is_zero(host, size)) {
3394 memset(host, 0, size);
56e93d26
JQ
3395 }
3396}
3397
b70cb3b4
RL
3398static void colo_init_ram_state(void)
3399{
3400 ram_state_init(&ram_state);
b70cb3b4
RL
3401}
3402
13af18f2
ZC
3403/*
3404 * colo cache: this is for secondary VM, we cache the whole
3405 * memory of the secondary VM, it is need to hold the global lock
3406 * to call this helper.
3407 */
3408int colo_init_ram_cache(void)
3409{
3410 RAMBlock *block;
3411
44901b5a
PB
3412 WITH_RCU_READ_LOCK_GUARD() {
3413 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3414 block->colo_cache = qemu_anon_ram_alloc(block->used_length,
8dbe22c6 3415 NULL, false, false);
44901b5a
PB
3416 if (!block->colo_cache) {
3417 error_report("%s: Can't alloc memory for COLO cache of block %s,"
3418 "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
3419 block->used_length);
3420 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3421 if (block->colo_cache) {
3422 qemu_anon_ram_free(block->colo_cache, block->used_length);
3423 block->colo_cache = NULL;
3424 }
89ac5a1d 3425 }
44901b5a 3426 return -errno;
89ac5a1d 3427 }
e5fdf920
LS
3428 if (!machine_dump_guest_core(current_machine)) {
3429 qemu_madvise(block->colo_cache, block->used_length,
3430 QEMU_MADV_DONTDUMP);
3431 }
13af18f2 3432 }
13af18f2 3433 }
44901b5a 3434
7d9acafa
ZC
3435 /*
3436 * Record the dirty pages that sent by PVM, we use this dirty bitmap together
3437 * with to decide which page in cache should be flushed into SVM's RAM. Here
3438 * we use the same name 'ram_bitmap' as for migration.
3439 */
3440 if (ram_bytes_total()) {
fbd162e6 3441 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
7d9acafa 3442 unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
7d9acafa 3443 block->bmap = bitmap_new(pages);
7d9acafa
ZC
3444 }
3445 }
7d9acafa 3446
b70cb3b4 3447 colo_init_ram_state();
13af18f2 3448 return 0;
13af18f2
ZC
3449}
3450
0393031a
HZ
3451/* TODO: duplicated with ram_init_bitmaps */
3452void colo_incoming_start_dirty_log(void)
3453{
3454 RAMBlock *block = NULL;
3455 /* For memory_global_dirty_log_start below. */
3456 qemu_mutex_lock_iothread();
3457 qemu_mutex_lock_ramlist();
3458
1e493be5 3459 memory_global_dirty_log_sync(false);
0393031a
HZ
3460 WITH_RCU_READ_LOCK_GUARD() {
3461 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3462 ramblock_sync_dirty_bitmap(ram_state, block);
3463 /* Discard this dirty bitmap record */
3464 bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS);
3465 }
63b41db4 3466 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
0393031a
HZ
3467 }
3468 ram_state->migration_dirty_pages = 0;
3469 qemu_mutex_unlock_ramlist();
3470 qemu_mutex_unlock_iothread();
3471}
3472
13af18f2
ZC
3473/* It is need to hold the global lock to call this helper */
3474void colo_release_ram_cache(void)
3475{
3476 RAMBlock *block;
3477
63b41db4 3478 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
fbd162e6 3479 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
7d9acafa
ZC
3480 g_free(block->bmap);
3481 block->bmap = NULL;
3482 }
3483
89ac5a1d
DDAG
3484 WITH_RCU_READ_LOCK_GUARD() {
3485 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3486 if (block->colo_cache) {
3487 qemu_anon_ram_free(block->colo_cache, block->used_length);
3488 block->colo_cache = NULL;
3489 }
13af18f2
ZC
3490 }
3491 }
0393031a 3492 ram_state_cleanup(&ram_state);
13af18f2
ZC
3493}
3494
f265e0e4
JQ
3495/**
3496 * ram_load_setup: Setup RAM for migration incoming side
3497 *
3498 * Returns zero to indicate success and negative for error
3499 *
3500 * @f: QEMUFile where to receive the data
3501 * @opaque: RAMState pointer
3502 */
3503static int ram_load_setup(QEMUFile *f, void *opaque)
3504{
3505 xbzrle_load_setup();
f9494614 3506 ramblock_recv_map_init();
13af18f2 3507
f265e0e4
JQ
3508 return 0;
3509}
3510
3511static int ram_load_cleanup(void *opaque)
3512{
f9494614 3513 RAMBlock *rb;
56eb90af 3514
fbd162e6 3515 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
bd108a44 3516 qemu_ram_block_writeback(rb);
56eb90af
JH
3517 }
3518
f265e0e4 3519 xbzrle_load_cleanup();
f9494614 3520
fbd162e6 3521 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
f9494614
AP
3522 g_free(rb->receivedmap);
3523 rb->receivedmap = NULL;
3524 }
13af18f2 3525
f265e0e4
JQ
3526 return 0;
3527}
3528
3d0684b2
JQ
3529/**
3530 * ram_postcopy_incoming_init: allocate postcopy data structures
3531 *
3532 * Returns 0 for success and negative if there was one error
3533 *
3534 * @mis: current migration incoming state
3535 *
3536 * Allocate data structures etc needed by incoming migration with
3537 * postcopy-ram. postcopy-ram's similarly names
3538 * postcopy_ram_incoming_init does the work.
1caddf8a
DDAG
3539 */
3540int ram_postcopy_incoming_init(MigrationIncomingState *mis)
3541{
c136180c 3542 return postcopy_ram_incoming_init(mis);
1caddf8a
DDAG
3543}
3544
3d0684b2
JQ
3545/**
3546 * ram_load_postcopy: load a page in postcopy case
3547 *
3548 * Returns 0 for success or -errno in case of error
3549 *
a7180877
DDAG
3550 * Called in postcopy mode by ram_load().
3551 * rcu_read_lock is taken prior to this being called.
3d0684b2
JQ
3552 *
3553 * @f: QEMUFile where to send the data
36f62f11 3554 * @channel: the channel to use for loading
a7180877 3555 */
36f62f11 3556int ram_load_postcopy(QEMUFile *f, int channel)
a7180877
DDAG
3557{
3558 int flags = 0, ret = 0;
3559 bool place_needed = false;
1aa83678 3560 bool matches_target_page_size = false;
a7180877 3561 MigrationIncomingState *mis = migration_incoming_get_current();
36f62f11 3562 PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel];
a7180877
DDAG
3563
3564 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
3565 ram_addr_t addr;
a7180877
DDAG
3566 void *page_buffer = NULL;
3567 void *place_source = NULL;
df9ff5e1 3568 RAMBlock *block = NULL;
a7180877 3569 uint8_t ch;
644acf99 3570 int len;
a7180877
DDAG
3571
3572 addr = qemu_get_be64(f);
7a9ddfbf
PX
3573
3574 /*
3575 * If qemu file error, we should stop here, and then "addr"
3576 * may be invalid
3577 */
3578 ret = qemu_file_get_error(f);
3579 if (ret) {
3580 break;
3581 }
3582
a7180877
DDAG
3583 flags = addr & ~TARGET_PAGE_MASK;
3584 addr &= TARGET_PAGE_MASK;
3585
36f62f11 3586 trace_ram_load_postcopy_loop(channel, (uint64_t)addr, flags);
644acf99
WY
3587 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
3588 RAM_SAVE_FLAG_COMPRESS_PAGE)) {
c01b16ed 3589 block = ram_block_from_stream(mis, f, flags, channel);
6a23f639
DH
3590 if (!block) {
3591 ret = -EINVAL;
3592 break;
3593 }
4c4bad48 3594
898ba906
DH
3595 /*
3596 * Relying on used_length is racy and can result in false positives.
3597 * We might place pages beyond used_length in case RAM was shrunk
3598 * while in postcopy, which is fine - trying to place via
3599 * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault.
3600 */
3601 if (!block->host || addr >= block->postcopy_length) {
a7180877
DDAG
3602 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3603 ret = -EINVAL;
3604 break;
3605 }
77dadc3f 3606 tmp_page->target_pages++;
1aa83678 3607 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
a7180877 3608 /*
28abd200
DDAG
3609 * Postcopy requires that we place whole host pages atomically;
3610 * these may be huge pages for RAMBlocks that are backed by
3611 * hugetlbfs.
a7180877
DDAG
3612 * To make it atomic, the data is read into a temporary page
3613 * that's moved into place later.
3614 * The migration protocol uses, possibly smaller, target-pages
3615 * however the source ensures it always sends all the components
91ba442f 3616 * of a host page in one chunk.
a7180877 3617 */
77dadc3f 3618 page_buffer = tmp_page->tmp_huge_page +
6a23f639
DH
3619 host_page_offset_from_ram_block_offset(block, addr);
3620 /* If all TP are zero then we can optimise the place */
77dadc3f
PX
3621 if (tmp_page->target_pages == 1) {
3622 tmp_page->host_addr =
3623 host_page_from_ram_block_offset(block, addr);
3624 } else if (tmp_page->host_addr !=
3625 host_page_from_ram_block_offset(block, addr)) {
c53b7ddc 3626 /* not the 1st TP within the HP */
36f62f11 3627 error_report("Non-same host page detected on channel %d: "
cfc7dc8a
PX
3628 "Target host page %p, received host page %p "
3629 "(rb %s offset 0x"RAM_ADDR_FMT" target_pages %d)",
36f62f11 3630 channel, tmp_page->host_addr,
cfc7dc8a
PX
3631 host_page_from_ram_block_offset(block, addr),
3632 block->idstr, addr, tmp_page->target_pages);
6a23f639
DH
3633 ret = -EINVAL;
3634 break;
a7180877
DDAG
3635 }
3636
3637 /*
3638 * If it's the last part of a host page then we place the host
3639 * page
3640 */
77dadc3f
PX
3641 if (tmp_page->target_pages ==
3642 (block->page_size / TARGET_PAGE_SIZE)) {
4cbb3c63 3643 place_needed = true;
4cbb3c63 3644 }
77dadc3f 3645 place_source = tmp_page->tmp_huge_page;
a7180877
DDAG
3646 }
3647
3648 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
bb890ed5 3649 case RAM_SAVE_FLAG_ZERO:
a7180877 3650 ch = qemu_get_byte(f);
413d64fe
JQ
3651 if (ch != 0) {
3652 error_report("Found a zero page with value %d", ch);
3653 ret = -EINVAL;
3654 break;
3655 }
2e36bc1b
WY
3656 /*
3657 * Can skip to set page_buffer when
3658 * this is a zero page and (block->page_size == TARGET_PAGE_SIZE).
3659 */
413d64fe 3660 if (!matches_target_page_size) {
2e36bc1b
WY
3661 memset(page_buffer, ch, TARGET_PAGE_SIZE);
3662 }
a7180877
DDAG
3663 break;
3664
3665 case RAM_SAVE_FLAG_PAGE:
77dadc3f 3666 tmp_page->all_zero = false;
1aa83678
PX
3667 if (!matches_target_page_size) {
3668 /* For huge pages, we always use temporary buffer */
a7180877
DDAG
3669 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
3670 } else {
1aa83678
PX
3671 /*
3672 * For small pages that matches target page size, we
3673 * avoid the qemu_file copy. Instead we directly use
3674 * the buffer of QEMUFile to place the page. Note: we
3675 * cannot do any QEMUFile operation before using that
3676 * buffer to make sure the buffer is valid when
3677 * placing the page.
a7180877
DDAG
3678 */
3679 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
3680 TARGET_PAGE_SIZE);
3681 }
3682 break;
644acf99 3683 case RAM_SAVE_FLAG_COMPRESS_PAGE:
77dadc3f 3684 tmp_page->all_zero = false;
644acf99
WY
3685 len = qemu_get_be32(f);
3686 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
3687 error_report("Invalid compressed data length: %d", len);
3688 ret = -EINVAL;
3689 break;
3690 }
3691 decompress_data_with_multi_threads(f, page_buffer, len);
3692 break;
294e5a40
JQ
3693 case RAM_SAVE_FLAG_MULTIFD_FLUSH:
3694 multifd_recv_sync_main();
3695 break;
a7180877
DDAG
3696 case RAM_SAVE_FLAG_EOS:
3697 /* normal exit */
d4f34485
JQ
3698 if (migrate_multifd() &&
3699 migrate_multifd_flush_after_each_section()) {
b05292c2
JQ
3700 multifd_recv_sync_main();
3701 }
a7180877
DDAG
3702 break;
3703 default:
29fccade 3704 error_report("Unknown combination of migration flags: 0x%x"
a7180877
DDAG
3705 " (postcopy mode)", flags);
3706 ret = -EINVAL;
7a9ddfbf
PX
3707 break;
3708 }
3709
644acf99
WY
3710 /* Got the whole host page, wait for decompress before placing. */
3711 if (place_needed) {
3712 ret |= wait_for_decompress_done();
3713 }
3714
7a9ddfbf
PX
3715 /* Detect for any possible file errors */
3716 if (!ret && qemu_file_get_error(f)) {
3717 ret = qemu_file_get_error(f);
a7180877
DDAG
3718 }
3719
7a9ddfbf 3720 if (!ret && place_needed) {
77dadc3f
PX
3721 if (tmp_page->all_zero) {
3722 ret = postcopy_place_page_zero(mis, tmp_page->host_addr, block);
a7180877 3723 } else {
77dadc3f
PX
3724 ret = postcopy_place_page(mis, tmp_page->host_addr,
3725 place_source, block);
a7180877 3726 }
ddf35bdf 3727 place_needed = false;
77dadc3f 3728 postcopy_temp_page_reset(tmp_page);
a7180877 3729 }
a7180877
DDAG
3730 }
3731
3732 return ret;
3733}
3734
acab30b8
DHB
3735static bool postcopy_is_running(void)
3736{
3737 PostcopyState ps = postcopy_state_get();
3738 return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
3739}
3740
e6f4aa18
ZC
3741/*
3742 * Flush content of RAM cache into SVM's memory.
3743 * Only flush the pages that be dirtied by PVM or SVM or both.
3744 */
24fa16f8 3745void colo_flush_ram_cache(void)
e6f4aa18
ZC
3746{
3747 RAMBlock *block = NULL;
3748 void *dst_host;
3749 void *src_host;
3750 unsigned long offset = 0;
3751
1e493be5 3752 memory_global_dirty_log_sync(false);
9d638407 3753 qemu_mutex_lock(&ram_state->bitmap_mutex);
89ac5a1d
DDAG
3754 WITH_RCU_READ_LOCK_GUARD() {
3755 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3756 ramblock_sync_dirty_bitmap(ram_state, block);
3757 }
d1955d22 3758 }
d1955d22 3759
e6f4aa18 3760 trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
89ac5a1d
DDAG
3761 WITH_RCU_READ_LOCK_GUARD() {
3762 block = QLIST_FIRST_RCU(&ram_list.blocks);
e6f4aa18 3763
89ac5a1d 3764 while (block) {
a6a83cef 3765 unsigned long num = 0;
e6f4aa18 3766
a6a83cef 3767 offset = colo_bitmap_find_dirty(ram_state, block, offset, &num);
542147f4
DH
3768 if (!offset_in_ramblock(block,
3769 ((ram_addr_t)offset) << TARGET_PAGE_BITS)) {
89ac5a1d 3770 offset = 0;
a6a83cef 3771 num = 0;
89ac5a1d
DDAG
3772 block = QLIST_NEXT_RCU(block, next);
3773 } else {
a6a83cef
RL
3774 unsigned long i = 0;
3775
3776 for (i = 0; i < num; i++) {
3777 migration_bitmap_clear_dirty(ram_state, block, offset + i);
3778 }
8bba004c
AR
3779 dst_host = block->host
3780 + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
3781 src_host = block->colo_cache
3782 + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
a6a83cef
RL
3783 memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num);
3784 offset += num;
89ac5a1d 3785 }
e6f4aa18
ZC
3786 }
3787 }
9d638407 3788 qemu_mutex_unlock(&ram_state->bitmap_mutex);
e6f4aa18
ZC
3789 trace_colo_flush_ram_cache_end();
3790}
3791
2f5ced5b
NB
3792static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length)
3793{
3794 int ret = 0;
3795 /* ADVISE is earlier, it shows the source has the postcopy capability on */
3796 bool postcopy_advised = migration_incoming_postcopy_advised();
3797
3798 assert(block);
3799
3800 if (!qemu_ram_is_migratable(block)) {
3801 error_report("block %s should not be migrated !", block->idstr);
3802 return -EINVAL;
3803 }
3804
3805 if (length != block->used_length) {
3806 Error *local_err = NULL;
3807
3808 ret = qemu_ram_resize(block, length, &local_err);
3809 if (local_err) {
3810 error_report_err(local_err);
2c36076a 3811 return ret;
2f5ced5b
NB
3812 }
3813 }
3814 /* For postcopy we need to check hugepage sizes match */
3815 if (postcopy_advised && migrate_postcopy_ram() &&
3816 block->page_size != qemu_host_page_size) {
3817 uint64_t remote_page_size = qemu_get_be64(f);
3818 if (remote_page_size != block->page_size) {
3819 error_report("Mismatched RAM page size %s "
3820 "(local) %zd != %" PRId64, block->idstr,
3821 block->page_size, remote_page_size);
2c36076a 3822 return -EINVAL;
2f5ced5b
NB
3823 }
3824 }
3825 if (migrate_ignore_shared()) {
3826 hwaddr addr = qemu_get_be64(f);
3827 if (migrate_ram_is_ignored(block) &&
3828 block->mr->addr != addr) {
3829 error_report("Mismatched GPAs for block %s "
3830 "%" PRId64 "!= %" PRId64, block->idstr,
3831 (uint64_t)addr, (uint64_t)block->mr->addr);
2c36076a 3832 return -EINVAL;
2f5ced5b
NB
3833 }
3834 }
3835 ret = rdma_block_notification_handle(f, block->idstr);
3836 if (ret < 0) {
3837 qemu_file_set_error(f, ret);
3838 }
3839
3840 return ret;
3841}
3842
3843static int parse_ramblocks(QEMUFile *f, ram_addr_t total_ram_bytes)
3844{
3845 int ret = 0;
3846
3847 /* Synchronize RAM block list */
3848 while (!ret && total_ram_bytes) {
3849 RAMBlock *block;
3850 char id[256];
3851 ram_addr_t length;
3852 int len = qemu_get_byte(f);
3853
3854 qemu_get_buffer(f, (uint8_t *)id, len);
3855 id[len] = 0;
3856 length = qemu_get_be64(f);
3857
3858 block = qemu_ram_block_by_name(id);
3859 if (block) {
3860 ret = parse_ramblock(f, block, length);
3861 } else {
3862 error_report("Unknown ramblock \"%s\", cannot accept "
3863 "migration", id);
3864 ret = -EINVAL;
3865 }
3866 total_ram_bytes -= length;
3867 }
3868
3869 return ret;
3870}
3871
10da4a36
WY
3872/**
3873 * ram_load_precopy: load pages in precopy case
3874 *
3875 * Returns 0 for success or -errno in case of error
3876 *
3877 * Called in precopy mode by ram_load().
3878 * rcu_read_lock is taken prior to this being called.
3879 *
3880 * @f: QEMUFile where to send the data
3881 */
3882static int ram_load_precopy(QEMUFile *f)
56e93d26 3883{
755e8d7c 3884 MigrationIncomingState *mis = migration_incoming_get_current();
e65cec5e 3885 int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;
2f5ced5b 3886
a7a94d14 3887 if (!migrate_compress()) {
edc60127
JQ
3888 invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
3889 }
a7180877 3890
10da4a36 3891 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2f5ced5b 3892 ram_addr_t addr;
0393031a 3893 void *host = NULL, *host_bak = NULL;
56e93d26
JQ
3894 uint8_t ch;
3895
e65cec5e
YK
3896 /*
3897 * Yield periodically to let main loop run, but an iteration of
3898 * the main loop is expensive, so do it each some iterations
3899 */
3900 if ((i & 32767) == 0 && qemu_in_coroutine()) {
3901 aio_co_schedule(qemu_get_current_aio_context(),
3902 qemu_coroutine_self());
3903 qemu_coroutine_yield();
3904 }
3905 i++;
3906
56e93d26
JQ
3907 addr = qemu_get_be64(f);
3908 flags = addr & ~TARGET_PAGE_MASK;
3909 addr &= TARGET_PAGE_MASK;
3910
edc60127
JQ
3911 if (flags & invalid_flags) {
3912 if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
3913 error_report("Received an unexpected compressed page");
3914 }
3915
3916 ret = -EINVAL;
3917 break;
3918 }
3919
bb890ed5 3920 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
a776aa15 3921 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
c01b16ed
PX
3922 RAMBlock *block = ram_block_from_stream(mis, f, flags,
3923 RAM_CHANNEL_PRECOPY);
4c4bad48 3924
0393031a 3925 host = host_from_ram_block_offset(block, addr);
13af18f2 3926 /*
0393031a
HZ
3927 * After going into COLO stage, we should not load the page
3928 * into SVM's memory directly, we put them into colo_cache firstly.
3929 * NOTE: We need to keep a copy of SVM's ram in colo_cache.
3930 * Previously, we copied all these memory in preparing stage of COLO
3931 * while we need to stop VM, which is a time-consuming process.
3932 * Here we optimize it by a trick, back-up every page while in
3933 * migration process while COLO is enabled, though it affects the
3934 * speed of the migration, but it obviously reduce the downtime of
3935 * back-up all SVM'S memory in COLO preparing stage.
13af18f2 3936 */
0393031a
HZ
3937 if (migration_incoming_colo_enabled()) {
3938 if (migration_incoming_in_colo_state()) {
3939 /* In COLO stage, put all pages into cache temporarily */
8af66371 3940 host = colo_cache_from_block_offset(block, addr, true);
0393031a
HZ
3941 } else {
3942 /*
3943 * In migration stage but before COLO stage,
3944 * Put all pages into both cache and SVM's memory.
3945 */
8af66371 3946 host_bak = colo_cache_from_block_offset(block, addr, false);
0393031a 3947 }
13af18f2 3948 }
a776aa15
DDAG
3949 if (!host) {
3950 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3951 ret = -EINVAL;
3952 break;
3953 }
13af18f2
ZC
3954 if (!migration_incoming_in_colo_state()) {
3955 ramblock_recv_bitmap_set(block, host);
3956 }
3957
1db9d8e5 3958 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
a776aa15
DDAG
3959 }
3960
56e93d26
JQ
3961 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
3962 case RAM_SAVE_FLAG_MEM_SIZE:
2f5ced5b 3963 ret = parse_ramblocks(f, addr);
56e93d26 3964 break;
a776aa15 3965
bb890ed5 3966 case RAM_SAVE_FLAG_ZERO:
56e93d26 3967 ch = qemu_get_byte(f);
413d64fe
JQ
3968 if (ch != 0) {
3969 error_report("Found a zero page with value %d", ch);
3970 ret = -EINVAL;
3971 break;
3972 }
7091dabe 3973 ram_handle_zero(host, TARGET_PAGE_SIZE);
56e93d26 3974 break;
a776aa15 3975
56e93d26 3976 case RAM_SAVE_FLAG_PAGE:
56e93d26
JQ
3977 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
3978 break;
56e93d26 3979
a776aa15 3980 case RAM_SAVE_FLAG_COMPRESS_PAGE:
56e93d26
JQ
3981 len = qemu_get_be32(f);
3982 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
3983 error_report("Invalid compressed data length: %d", len);
3984 ret = -EINVAL;
3985 break;
3986 }
c1bc6626 3987 decompress_data_with_multi_threads(f, host, len);
56e93d26 3988 break;
a776aa15 3989
56e93d26 3990 case RAM_SAVE_FLAG_XBZRLE:
56e93d26
JQ
3991 if (load_xbzrle(f, addr, host) < 0) {
3992 error_report("Failed to decompress XBZRLE page at "
3993 RAM_ADDR_FMT, addr);
3994 ret = -EINVAL;
3995 break;
3996 }
3997 break;
294e5a40
JQ
3998 case RAM_SAVE_FLAG_MULTIFD_FLUSH:
3999 multifd_recv_sync_main();
4000 break;
56e93d26
JQ
4001 case RAM_SAVE_FLAG_EOS:
4002 /* normal exit */
d4f34485
JQ
4003 if (migrate_multifd() &&
4004 migrate_multifd_flush_after_each_section()) {
b05292c2
JQ
4005 multifd_recv_sync_main();
4006 }
56e93d26 4007 break;
5f1e7540 4008 case RAM_SAVE_FLAG_HOOK:
b1b38387 4009 ret = rdma_registration_handle(f);
f6d6c089
JQ
4010 if (ret < 0) {
4011 qemu_file_set_error(f, ret);
4012 }
5f1e7540 4013 break;
56e93d26 4014 default:
5f1e7540
JQ
4015 error_report("Unknown combination of migration flags: 0x%x", flags);
4016 ret = -EINVAL;
56e93d26
JQ
4017 }
4018 if (!ret) {
4019 ret = qemu_file_get_error(f);
4020 }
0393031a
HZ
4021 if (!ret && host_bak) {
4022 memcpy(host_bak, host, TARGET_PAGE_SIZE);
4023 }
56e93d26
JQ
4024 }
4025
ca1a6b70 4026 ret |= wait_for_decompress_done();
10da4a36
WY
4027 return ret;
4028}
4029
4030static int ram_load(QEMUFile *f, void *opaque, int version_id)
4031{
4032 int ret = 0;
4033 static uint64_t seq_iter;
4034 /*
4035 * If system is running in postcopy mode, page inserts to host memory must
4036 * be atomic
4037 */
4038 bool postcopy_running = postcopy_is_running();
4039
4040 seq_iter++;
4041
4042 if (version_id != 4) {
4043 return -EINVAL;
4044 }
4045
4046 /*
4047 * This RCU critical section can be very long running.
4048 * When RCU reclaims in the code start to become numerous,
4049 * it will be necessary to reduce the granularity of this
4050 * critical section.
4051 */
89ac5a1d
DDAG
4052 WITH_RCU_READ_LOCK_GUARD() {
4053 if (postcopy_running) {
36f62f11
PX
4054 /*
4055 * Note! Here RAM_CHANNEL_PRECOPY is the precopy channel of
4056 * postcopy migration, we have another RAM_CHANNEL_POSTCOPY to
4057 * service fast page faults.
4058 */
4059 ret = ram_load_postcopy(f, RAM_CHANNEL_PRECOPY);
89ac5a1d
DDAG
4060 } else {
4061 ret = ram_load_precopy(f);
4062 }
10da4a36 4063 }
55c4446b 4064 trace_ram_load_complete(ret, seq_iter);
e6f4aa18 4065
56e93d26
JQ
4066 return ret;
4067}
4068
c6467627
VSO
4069static bool ram_has_postcopy(void *opaque)
4070{
469dd51b 4071 RAMBlock *rb;
fbd162e6 4072 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
469dd51b
JH
4073 if (ramblock_is_pmem(rb)) {
4074 info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
4075 "is not supported now!", rb->idstr, rb->host);
4076 return false;
4077 }
4078 }
4079
c6467627
VSO
4080 return migrate_postcopy_ram();
4081}
4082
edd090c7
PX
4083/* Sync all the dirty bitmap with destination VM. */
4084static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
4085{
4086 RAMBlock *block;
4087 QEMUFile *file = s->to_dst_file;
edd090c7
PX
4088
4089 trace_ram_dirty_bitmap_sync_start();
4090
1015ff54 4091 qatomic_set(&rs->postcopy_bmap_sync_requested, 0);
fbd162e6 4092 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
edd090c7
PX
4093 qemu_savevm_send_recv_bitmap(file, block->idstr);
4094 trace_ram_dirty_bitmap_request(block->idstr);
1015ff54 4095 qatomic_inc(&rs->postcopy_bmap_sync_requested);
edd090c7
PX
4096 }
4097
4098 trace_ram_dirty_bitmap_sync_wait();
4099
4100 /* Wait until all the ramblocks' dirty bitmap synced */
1015ff54 4101 while (qatomic_read(&rs->postcopy_bmap_sync_requested)) {
f8c543e8
PX
4102 if (migration_rp_wait(s)) {
4103 return -1;
4104 }
edd090c7
PX
4105 }
4106
4107 trace_ram_dirty_bitmap_sync_complete();
4108
4109 return 0;
4110}
4111
a335debb
PX
4112/*
4113 * Read the received bitmap, revert it as the initial dirty bitmap.
4114 * This is only used when the postcopy migration is paused but wants
4115 * to resume from a middle point.
88577f32
PX
4116 *
4117 * Returns true if succeeded, false for errors.
a335debb 4118 */
88577f32 4119bool ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block, Error **errp)
a335debb 4120{
43044ac0 4121 /* from_dst_file is always valid because we're within rp_thread */
a335debb 4122 QEMUFile *file = s->rp_state.from_dst_file;
1a36e4c9
PMD
4123 g_autofree unsigned long *le_bitmap = NULL;
4124 unsigned long nbits = block->used_length >> TARGET_PAGE_BITS;
a725ef9f 4125 uint64_t local_size = DIV_ROUND_UP(nbits, 8);
a335debb 4126 uint64_t size, end_mark;
1015ff54 4127 RAMState *rs = ram_state;
a335debb
PX
4128
4129 trace_ram_dirty_bitmap_reload_begin(block->idstr);
4130
4131 if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
7aa6070d
PX
4132 error_setg(errp, "Reload bitmap in incorrect state %s",
4133 MigrationStatus_str(s->state));
88577f32 4134 return false;
a335debb
PX
4135 }
4136
4137 /*
4138 * Note: see comments in ramblock_recv_bitmap_send() on why we
3a4452d8 4139 * need the endianness conversion, and the paddings.
a335debb
PX
4140 */
4141 local_size = ROUND_UP(local_size, 8);
4142
4143 /* Add paddings */
4144 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
4145
4146 size = qemu_get_be64(file);
4147
4148 /* The size of the bitmap should match with our ramblock */
4149 if (size != local_size) {
7aa6070d
PX
4150 error_setg(errp, "ramblock '%s' bitmap size mismatch (0x%"PRIx64
4151 " != 0x%"PRIx64")", block->idstr, size, local_size);
88577f32 4152 return false;
a335debb
PX
4153 }
4154
4155 size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
4156 end_mark = qemu_get_be64(file);
4157
88577f32
PX
4158 if (qemu_file_get_error(file) || size != local_size) {
4159 error_setg(errp, "read bitmap failed for ramblock '%s': "
4160 "(size 0x%"PRIx64", got: 0x%"PRIx64")",
4161 block->idstr, local_size, size);
4162 return false;
a335debb
PX
4163 }
4164
4165 if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
7aa6070d
PX
4166 error_setg(errp, "ramblock '%s' end mark incorrect: 0x%"PRIx64,
4167 block->idstr, end_mark);
88577f32 4168 return false;
a335debb
PX
4169 }
4170
4171 /*
3a4452d8 4172 * Endianness conversion. We are during postcopy (though paused).
a335debb
PX
4173 * The dirty bitmap won't change. We can directly modify it.
4174 */
4175 bitmap_from_le(block->bmap, le_bitmap, nbits);
4176
4177 /*
4178 * What we received is "received bitmap". Revert it as the initial
4179 * dirty bitmap for this ramblock.
4180 */
4181 bitmap_complement(block->bmap, block->bmap, nbits);
4182
be39b4cd
DH
4183 /* Clear dirty bits of discarded ranges that we don't want to migrate. */
4184 ramblock_dirty_bitmap_clear_discarded_pages(block);
4185
4186 /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
a335debb
PX
4187 trace_ram_dirty_bitmap_reload_complete(block->idstr);
4188
1015ff54
PX
4189 qatomic_dec(&rs->postcopy_bmap_sync_requested);
4190
edd090c7 4191 /*
5e79a4bf
PX
4192 * We succeeded to sync bitmap for current ramblock. Always kick the
4193 * migration thread to check whether all requested bitmaps are
4194 * reloaded. NOTE: it's racy to only kick when requested==0, because
4195 * we don't know whether the migration thread may still be increasing
4196 * it.
edd090c7 4197 */
5e79a4bf 4198 migration_rp_kick(s);
edd090c7 4199
88577f32 4200 return true;
a335debb
PX
4201}
4202
edd090c7
PX
4203static int ram_resume_prepare(MigrationState *s, void *opaque)
4204{
4205 RAMState *rs = *(RAMState **)opaque;
08614f34 4206 int ret;
edd090c7 4207
08614f34
PX
4208 ret = ram_dirty_bitmap_sync_all(s, rs);
4209 if (ret) {
4210 return ret;
4211 }
4212
4213 ram_state_resume_prepare(rs, s->to_dst_file);
4214
4215 return 0;
edd090c7
PX
4216}
4217
36f62f11
PX
4218void postcopy_preempt_shutdown_file(MigrationState *s)
4219{
4220 qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
4221 qemu_fflush(s->postcopy_qemufile_src);
4222}
4223
56e93d26 4224static SaveVMHandlers savevm_ram_handlers = {
9907e842 4225 .save_setup = ram_save_setup,
56e93d26 4226 .save_live_iterate = ram_save_iterate,
763c906b 4227 .save_live_complete_postcopy = ram_save_complete,
a3e06c3d 4228 .save_live_complete_precopy = ram_save_complete,
c6467627 4229 .has_postcopy = ram_has_postcopy,
c8df4a7a
JQ
4230 .state_pending_exact = ram_state_pending_exact,
4231 .state_pending_estimate = ram_state_pending_estimate,
56e93d26 4232 .load_state = ram_load,
f265e0e4
JQ
4233 .save_cleanup = ram_save_cleanup,
4234 .load_setup = ram_load_setup,
4235 .load_cleanup = ram_load_cleanup,
edd090c7 4236 .resume_prepare = ram_resume_prepare,
56e93d26
JQ
4237};
4238
c7c0e724
DH
4239static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
4240 size_t old_size, size_t new_size)
4241{
cc61c703 4242 PostcopyState ps = postcopy_state_get();
c7c0e724
DH
4243 ram_addr_t offset;
4244 RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
4245 Error *err = NULL;
4246
f75ed59f
DF
4247 if (!rb) {
4248 error_report("RAM block not found");
4249 return;
4250 }
4251
f161c88a 4252 if (migrate_ram_is_ignored(rb)) {
c7c0e724
DH
4253 return;
4254 }
4255
4256 if (!migration_is_idle()) {
4257 /*
4258 * Precopy code on the source cannot deal with the size of RAM blocks
4259 * changing at random points in time - especially after sending the
4260 * RAM block sizes in the migration stream, they must no longer change.
4261 * Abort and indicate a proper reason.
4262 */
4263 error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
458fecca 4264 migration_cancel(err);
c7c0e724 4265 error_free(err);
c7c0e724 4266 }
cc61c703
DH
4267
4268 switch (ps) {
4269 case POSTCOPY_INCOMING_ADVISE:
4270 /*
4271 * Update what ram_postcopy_incoming_init()->init_range() does at the
4272 * time postcopy was advised. Syncing RAM blocks with the source will
4273 * result in RAM resizes.
4274 */
4275 if (old_size < new_size) {
4276 if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
4277 error_report("RAM block '%s' discard of resized RAM failed",
4278 rb->idstr);
4279 }
4280 }
898ba906 4281 rb->postcopy_length = new_size;
cc61c703
DH
4282 break;
4283 case POSTCOPY_INCOMING_NONE:
4284 case POSTCOPY_INCOMING_RUNNING:
4285 case POSTCOPY_INCOMING_END:
4286 /*
4287 * Once our guest is running, postcopy does no longer care about
4288 * resizes. When growing, the new memory was not available on the
4289 * source, no handler needed.
4290 */
4291 break;
4292 default:
4293 error_report("RAM block '%s' resized during postcopy state: %d",
4294 rb->idstr, ps);
4295 exit(-1);
4296 }
c7c0e724
DH
4297}
4298
4299static RAMBlockNotifier ram_mig_ram_notifier = {
4300 .ram_block_resized = ram_mig_ram_block_resized,
4301};
4302
56e93d26
JQ
4303void ram_mig_init(void)
4304{
4305 qemu_mutex_init(&XBZRLE.lock);
ce62df53 4306 register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
c7c0e724 4307 ram_block_notifier_add(&ram_mig_ram_notifier);
56e93d26 4308}