1 /*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 * Copyright (c) 2011-2015 Red Hat Inc
6 *
7 * Authors:
8 * Juan Quintela <quintela@redhat.com>
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 * THE SOFTWARE.
27 */
28
29 #include "qemu/osdep.h"
30 #include "qemu/cutils.h"
31 #include "qemu/bitops.h"
32 #include "qemu/bitmap.h"
33 #include "qemu/main-loop.h"
34 #include "xbzrle.h"
35 #include "ram.h"
36 #include "migration.h"
37 #include "migration/register.h"
38 #include "migration/misc.h"
39 #include "qemu-file.h"
40 #include "postcopy-ram.h"
41 #include "page_cache.h"
42 #include "qemu/error-report.h"
43 #include "qapi/error.h"
44 #include "qapi/qapi-types-migration.h"
45 #include "qapi/qapi-events-migration.h"
46 #include "qapi/qmp/qerror.h"
47 #include "trace.h"
48 #include "exec/ram_addr.h"
49 #include "exec/target_page.h"
50 #include "qemu/rcu_queue.h"
51 #include "migration/colo.h"
52 #include "block.h"
53 #include "sysemu/cpu-throttle.h"
54 #include "savevm.h"
55 #include "qemu/iov.h"
56 #include "multifd.h"
57 #include "sysemu/runstate.h"
58
59 #include "hw/boards.h" /* for machine_dump_guest_core() */
60
61 #if defined(__linux__)
62 #include "qemu/userfaultfd.h"
63 #endif /* defined(__linux__) */
64
65 /***********************************************************/
66 /* ram save/restore */
67
68 /* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
69 * worked for pages that were filled with the same char. We switched
70 * it to only search for the zero value, and renamed it to avoid
71 * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
72 */
73
74 #define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
75 #define RAM_SAVE_FLAG_ZERO 0x02
76 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
77 #define RAM_SAVE_FLAG_PAGE 0x08
78 #define RAM_SAVE_FLAG_EOS 0x10
79 #define RAM_SAVE_FLAG_CONTINUE 0x20
80 #define RAM_SAVE_FLAG_XBZRLE 0x40
81 /* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
82 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
83
84 XBZRLECacheStats xbzrle_counters;
85
86 /* struct contains XBZRLE cache and a static page
87 used by the compression */
88 static struct {
89 /* buffer used for XBZRLE encoding */
90 uint8_t *encoded_buf;
91 /* buffer for storing page content */
92 uint8_t *current_buf;
93 /* Cache for XBZRLE, Protected by lock. */
94 PageCache *cache;
95 QemuMutex lock;
96 /* it will store a page full of zeros */
97 uint8_t *zero_target_page;
98 /* buffer used for XBZRLE decoding */
99 uint8_t *decoded_buf;
100 } XBZRLE;
101
102 static void XBZRLE_cache_lock(void)
103 {
104 if (migrate_use_xbzrle()) {
105 qemu_mutex_lock(&XBZRLE.lock);
106 }
107 }
108
109 static void XBZRLE_cache_unlock(void)
110 {
111 if (migrate_use_xbzrle()) {
112 qemu_mutex_unlock(&XBZRLE.lock);
113 }
114 }
115
116 /**
117 * xbzrle_cache_resize: resize the xbzrle cache
118 *
119 * This function is called from migrate_params_apply in the main
120 * thread, possibly while a migration is in progress. A running
121 * migration may be using the cache and might finish during this call,
122 * hence changes to the cache are protected by XBZRLE.lock.
123 *
124 * Returns 0 for success or -1 for error
125 *
126 * @new_size: new cache size
127 * @errp: set *errp if the check failed, with reason
128 */
129 int xbzrle_cache_resize(uint64_t new_size, Error **errp)
130 {
131 PageCache *new_cache;
132 int64_t ret = 0;
133
134 /* Check for truncation */
135 if (new_size != (size_t)new_size) {
136 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
137 "exceeding address space");
138 return -1;
139 }
140
141 if (new_size == migrate_xbzrle_cache_size()) {
142 /* nothing to do */
143 return 0;
144 }
145
146 XBZRLE_cache_lock();
147
148 if (XBZRLE.cache != NULL) {
149 new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
150 if (!new_cache) {
151 ret = -1;
152 goto out;
153 }
154
155 cache_fini(XBZRLE.cache);
156 XBZRLE.cache = new_cache;
157 }
158 out:
159 XBZRLE_cache_unlock();
160 return ret;
161 }
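/*
 * Minimal usage sketch (illustration only, not part of the original file):
 * how a caller such as migrate_params_apply() could drive the resize. The
 * MiB constant is assumed to come from "qemu/units.h".
 *
 *     Error *local_err = NULL;
 *
 *     if (xbzrle_cache_resize(256 * MiB, &local_err) < 0) {
 *         error_report_err(local_err);
 *     }
 */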
162
163 bool ramblock_is_ignored(RAMBlock *block)
164 {
165 return !qemu_ram_is_migratable(block) ||
166 (migrate_ignore_shared() && qemu_ram_is_shared(block));
167 }
168
169 #undef RAMBLOCK_FOREACH
170
171 int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
172 {
173 RAMBlock *block;
174 int ret = 0;
175
176 RCU_READ_LOCK_GUARD();
177
178 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
179 ret = func(block, opaque);
180 if (ret) {
181 break;
182 }
183 }
184 return ret;
185 }
186
187 static void ramblock_recv_map_init(void)
188 {
189 RAMBlock *rb;
190
191 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
192 assert(!rb->receivedmap);
193 rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
194 }
195 }
196
197 int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
198 {
199 return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
200 rb->receivedmap);
201 }
202
203 bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
204 {
205 return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
206 }
207
208 void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
209 {
210 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
211 }
212
213 void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
214 size_t nr)
215 {
216 bitmap_set_atomic(rb->receivedmap,
217 ramblock_recv_bitmap_offset(host_addr, rb),
218 nr);
219 }
220
221 #define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)
222
223 /*
224 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
225 *
226 * Returns the number of bytes sent (>0) on success, or <0 on error.
227 */
228 int64_t ramblock_recv_bitmap_send(QEMUFile *file,
229 const char *block_name)
230 {
231 RAMBlock *block = qemu_ram_block_by_name(block_name);
232 unsigned long *le_bitmap, nbits;
233 uint64_t size;
234
235 if (!block) {
236 error_report("%s: invalid block name: %s", __func__, block_name);
237 return -1;
238 }
239
240 nbits = block->postcopy_length >> TARGET_PAGE_BITS;
241
242 /*
243 * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
244 * machines we may need 4 more bytes for padding (see below
245 * comment). So extend it a bit beforehand.
246 */
247 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
248
249 /*
250 * Always use little endian when sending the bitmap. This is
251 * required so that the bitmap stays correct when source and destination
252 * VMs do not use the same endianness. (Note: big endian won't work.)
253 */
254 bitmap_to_le(le_bitmap, block->receivedmap, nbits);
255
256 /* Size of the bitmap, in bytes */
257 size = DIV_ROUND_UP(nbits, 8);
258
259 /*
260 * size is always aligned to 8 bytes for 64bit machines, but it
261 * may not be true for 32bit machines. We need this padding to
262 * make sure the migration can survive even between 32bit and
263 * 64bit machines.
264 */
265 size = ROUND_UP(size, 8);
266
267 qemu_put_be64(file, size);
268 qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
269 /*
270 * Mark as an end, in case the middle part is screwed up due to
271 * some "mysterious" reason.
272 */
273 qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
274 qemu_fflush(file);
275
276 g_free(le_bitmap);
277
278 if (qemu_file_get_error(file)) {
279 return qemu_file_get_error(file);
280 }
281
282 return size + sizeof(size);
283 }
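/*
 * Illustrative sketch (not part of the original file; the helper name is
 * made up for illustration): a parser for the stream layout produced above
 * -- be64 size, the little-endian bitmap of 'size' bytes, then the
 * RAMBLOCK_RECV_BITMAP_ENDING marker. The real source-side loader is
 * ram_dirty_bitmap_reload(); this sketch only shows the wire format and
 * assumes 'le_bitmap' has room for 'size' bytes.
 */
static inline int64_t recv_bitmap_parse_sketch(QEMUFile *file,
                                               unsigned long *le_bitmap)
{
    uint64_t size = qemu_get_be64(file);   /* bitmap size in bytes, 8-aligned */

    qemu_get_buffer(file, (uint8_t *)le_bitmap, size);

    if (qemu_get_be64(file) != RAMBLOCK_RECV_BITMAP_ENDING) {
        return -1;                         /* stream corrupted or truncated */
    }
    return size + sizeof(size);
}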
284
285 /*
286 * An outstanding page request, on the source, having been received
287 * and queued
288 */
289 struct RAMSrcPageRequest {
290 RAMBlock *rb;
291 hwaddr offset;
292 hwaddr len;
293
294 QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
295 };
296
297 /* State of RAM for migration */
298 struct RAMState {
299 /* QEMUFile used for this migration */
300 QEMUFile *f;
301 /* UFFD file descriptor, used in 'write-tracking' migration */
302 int uffdio_fd;
303 /* Last block that we have visited searching for dirty pages */
304 RAMBlock *last_seen_block;
305 /* Last block from where we have sent data */
306 RAMBlock *last_sent_block;
307 /* Last dirty target page we have sent */
308 ram_addr_t last_page;
309 /* last ram version we have seen */
310 uint32_t last_version;
311 /* How many times we have dirty too many pages */
312 int dirty_rate_high_cnt;
313 /* these variables are used for bitmap sync */
314 /* last time we did a full bitmap_sync */
315 int64_t time_last_bitmap_sync;
316 /* bytes transferred at start_time */
317 uint64_t bytes_xfer_prev;
318 /* number of dirty pages since start_time */
319 uint64_t num_dirty_pages_period;
320 /* xbzrle misses since the beginning of the period */
321 uint64_t xbzrle_cache_miss_prev;
322 /* Amount of xbzrle pages since the beginning of the period */
323 uint64_t xbzrle_pages_prev;
324 /* Amount of xbzrle encoded bytes since the beginning of the period */
325 uint64_t xbzrle_bytes_prev;
326 /* Start using XBZRLE (e.g., after the first round). */
327 bool xbzrle_enabled;
328 /* Are we on the last stage of migration */
329 bool last_stage;
330 /* compression statistics since the beginning of the period */
331 /* number of times there was no free thread to compress data */
332 uint64_t compress_thread_busy_prev;
333 /* amount of bytes after compression */
334 uint64_t compressed_size_prev;
335 /* number of compressed pages */
336 uint64_t compress_pages_prev;
337
338 /* total handled target pages at the beginning of period */
339 uint64_t target_page_count_prev;
340 /* total handled target pages since start */
341 uint64_t target_page_count;
342 /* number of dirty bits in the bitmap */
343 uint64_t migration_dirty_pages;
344 /* Protects modification of the bitmap and migration dirty pages */
345 QemuMutex bitmap_mutex;
346 /* The RAMBlock used in the last src_page_requests */
347 RAMBlock *last_req_rb;
348 /* Queue of outstanding page requests from the destination */
349 QemuMutex src_page_req_mutex;
350 QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
351 };
352 typedef struct RAMState RAMState;
353
354 static RAMState *ram_state;
355
356 static NotifierWithReturnList precopy_notifier_list;
357
358 void precopy_infrastructure_init(void)
359 {
360 notifier_with_return_list_init(&precopy_notifier_list);
361 }
362
363 void precopy_add_notifier(NotifierWithReturn *n)
364 {
365 notifier_with_return_list_add(&precopy_notifier_list, n);
366 }
367
368 void precopy_remove_notifier(NotifierWithReturn *n)
369 {
370 notifier_with_return_remove(n);
371 }
372
373 int precopy_notify(PrecopyNotifyReason reason, Error **errp)
374 {
375 PrecopyNotifyData pnd;
376 pnd.reason = reason;
377 pnd.errp = errp;
378
379 return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
380 }
381
382 uint64_t ram_bytes_remaining(void)
383 {
384 return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
385 0;
386 }
387
388 MigrationStats ram_counters;
389
390 /* used by the search for pages to send */
391 struct PageSearchStatus {
392 /* Current block being searched */
393 RAMBlock *block;
394 /* Current page to search from */
395 unsigned long page;
396 /* Set once we wrap around */
397 bool complete_round;
398 };
399 typedef struct PageSearchStatus PageSearchStatus;
400
401 CompressionStats compression_counters;
402
403 struct CompressParam {
404 bool done;
405 bool quit;
406 bool zero_page;
407 QEMUFile *file;
408 QemuMutex mutex;
409 QemuCond cond;
410 RAMBlock *block;
411 ram_addr_t offset;
412
413 /* internally used fields */
414 z_stream stream;
415 uint8_t *originbuf;
416 };
417 typedef struct CompressParam CompressParam;
418
419 struct DecompressParam {
420 bool done;
421 bool quit;
422 QemuMutex mutex;
423 QemuCond cond;
424 void *des;
425 uint8_t *compbuf;
426 int len;
427 z_stream stream;
428 };
429 typedef struct DecompressParam DecompressParam;
430
431 static CompressParam *comp_param;
432 static QemuThread *compress_threads;
433 /* comp_done_cond is used to wake up the migration thread when
434 * one of the compression threads has finished the compression.
435 * comp_done_lock is used together with comp_done_cond.
436 */
437 static QemuMutex comp_done_lock;
438 static QemuCond comp_done_cond;
439 /* The empty QEMUFileOps will be used by file in CompressParam */
440 static const QEMUFileOps empty_ops = { };
441
442 static QEMUFile *decomp_file;
443 static DecompressParam *decomp_param;
444 static QemuThread *decompress_threads;
445 static QemuMutex decomp_done_lock;
446 static QemuCond decomp_done_cond;
447
448 static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
449 ram_addr_t offset, uint8_t *source_buf);
450
451 static void *do_data_compress(void *opaque)
452 {
453 CompressParam *param = opaque;
454 RAMBlock *block;
455 ram_addr_t offset;
456 bool zero_page;
457
458 qemu_mutex_lock(&param->mutex);
459 while (!param->quit) {
460 if (param->block) {
461 block = param->block;
462 offset = param->offset;
463 param->block = NULL;
464 qemu_mutex_unlock(&param->mutex);
465
466 zero_page = do_compress_ram_page(param->file, &param->stream,
467 block, offset, param->originbuf);
468
469 qemu_mutex_lock(&comp_done_lock);
470 param->done = true;
471 param->zero_page = zero_page;
472 qemu_cond_signal(&comp_done_cond);
473 qemu_mutex_unlock(&comp_done_lock);
474
475 qemu_mutex_lock(&param->mutex);
476 } else {
477 qemu_cond_wait(&param->cond, &param->mutex);
478 }
479 }
480 qemu_mutex_unlock(&param->mutex);
481
482 return NULL;
483 }
484
485 static void compress_threads_save_cleanup(void)
486 {
487 int i, thread_count;
488
489 if (!migrate_use_compression() || !comp_param) {
490 return;
491 }
492
493 thread_count = migrate_compress_threads();
494 for (i = 0; i < thread_count; i++) {
495 /*
496 * we use it as an indicator of whether the thread has been
497 * properly initialized or not
498 */
499 if (!comp_param[i].file) {
500 break;
501 }
502
503 qemu_mutex_lock(&comp_param[i].mutex);
504 comp_param[i].quit = true;
505 qemu_cond_signal(&comp_param[i].cond);
506 qemu_mutex_unlock(&comp_param[i].mutex);
507
508 qemu_thread_join(compress_threads + i);
509 qemu_mutex_destroy(&comp_param[i].mutex);
510 qemu_cond_destroy(&comp_param[i].cond);
511 deflateEnd(&comp_param[i].stream);
512 g_free(comp_param[i].originbuf);
513 qemu_fclose(comp_param[i].file);
514 comp_param[i].file = NULL;
515 }
516 qemu_mutex_destroy(&comp_done_lock);
517 qemu_cond_destroy(&comp_done_cond);
518 g_free(compress_threads);
519 g_free(comp_param);
520 compress_threads = NULL;
521 comp_param = NULL;
522 }
523
524 static int compress_threads_save_setup(void)
525 {
526 int i, thread_count;
527
528 if (!migrate_use_compression()) {
529 return 0;
530 }
531 thread_count = migrate_compress_threads();
532 compress_threads = g_new0(QemuThread, thread_count);
533 comp_param = g_new0(CompressParam, thread_count);
534 qemu_cond_init(&comp_done_cond);
535 qemu_mutex_init(&comp_done_lock);
536 for (i = 0; i < thread_count; i++) {
537 comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
538 if (!comp_param[i].originbuf) {
539 goto exit;
540 }
541
542 if (deflateInit(&comp_param[i].stream,
543 migrate_compress_level()) != Z_OK) {
544 g_free(comp_param[i].originbuf);
545 goto exit;
546 }
547
548 /* comp_param[i].file is just used as a dummy buffer to save data,
549 * set its ops to empty.
550 */
551 comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops, false);
552 comp_param[i].done = true;
553 comp_param[i].quit = false;
554 qemu_mutex_init(&comp_param[i].mutex);
555 qemu_cond_init(&comp_param[i].cond);
556 qemu_thread_create(compress_threads + i, "compress",
557 do_data_compress, comp_param + i,
558 QEMU_THREAD_JOINABLE);
559 }
560 return 0;
561
562 exit:
563 compress_threads_save_cleanup();
564 return -1;
565 }
566
567 /**
568 * save_page_header: write page header to wire
569 *
570 * If the block changed since the last page sent, it also writes the block identification
571 *
572 * Returns the number of bytes written
573 *
574 * @f: QEMUFile where to send the data
575 * @block: block that contains the page we want to send
576 * @offset: offset inside the block for the page
577 * in the lower bits, it contains flags
578 */
579 static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
580 ram_addr_t offset)
581 {
582 size_t size, len;
583
584 if (block == rs->last_sent_block) {
585 offset |= RAM_SAVE_FLAG_CONTINUE;
586 }
587 qemu_put_be64(f, offset);
588 size = 8;
589
590 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
591 len = strlen(block->idstr);
592 qemu_put_byte(f, len);
593 qemu_put_buffer(f, (uint8_t *)block->idstr, len);
594 size += 1 + len;
595 rs->last_sent_block = block;
596 }
597 return size;
598 }
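/*
 * Wire layout produced by save_page_header() (illustration only, derived
 * from the code above):
 *
 *   new block:        be64(offset | flags) | u8 len | idstr[len]
 *   same block again: be64(offset | flags | RAM_SAVE_FLAG_CONTINUE)
 *
 * so the returned size is 8 bytes in the CONTINUE case and 8 + 1 + len
 * bytes whenever the block identification has to be resent.
 */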
599
600 /**
601 * mig_throttle_guest_down: throttle down the guest
602 *
603 * Reduce amount of guest cpu execution to hopefully slow down memory
604 * writes. If guest dirty memory rate is reduced below the rate at
605 * which we can transfer pages to the destination then we should be
606 * able to complete migration. Some workloads dirty memory way too
607 * fast and will not effectively converge, even with auto-converge.
608 */
609 static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
610 uint64_t bytes_dirty_threshold)
611 {
612 MigrationState *s = migrate_get_current();
613 uint64_t pct_initial = s->parameters.cpu_throttle_initial;
614 uint64_t pct_increment = s->parameters.cpu_throttle_increment;
615 bool pct_tailslow = s->parameters.cpu_throttle_tailslow;
616 int pct_max = s->parameters.max_cpu_throttle;
617
618 uint64_t throttle_now = cpu_throttle_get_percentage();
619 uint64_t cpu_now, cpu_ideal, throttle_inc;
620
621 /* We have not started throttling yet. Let's start it. */
622 if (!cpu_throttle_active()) {
623 cpu_throttle_set(pct_initial);
624 } else {
625 /* Throttling already on, just increase the rate */
626 if (!pct_tailslow) {
627 throttle_inc = pct_increment;
628 } else {
629 /* Compute the ideal CPU percentage used by Guest, which may
630 * make the dirty rate match the dirty rate threshold. */
631 cpu_now = 100 - throttle_now;
632 cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
633 bytes_dirty_period);
634 throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
635 }
636 cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
637 }
638 }
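/*
 * Worked example of the tailslow branch above (illustration only): with
 * throttle_now = 20%, cpu_now = 80%. If the guest dirtied twice the
 * threshold amount, i.e. bytes_dirty_threshold / bytes_dirty_period = 0.5,
 * then cpu_ideal = 80 * 0.5 = 40%, throttle_inc = MIN(80 - 40,
 * pct_increment), and the new throttle becomes
 * MIN(20 + throttle_inc, pct_max).
 */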
639
640 void mig_throttle_counter_reset(void)
641 {
642 RAMState *rs = ram_state;
643
644 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
645 rs->num_dirty_pages_period = 0;
646 rs->bytes_xfer_prev = ram_counters.transferred;
647 }
648
649 /**
650 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
651 *
652 * @rs: current RAM state
653 * @current_addr: address for the zero page
654 *
655 * Update the xbzrle cache to reflect a page that's been sent as all 0.
656 * The important thing is that a stale (not-yet-0'd) page be replaced
657 * by the new data.
658 * As a bonus, if the page wasn't in the cache it gets added so that
659 * when a small write is made into the 0'd page it gets XBZRLE sent.
660 */
661 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
662 {
663 if (!rs->xbzrle_enabled) {
664 return;
665 }
666
667 /* We don't care if this fails to allocate a new cache page
668 * as long as it updated an old one */
669 cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
670 ram_counters.dirty_sync_count);
671 }
672
673 #define ENCODING_FLAG_XBZRLE 0x1
674
675 /**
676 * save_xbzrle_page: compress and send current page
677 *
678 * Returns: 1 means that we wrote the page
679 * 0 means that page is identical to the one already sent
680 * -1 means that xbzrle would be longer than normal
681 *
682 * @rs: current RAM state
683 * @current_data: pointer to the address of the page contents
684 * @current_addr: addr of the page
685 * @block: block that contains the page we want to send
686 * @offset: offset inside the block for the page
687 */
688 static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
689 ram_addr_t current_addr, RAMBlock *block,
690 ram_addr_t offset)
691 {
692 int encoded_len = 0, bytes_xbzrle;
693 uint8_t *prev_cached_page;
694
695 if (!cache_is_cached(XBZRLE.cache, current_addr,
696 ram_counters.dirty_sync_count)) {
697 xbzrle_counters.cache_miss++;
698 if (!rs->last_stage) {
699 if (cache_insert(XBZRLE.cache, current_addr, *current_data,
700 ram_counters.dirty_sync_count) == -1) {
701 return -1;
702 } else {
703 /* update *current_data when the page has been
704 inserted into cache */
705 *current_data = get_cached_data(XBZRLE.cache, current_addr);
706 }
707 }
708 return -1;
709 }
710
711 /*
712 * Reaching here means the page has hit the xbzrle cache, no matter what
713 * encoding result it is (normal encoding, overflow or skipping the page),
714 * count the page as encoded. This is used to calculate the encoding rate.
715 *
716 * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
717 * 2nd page turns out to be skipped (i.e. no new bytes written to the
718 * page), the overall encoding rate will be 8KB / 2KB = 4, which has the
719 * skipped page included. In this way, the encoding rate can tell if the
720 * guest page is good for xbzrle encoding.
721 */
722 xbzrle_counters.pages++;
723 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
724
725 /* save current buffer into memory */
726 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
727
728 /* XBZRLE encoding (if there is no overflow) */
729 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
730 TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
731 TARGET_PAGE_SIZE);
732
733 /*
734 * Update the cache contents, so that it corresponds to the data
735 * sent, in all cases except where we skip the page.
736 */
737 if (!rs->last_stage && encoded_len != 0) {
738 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
739 /*
740 * In the case where we couldn't compress, ensure that the caller
741 * sends the data from the cache, since the guest might have
742 * changed the RAM since we copied it.
743 */
744 *current_data = prev_cached_page;
745 }
746
747 if (encoded_len == 0) {
748 trace_save_xbzrle_page_skipping();
749 return 0;
750 } else if (encoded_len == -1) {
751 trace_save_xbzrle_page_overflow();
752 xbzrle_counters.overflow++;
753 xbzrle_counters.bytes += TARGET_PAGE_SIZE;
754 return -1;
755 }
756
757 /* Send XBZRLE based compressed page */
758 bytes_xbzrle = save_page_header(rs, rs->f, block,
759 offset | RAM_SAVE_FLAG_XBZRLE);
760 qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
761 qemu_put_be16(rs->f, encoded_len);
762 qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
763 bytes_xbzrle += encoded_len + 1 + 2;
764 /*
765 * Like compressed_size (please see update_compress_thread_counts),
766 * the xbzrle encoded bytes don't count the 8 byte header with
767 * RAM_SAVE_FLAG_CONTINUE.
768 */
769 xbzrle_counters.bytes += bytes_xbzrle - 8;
770 ram_counters.transferred += bytes_xbzrle;
771
772 return 1;
773 }
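/*
 * Wire layout of an XBZRLE page (illustration only, derived from the code
 * above): the usual page header, one byte ENCODING_FLAG_XBZRLE, a be16
 * encoded_len, then encoded_len bytes of encoded data, which is why
 * bytes_xbzrle is incremented by encoded_len + 1 + 2 on top of the header.
 */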
774
775 /**
776 * migration_bitmap_find_dirty: find the next dirty page from start
777 *
778 * Returns the page offset within memory region of the start of a dirty page
779 *
780 * @rs: current RAM state
781 * @rb: RAMBlock where to search for dirty pages
782 * @start: page where we start the search
783 */
784 static inline
785 unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
786 unsigned long start)
787 {
788 unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
789 unsigned long *bitmap = rb->bmap;
790
791 if (ramblock_is_ignored(rb)) {
792 return size;
793 }
794
795 return find_next_bit(bitmap, size, start);
796 }
797
798 static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb,
799 unsigned long page)
800 {
801 uint8_t shift;
802 hwaddr size, start;
803
804 if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) {
805 return;
806 }
807
808 shift = rb->clear_bmap_shift;
809 /*
810 * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
811 * can make things easier sometimes since then start address
812 * of the small chunk will always be 64 pages aligned so the
813 * bitmap will always be aligned to unsigned long. We should
814 * even be able to remove this restriction but I'm simply
815 * keeping it.
816 */
817 assert(shift >= 6);
818
819 size = 1ULL << (TARGET_PAGE_BITS + shift);
820 start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size);
821 trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
822 memory_region_clear_dirty_bitmap(rb->mr, start, size);
823 }
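/*
 * Worked example (illustration only): with a 4 KiB target page
 * (TARGET_PAGE_BITS = 12) and a clear_bmap_shift of 18, one clear-bitmap
 * bit covers 1 << (12 + 18) bytes = 1 GiB of guest memory, and 'start'
 * above is the 1 GiB aligned base of the chunk containing 'page'.
 */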
824
825 static void
826 migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb,
827 unsigned long start,
828 unsigned long npages)
829 {
830 unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift;
831 unsigned long chunk_start = QEMU_ALIGN_DOWN(start, chunk_pages);
832 unsigned long chunk_end = QEMU_ALIGN_UP(start + npages, chunk_pages);
833
834 /*
835 * Clear pages from start to start + npages - 1, so the end boundary is
836 * exclusive.
837 */
838 for (i = chunk_start; i < chunk_end; i += chunk_pages) {
839 migration_clear_memory_region_dirty_bitmap(rb, i);
840 }
841 }
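/*
 * Worked example (illustration only): assuming clear_bmap_shift = 6, so
 * chunk_pages = 64, a call with start = 100 and npages = 300 yields
 * chunk_start = 64 and chunk_end = 448, i.e. every 64-page chunk
 * overlapping pages [100, 399] is cleared exactly once.
 */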
842
843 /*
844 * colo_bitmap_find_dirty: find contiguous dirty pages from start
845 *
846 * Returns the page offset within memory region of the start of the
847 * contiguous dirty pages
848 *
849 * @rs: current RAM state
850 * @rb: RAMBlock where to search for dirty pages
851 * @start: page where we start the search
852 * @num: the number of contiguous dirty pages
853 */
854 static inline
855 unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
856 unsigned long start, unsigned long *num)
857 {
858 unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
859 unsigned long *bitmap = rb->bmap;
860 unsigned long first, next;
861
862 *num = 0;
863
864 if (ramblock_is_ignored(rb)) {
865 return size;
866 }
867
868 first = find_next_bit(bitmap, size, start);
869 if (first >= size) {
870 return first;
871 }
872 next = find_next_zero_bit(bitmap, size, first + 1);
873 assert(next >= first);
874 *num = next - first;
875 return first;
876 }
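/*
 * Worked example (illustration only): for a dirty bitmap whose low bits are
 * 0b00111100 (bit 0 = first page), a call with start = 1 returns first = 2
 * with *num = 4, i.e. the contiguous dirty run covers pages 2..5.
 */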
877
878 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
879 RAMBlock *rb,
880 unsigned long page)
881 {
882 bool ret;
883
884 /*
885 * Clear the dirty bitmap if needed. This _must_ be called before we
886 * send any of the pages in the chunk, because we need to make sure
887 * we can capture further page content changes when we sync the dirty
888 * log the next time. So as long as we are going to send any of
889 * the pages in the chunk, we clear the remote dirty bitmap for all.
890 * Clearing it earlier won't be a problem, but clearing it too late will.
891 */
892 migration_clear_memory_region_dirty_bitmap(rb, page);
893
894 ret = test_and_clear_bit(page, rb->bmap);
895 if (ret) {
896 rs->migration_dirty_pages--;
897 }
898
899 return ret;
900 }
901
902 static void dirty_bitmap_clear_section(MemoryRegionSection *section,
903 void *opaque)
904 {
905 const hwaddr offset = section->offset_within_region;
906 const hwaddr size = int128_get64(section->size);
907 const unsigned long start = offset >> TARGET_PAGE_BITS;
908 const unsigned long npages = size >> TARGET_PAGE_BITS;
909 RAMBlock *rb = section->mr->ram_block;
910 uint64_t *cleared_bits = opaque;
911
912 /*
913 * We don't grab ram_state->bitmap_mutex because we expect to run
914 * only when starting migration or during postcopy recovery where
915 * we don't have concurrent access.
916 */
917 if (!migration_in_postcopy() && !migrate_background_snapshot()) {
918 migration_clear_memory_region_dirty_bitmap_range(rb, start, npages);
919 }
920 *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages);
921 bitmap_clear(rb->bmap, start, npages);
922 }
923
924 /*
925 * Exclude all dirty pages from migration that fall into a discarded range as
926 * managed by a RamDiscardManager responsible for the mapped memory region of
927 * the RAMBlock. Clear the corresponding bits in the dirty bitmaps.
928 *
929 * Discarded pages ("logically unplugged") have undefined content and must
930 * not get migrated, because even reading these pages for migration might
931 * result in undesired behavior.
932 *
933 * Returns the number of cleared bits in the RAMBlock dirty bitmap.
934 *
935 * Note: The result is only stable while migrating (precopy/postcopy).
936 */
937 static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb)
938 {
939 uint64_t cleared_bits = 0;
940
941 if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) {
942 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
943 MemoryRegionSection section = {
944 .mr = rb->mr,
945 .offset_within_region = 0,
946 .size = int128_make64(qemu_ram_get_used_length(rb)),
947 };
948
949 ram_discard_manager_replay_discarded(rdm, &section,
950 dirty_bitmap_clear_section,
951 &cleared_bits);
952 }
953 return cleared_bits;
954 }
955
956 /*
957 * Check if a host-page aligned page falls into a discarded range as managed by
958 * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
959 *
960 * Note: The result is only stable while migrating (precopy/postcopy).
961 */
962 bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
963 {
964 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
965 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
966 MemoryRegionSection section = {
967 .mr = rb->mr,
968 .offset_within_region = start,
969 .size = int128_make64(qemu_ram_pagesize(rb)),
970 };
971
972 return !ram_discard_manager_is_populated(rdm, &section);
973 }
974 return false;
975 }
976
977 /* Called with RCU critical section */
978 static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
979 {
980 uint64_t new_dirty_pages =
981 cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);
982
983 rs->migration_dirty_pages += new_dirty_pages;
984 rs->num_dirty_pages_period += new_dirty_pages;
985 }
986
987 /**
988 * ram_pagesize_summary: calculate all the pagesizes of a VM
989 *
990 * Returns a summary bitmap of the page sizes of all RAMBlocks
991 *
992 * For VMs with just normal pages this is equivalent to the host page
993 * size. If it has some huge pages, then it's the OR of all the
994 * different page sizes.
995 */
996 uint64_t ram_pagesize_summary(void)
997 {
998 RAMBlock *block;
999 uint64_t summary = 0;
1000
1001 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1002 summary |= block->page_size;
1003 }
1004
1005 return summary;
1006 }
1007
1008 uint64_t ram_get_total_transferred_pages(void)
1009 {
1010 return ram_counters.normal + ram_counters.duplicate +
1011 compression_counters.pages + xbzrle_counters.pages;
1012 }
1013
1014 static void migration_update_rates(RAMState *rs, int64_t end_time)
1015 {
1016 uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
1017 double compressed_size;
1018
1019 /* calculate period counters */
1020 ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
1021 / (end_time - rs->time_last_bitmap_sync);
1022
1023 if (!page_count) {
1024 return;
1025 }
1026
1027 if (migrate_use_xbzrle()) {
1028 double encoded_size, unencoded_size;
1029
1030 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
1031 rs->xbzrle_cache_miss_prev) / page_count;
1032 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
1033 unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) *
1034 TARGET_PAGE_SIZE;
1035 encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev;
1036 if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) {
1037 xbzrle_counters.encoding_rate = 0;
1038 } else {
1039 xbzrle_counters.encoding_rate = unencoded_size / encoded_size;
1040 }
1041 rs->xbzrle_pages_prev = xbzrle_counters.pages;
1042 rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
1043 }
1044
1045 if (migrate_use_compression()) {
1046 compression_counters.busy_rate = (double)(compression_counters.busy -
1047 rs->compress_thread_busy_prev) / page_count;
1048 rs->compress_thread_busy_prev = compression_counters.busy;
1049
1050 compressed_size = compression_counters.compressed_size -
1051 rs->compressed_size_prev;
1052 if (compressed_size) {
1053 double uncompressed_size = (compression_counters.pages -
1054 rs->compress_pages_prev) * TARGET_PAGE_SIZE;
1055
1056 /* Compression-Ratio = Uncompressed-size / Compressed-size */
1057 compression_counters.compression_rate =
1058 uncompressed_size / compressed_size;
1059
1060 rs->compress_pages_prev = compression_counters.pages;
1061 rs->compressed_size_prev = compression_counters.compressed_size;
1062 }
1063 }
1064 }
1065
1066 static void migration_trigger_throttle(RAMState *rs)
1067 {
1068 MigrationState *s = migrate_get_current();
1069 uint64_t threshold = s->parameters.throttle_trigger_threshold;
1070
1071 uint64_t bytes_xfer_period = ram_counters.transferred - rs->bytes_xfer_prev;
1072 uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
1073 uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
1074
1075 /* During block migration the auto-converge logic incorrectly detects
1076 * that ram migration makes no progress. Avoid this by disabling the
1077 * throttling logic during the bulk phase of block migration. */
1078 if (migrate_auto_converge() && !blk_mig_bulk_active()) {
1079 /* The following detection logic can be refined later. For now:
1080 Check to see if the ratio between dirtied bytes and the approx.
1081 amount of bytes that just got transferred since the last time
1082 we were in this routine reaches the threshold. If that happens
1083 twice, start or increase throttling. */
1084
1085 if ((bytes_dirty_period > bytes_dirty_threshold) &&
1086 (++rs->dirty_rate_high_cnt >= 2)) {
1087 trace_migration_throttle();
1088 rs->dirty_rate_high_cnt = 0;
1089 mig_throttle_guest_down(bytes_dirty_period,
1090 bytes_dirty_threshold);
1091 }
1092 }
1093 }
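/*
 * Worked example (illustration only): with throttle_trigger_threshold = 50
 * and 1 GiB transferred during the period, bytes_dirty_threshold = 512 MiB.
 * If the guest dirties more than that in two sync periods,
 * mig_throttle_guest_down() is invoked and the counter is reset.
 */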
1094
1095 static void migration_bitmap_sync(RAMState *rs)
1096 {
1097 RAMBlock *block;
1098 int64_t end_time;
1099
1100 ram_counters.dirty_sync_count++;
1101
1102 if (!rs->time_last_bitmap_sync) {
1103 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1104 }
1105
1106 trace_migration_bitmap_sync_start();
1107 memory_global_dirty_log_sync();
1108
1109 qemu_mutex_lock(&rs->bitmap_mutex);
1110 WITH_RCU_READ_LOCK_GUARD() {
1111 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1112 ramblock_sync_dirty_bitmap(rs, block);
1113 }
1114 ram_counters.remaining = ram_bytes_remaining();
1115 }
1116 qemu_mutex_unlock(&rs->bitmap_mutex);
1117
1118 memory_global_after_dirty_log_sync();
1119 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1120
1121 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1122
1123 /* more than 1 second = 1000 milliseconds */
1124 if (end_time > rs->time_last_bitmap_sync + 1000) {
1125 migration_trigger_throttle(rs);
1126
1127 migration_update_rates(rs, end_time);
1128
1129 rs->target_page_count_prev = rs->target_page_count;
1130
1131 /* reset period counters */
1132 rs->time_last_bitmap_sync = end_time;
1133 rs->num_dirty_pages_period = 0;
1134 rs->bytes_xfer_prev = ram_counters.transferred;
1135 }
1136 if (migrate_use_events()) {
1137 qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
1138 }
1139 }
1140
1141 static void migration_bitmap_sync_precopy(RAMState *rs)
1142 {
1143 Error *local_err = NULL;
1144
1145 /*
1146 * The current notifier usage is just an optimization for migration, so we
1147 * don't stop the normal migration process in the error case.
1148 */
1149 if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
1150 error_report_err(local_err);
1151 local_err = NULL;
1152 }
1153
1154 migration_bitmap_sync(rs);
1155
1156 if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
1157 error_report_err(local_err);
1158 }
1159 }
1160
1161 static void ram_release_page(const char *rbname, uint64_t offset)
1162 {
1163 if (!migrate_release_ram() || !migration_in_postcopy()) {
1164 return;
1165 }
1166
1167 ram_discard_range(rbname, offset, TARGET_PAGE_SIZE);
1168 }
1169
1170 /**
1171 * save_zero_page_to_file: send the zero page to the file
1172 *
1173 * Returns the size of data written to the file, 0 means the page is not
1174 * a zero page
1175 *
1176 * @rs: current RAM state
1177 * @file: the file where the data is saved
1178 * @block: block that contains the page we want to send
1179 * @offset: offset inside the block for the page
1180 */
1181 static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
1182 RAMBlock *block, ram_addr_t offset)
1183 {
1184 uint8_t *p = block->host + offset;
1185 int len = 0;
1186
1187 if (buffer_is_zero(p, TARGET_PAGE_SIZE)) {
1188 len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
1189 qemu_put_byte(file, 0);
1190 len += 1;
1191 ram_release_page(block->idstr, offset);
1192 }
1193 return len;
1194 }
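/*
 * Wire layout of a zero page (illustration only, derived from the code
 * above): the usual page header with RAM_SAVE_FLAG_ZERO set, followed by a
 * single 0 byte, hence the returned len of header size + 1.
 */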
1195
1196 /**
1197 * save_zero_page: send the zero page to the stream
1198 *
1199 * Returns the number of pages written.
1200 *
1201 * @rs: current RAM state
1202 * @block: block that contains the page we want to send
1203 * @offset: offset inside the block for the page
1204 */
1205 static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
1206 {
1207 int len = save_zero_page_to_file(rs, rs->f, block, offset);
1208
1209 if (len) {
1210 ram_counters.duplicate++;
1211 ram_counters.transferred += len;
1212 return 1;
1213 }
1214 return -1;
1215 }
1216
1217 /*
1218 * @pages: the number of pages written by the control path,
1219 * < 0 - error
1220 * > 0 - number of pages written
1221 *
1222 * Return true if the page has been saved, otherwise false is returned.
1223 */
1224 static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1225 int *pages)
1226 {
1227 uint64_t bytes_xmit = 0;
1228 int ret;
1229
1230 *pages = -1;
1231 ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
1232 &bytes_xmit);
1233 if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1234 return false;
1235 }
1236
1237 if (bytes_xmit) {
1238 ram_counters.transferred += bytes_xmit;
1239 *pages = 1;
1240 }
1241
1242 if (ret == RAM_SAVE_CONTROL_DELAYED) {
1243 return true;
1244 }
1245
1246 if (bytes_xmit > 0) {
1247 ram_counters.normal++;
1248 } else if (bytes_xmit == 0) {
1249 ram_counters.duplicate++;
1250 }
1251
1252 return true;
1253 }
1254
1255 /*
1256 * directly send the page to the stream
1257 *
1258 * Returns the number of pages written.
1259 *
1260 * @rs: current RAM state
1261 * @block: block that contains the page we want to send
1262 * @offset: offset inside the block for the page
1263 * @buf: the page to be sent
1264 * @async: send the page asynchronously
1265 */
1266 static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1267 uint8_t *buf, bool async)
1268 {
1269 ram_counters.transferred += save_page_header(rs, rs->f, block,
1270 offset | RAM_SAVE_FLAG_PAGE);
1271 if (async) {
1272 qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
1273 migrate_release_ram() &&
1274 migration_in_postcopy());
1275 } else {
1276 qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
1277 }
1278 ram_counters.transferred += TARGET_PAGE_SIZE;
1279 ram_counters.normal++;
1280 return 1;
1281 }
1282
1283 /**
1284 * ram_save_page: send the given page to the stream
1285 *
1286 * Returns the number of pages written.
1287 * < 0 - error
1288 * >=0 - Number of pages written - this might legally be 0
1289 * if xbzrle noticed the page was the same.
1290 *
1291 * @rs: current RAM state
1292 * @block: block that contains the page we want to send
1293 * @offset: offset inside the block for the page
1294 */
1295 static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
1296 {
1297 int pages = -1;
1298 uint8_t *p;
1299 bool send_async = true;
1300 RAMBlock *block = pss->block;
1301 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
1302 ram_addr_t current_addr = block->offset + offset;
1303
1304 p = block->host + offset;
1305 trace_ram_save_page(block->idstr, (uint64_t)offset, p);
1306
1307 XBZRLE_cache_lock();
1308 if (rs->xbzrle_enabled && !migration_in_postcopy()) {
1309 pages = save_xbzrle_page(rs, &p, current_addr, block,
1310 offset);
1311 if (!rs->last_stage) {
1312 /* Can't send this cached data async, since the cache page
1313 * might get updated before it gets to the wire
1314 */
1315 send_async = false;
1316 }
1317 }
1318
1319 /* XBZRLE overflow or normal page */
1320 if (pages == -1) {
1321 pages = save_normal_page(rs, block, offset, p, send_async);
1322 }
1323
1324 XBZRLE_cache_unlock();
1325
1326 return pages;
1327 }
1328
1329 static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
1330 ram_addr_t offset)
1331 {
1332 if (multifd_queue_page(rs->f, block, offset) < 0) {
1333 return -1;
1334 }
1335 ram_counters.normal++;
1336
1337 return 1;
1338 }
1339
1340 static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
1341 ram_addr_t offset, uint8_t *source_buf)
1342 {
1343 RAMState *rs = ram_state;
1344 uint8_t *p = block->host + offset;
1345 int ret;
1346
1347 if (save_zero_page_to_file(rs, f, block, offset)) {
1348 return true;
1349 }
1350
1351 save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
1352
1353 /*
1354 * copy it to an internal buffer to avoid it being modified by the VM,
1355 * so that we can catch errors during compression and
1356 * decompression
1357 */
1358 memcpy(source_buf, p, TARGET_PAGE_SIZE);
1359 ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
1360 if (ret < 0) {
1361 qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
1362 error_report("compressed data failed!");
1363 }
1364 return false;
1365 }
1366
1367 static void
1368 update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
1369 {
1370 ram_counters.transferred += bytes_xmit;
1371
1372 if (param->zero_page) {
1373 ram_counters.duplicate++;
1374 return;
1375 }
1376
1377 /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
1378 compression_counters.compressed_size += bytes_xmit - 8;
1379 compression_counters.pages++;
1380 }
1381
1382 static bool save_page_use_compression(RAMState *rs);
1383
1384 static void flush_compressed_data(RAMState *rs)
1385 {
1386 int idx, len, thread_count;
1387
1388 if (!save_page_use_compression(rs)) {
1389 return;
1390 }
1391 thread_count = migrate_compress_threads();
1392
1393 qemu_mutex_lock(&comp_done_lock);
1394 for (idx = 0; idx < thread_count; idx++) {
1395 while (!comp_param[idx].done) {
1396 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
1397 }
1398 }
1399 qemu_mutex_unlock(&comp_done_lock);
1400
1401 for (idx = 0; idx < thread_count; idx++) {
1402 qemu_mutex_lock(&comp_param[idx].mutex);
1403 if (!comp_param[idx].quit) {
1404 len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
1405 /*
1406 * it's safe to fetch zero_page without holding comp_done_lock
1407 * as there is no further request submitted to the thread,
1408 * i.e., the thread should be waiting for a request at this point.
1409 */
1410 update_compress_thread_counts(&comp_param[idx], len);
1411 }
1412 qemu_mutex_unlock(&comp_param[idx].mutex);
1413 }
1414 }
1415
1416 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
1417 ram_addr_t offset)
1418 {
1419 param->block = block;
1420 param->offset = offset;
1421 }
1422
1423 static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
1424 ram_addr_t offset)
1425 {
1426 int idx, thread_count, bytes_xmit = -1, pages = -1;
1427 bool wait = migrate_compress_wait_thread();
1428
1429 thread_count = migrate_compress_threads();
1430 qemu_mutex_lock(&comp_done_lock);
1431 retry:
1432 for (idx = 0; idx < thread_count; idx++) {
1433 if (comp_param[idx].done) {
1434 comp_param[idx].done = false;
1435 bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
1436 qemu_mutex_lock(&comp_param[idx].mutex);
1437 set_compress_params(&comp_param[idx], block, offset);
1438 qemu_cond_signal(&comp_param[idx].cond);
1439 qemu_mutex_unlock(&comp_param[idx].mutex);
1440 pages = 1;
1441 update_compress_thread_counts(&comp_param[idx], bytes_xmit);
1442 break;
1443 }
1444 }
1445
1446 /*
1447 * wait for the free thread if the user specifies 'compress-wait-thread',
1448 * otherwise we will post the page out in the main thread as a normal page.
1449 */
1450 if (pages < 0 && wait) {
1451 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
1452 goto retry;
1453 }
1454 qemu_mutex_unlock(&comp_done_lock);
1455
1456 return pages;
1457 }
1458
1459 /**
1460 * find_dirty_block: find the next dirty page and update any state
1461 * associated with the search process.
1462 *
1463 * Returns true if a page is found
1464 *
1465 * @rs: current RAM state
1466 * @pss: data about the state of the current dirty page scan
1467 * @again: set to false if the search has scanned the whole of RAM
1468 */
1469 static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
1470 {
1471 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
1472 if (pss->complete_round && pss->block == rs->last_seen_block &&
1473 pss->page >= rs->last_page) {
1474 /*
1475 * We've been once around the RAM and haven't found anything.
1476 * Give up.
1477 */
1478 *again = false;
1479 return false;
1480 }
1481 if (!offset_in_ramblock(pss->block,
1482 ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) {
1483 /* Didn't find anything in this RAM Block */
1484 pss->page = 0;
1485 pss->block = QLIST_NEXT_RCU(pss->block, next);
1486 if (!pss->block) {
1487 /*
1488 * If memory migration starts over, we will meet a dirtied page
1489 * which may still exist in the compression threads' ring, so we
1490 * should flush the compressed data to make sure the new page
1491 * is not overwritten by the old one in the destination.
1492 *
1493 * Also, if xbzrle is on, stop using the data compression at this
1494 * point. In theory, xbzrle can do better than compression.
1495 */
1496 flush_compressed_data(rs);
1497
1498 /* Hit the end of the list */
1499 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1500 /* Flag that we've looped */
1501 pss->complete_round = true;
1502 /* After the first round, enable XBZRLE. */
1503 if (migrate_use_xbzrle()) {
1504 rs->xbzrle_enabled = true;
1505 }
1506 }
1507 /* Didn't find anything this time, but try again on the new block */
1508 *again = true;
1509 return false;
1510 } else {
1511 /* Can go around again, but... */
1512 *again = true;
1513 /* We've found something so probably don't need to */
1514 return true;
1515 }
1516 }
1517
1518 /**
1519 * unqueue_page: gets a page off the queue
1520 *
1521 * Helper for 'get_queued_page' - gets a page off the queue
1522 *
1523 * Returns the block of the page (or NULL if none available)
1524 *
1525 * @rs: current RAM state
1526 * @offset: used to return the offset within the RAMBlock
1527 */
1528 static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
1529 {
1530 RAMBlock *block = NULL;
1531
1532 if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
1533 return NULL;
1534 }
1535
1536 QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
1537 if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
1538 struct RAMSrcPageRequest *entry =
1539 QSIMPLEQ_FIRST(&rs->src_page_requests);
1540 block = entry->rb;
1541 *offset = entry->offset;
1542
1543 if (entry->len > TARGET_PAGE_SIZE) {
1544 entry->len -= TARGET_PAGE_SIZE;
1545 entry->offset += TARGET_PAGE_SIZE;
1546 } else {
1547 memory_region_unref(block->mr);
1548 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
1549 g_free(entry);
1550 migration_consume_urgent_request();
1551 }
1552 }
1553
1554 return block;
1555 }
1556
1557 #if defined(__linux__)
1558 /**
1559 * poll_fault_page: try to get next UFFD write fault page and, if pending fault
1560 * is found, return RAM block pointer and page offset
1561 *
1562 * Returns pointer to the RAMBlock containing faulting page,
1563 * NULL if no write faults are pending
1564 *
1565 * @rs: current RAM state
1566 * @offset: page offset from the beginning of the block
1567 */
1568 static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
1569 {
1570 struct uffd_msg uffd_msg;
1571 void *page_address;
1572 RAMBlock *block;
1573 int res;
1574
1575 if (!migrate_background_snapshot()) {
1576 return NULL;
1577 }
1578
1579 res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1);
1580 if (res <= 0) {
1581 return NULL;
1582 }
1583
1584 page_address = (void *)(uintptr_t) uffd_msg.arg.pagefault.address;
1585 block = qemu_ram_block_from_host(page_address, false, offset);
1586 assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0);
1587 return block;
1588 }
1589
1590 /**
1591 * ram_save_release_protection: release UFFD write protection after
1592 * a range of pages has been saved
1593 *
1594 * @rs: current RAM state
1595 * @pss: page-search-status structure
1596 * @start_page: index of the first page in the range relative to pss->block
1597 *
1598 * Returns 0 on success, negative value in case of an error
1599 */
1600 static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
1601 unsigned long start_page)
1602 {
1603 int res = 0;
1604
1605 /* Check if page is from UFFD-managed region. */
1606 if (pss->block->flags & RAM_UF_WRITEPROTECT) {
1607 void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS);
1608 uint64_t run_length = (pss->page - start_page + 1) << TARGET_PAGE_BITS;
1609
1610 /* Flush async buffers before un-protect. */
1611 qemu_fflush(rs->f);
1612 /* Un-protect memory range. */
1613 res = uffd_change_protection(rs->uffdio_fd, page_address, run_length,
1614 false, false);
1615 }
1616
1617 return res;
1618 }
1619
1620 /* ram_write_tracking_available: check if kernel supports required UFFD features
1621 *
1622 * Returns true if supports, false otherwise
1623 */
1624 bool ram_write_tracking_available(void)
1625 {
1626 uint64_t uffd_features;
1627 int res;
1628
1629 res = uffd_query_features(&uffd_features);
1630 return (res == 0 &&
1631 (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP) != 0);
1632 }
1633
1634 /* ram_write_tracking_compatible: check if guest configuration is
1635 * compatible with 'write-tracking'
1636 *
1637 * Returns true if compatible, false otherwise
1638 */
1639 bool ram_write_tracking_compatible(void)
1640 {
1641 const uint64_t uffd_ioctls_mask = BIT(_UFFDIO_WRITEPROTECT);
1642 int uffd_fd;
1643 RAMBlock *block;
1644 bool ret = false;
1645
1646 /* Open UFFD file descriptor */
1647 uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, false);
1648 if (uffd_fd < 0) {
1649 return false;
1650 }
1651
1652 RCU_READ_LOCK_GUARD();
1653
1654 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1655 uint64_t uffd_ioctls;
1656
1657 /* Nothing to do with read-only and MMIO-writable regions */
1658 if (block->mr->readonly || block->mr->rom_device) {
1659 continue;
1660 }
1661 /* Try to register block memory via UFFD-IO to track writes */
1662 if (uffd_register_memory(uffd_fd, block->host, block->max_length,
1663 UFFDIO_REGISTER_MODE_WP, &uffd_ioctls)) {
1664 goto out;
1665 }
1666 if ((uffd_ioctls & uffd_ioctls_mask) != uffd_ioctls_mask) {
1667 goto out;
1668 }
1669 }
1670 ret = true;
1671
1672 out:
1673 uffd_close_fd(uffd_fd);
1674 return ret;
1675 }
1676
1677 static inline void populate_read_range(RAMBlock *block, ram_addr_t offset,
1678 ram_addr_t size)
1679 {
1680 /*
1681 * We read one byte of each page; this will preallocate page tables if
1682 * required and populate the shared zeropage on MAP_PRIVATE anonymous memory
1683 * where no page was populated yet. This might require adaptation when
1684 * supporting other mappings, like shmem.
1685 */
1686 for (; offset < size; offset += block->page_size) {
1687 char tmp = *((char *)block->host + offset);
1688
1689 /* Don't optimize the read out */
1690 asm volatile("" : "+r" (tmp));
1691 }
1692 }
1693
1694 static inline int populate_read_section(MemoryRegionSection *section,
1695 void *opaque)
1696 {
1697 const hwaddr size = int128_get64(section->size);
1698 hwaddr offset = section->offset_within_region;
1699 RAMBlock *block = section->mr->ram_block;
1700
1701 populate_read_range(block, offset, size);
1702 return 0;
1703 }
1704
1705 /*
1706 * ram_block_populate_read: preallocate page tables and populate pages in the
1707 * RAM block by reading a byte of each page.
1708 *
1709 * Since it's solely used for userfault_fd WP feature, here we just
1710 * hardcode page size to qemu_real_host_page_size.
1711 *
1712 * @block: RAM block to populate
1713 */
1714 static void ram_block_populate_read(RAMBlock *rb)
1715 {
1716 /*
1717 * Skip populating all pages that fall into a discarded range as managed by
1718 * a RamDiscardManager responsible for the mapped memory region of the
1719 * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock
1720 * must not get populated automatically. We don't have to track
1721 * modifications via userfaultfd WP reliably, because these pages will
1722 * not be part of the migration stream either way -- see
1723 * ramblock_dirty_bitmap_exclude_discarded_pages().
1724 *
1725 * Note: The result is only stable while migrating (precopy/postcopy).
1726 */
1727 if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
1728 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
1729 MemoryRegionSection section = {
1730 .mr = rb->mr,
1731 .offset_within_region = 0,
1732 .size = rb->mr->size,
1733 };
1734
1735 ram_discard_manager_replay_populated(rdm, &section,
1736 populate_read_section, NULL);
1737 } else {
1738 populate_read_range(rb, 0, rb->used_length);
1739 }
1740 }
1741
1742 /*
1743 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
1744 */
1745 void ram_write_tracking_prepare(void)
1746 {
1747 RAMBlock *block;
1748
1749 RCU_READ_LOCK_GUARD();
1750
1751 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1752 /* Nothing to do with read-only and MMIO-writable regions */
1753 if (block->mr->readonly || block->mr->rom_device) {
1754 continue;
1755 }
1756
1757 /*
1758 * Populate pages of the RAM block before enabling userfault_fd
1759 * write protection.
1760 *
1761 * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with
1762 * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip
1763 * pages with pte_none() entries in page table.
1764 */
1765 ram_block_populate_read(block);
1766 }
1767 }
1768
1769 /*
1770 * ram_write_tracking_start: start UFFD-WP memory tracking
1771 *
1772 * Returns 0 for success or negative value in case of error
1773 */
1774 int ram_write_tracking_start(void)
1775 {
1776 int uffd_fd;
1777 RAMState *rs = ram_state;
1778 RAMBlock *block;
1779
1780 /* Open UFFD file descriptor */
1781 uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, true);
1782 if (uffd_fd < 0) {
1783 return uffd_fd;
1784 }
1785 rs->uffdio_fd = uffd_fd;
1786
1787 RCU_READ_LOCK_GUARD();
1788
1789 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1790 /* Nothing to do with read-only and MMIO-writable regions */
1791 if (block->mr->readonly || block->mr->rom_device) {
1792 continue;
1793 }
1794
1795 /* Register block memory with UFFD to track writes */
1796 if (uffd_register_memory(rs->uffdio_fd, block->host,
1797 block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) {
1798 goto fail;
1799 }
1800 /* Apply UFFD write protection to the block memory range */
1801 if (uffd_change_protection(rs->uffdio_fd, block->host,
1802 block->max_length, true, false)) {
1803 goto fail;
1804 }
1805 block->flags |= RAM_UF_WRITEPROTECT;
1806 memory_region_ref(block->mr);
1807
1808 trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size,
1809 block->host, block->max_length);
1810 }
1811
1812 return 0;
1813
1814 fail:
1815 error_report("ram_write_tracking_start() failed: restoring initial memory state");
1816
1817 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1818 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
1819 continue;
1820 }
1821 /*
1824 */
1825 uffd_change_protection(rs->uffdio_fd, block->host, block->max_length,
1826 false, false);
1827 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
1828 /* Cleanup flags and remove reference */
1829 block->flags &= ~RAM_UF_WRITEPROTECT;
1830 memory_region_unref(block->mr);
1831 }
1832
1833 uffd_close_fd(uffd_fd);
1834 rs->uffdio_fd = -1;
1835 return -1;
1836 }
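/*
 * For reference, a minimal sketch of the Linux userfaultfd calls that
 * uffd_register_memory() and uffd_change_protection() are expected to wrap
 * (assuming the standard <linux/userfaultfd.h> API; not the exact code used
 * here):
 *
 *     struct uffdio_register reg = {
 *         .range = { .start = (uintptr_t)host, .len = length },
 *         .mode  = UFFDIO_REGISTER_MODE_WP,
 *     };
 *     ioctl(uffd_fd, UFFDIO_REGISTER, &reg);
 *
 *     struct uffdio_writeprotect wp = {
 *         .range = { .start = (uintptr_t)host, .len = length },
 *         .mode  = UFFDIO_WRITEPROTECT_MODE_WP,
 *     };
 *     ioctl(uffd_fd, UFFDIO_WRITEPROTECT, &wp);
 *
 * Once this is in place, guest writes to the protected range raise
 * write-protect faults that the snapshot code can consume.
 */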
1837
1838 /**
1839 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
1840 */
1841 void ram_write_tracking_stop(void)
1842 {
1843 RAMState *rs = ram_state;
1844 RAMBlock *block;
1845
1846 RCU_READ_LOCK_GUARD();
1847
1848 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1849 if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
1850 continue;
1851 }
1852 /* Remove protection and unregister all affected RAM blocks */
1853 uffd_change_protection(rs->uffdio_fd, block->host, block->max_length,
1854 false, false);
1855 uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
1856
1857 trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size,
1858 block->host, block->max_length);
1859
1860 /* Cleanup flags and remove reference */
1861 block->flags &= ~RAM_UF_WRITEPROTECT;
1862 memory_region_unref(block->mr);
1863 }
1864
1865 /* Finally close UFFD file descriptor */
1866 uffd_close_fd(rs->uffdio_fd);
1867 rs->uffdio_fd = -1;
1868 }
1869
1870 #else
1871 /* No target OS support, stubs just fail or ignore */
1872
1873 static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
1874 {
1875 (void) rs;
1876 (void) offset;
1877
1878 return NULL;
1879 }
1880
1881 static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
1882 unsigned long start_page)
1883 {
1884 (void) rs;
1885 (void) pss;
1886 (void) start_page;
1887
1888 return 0;
1889 }
1890
1891 bool ram_write_tracking_available(void)
1892 {
1893 return false;
1894 }
1895
1896 bool ram_write_tracking_compatible(void)
1897 {
1898 assert(0);
1899 return false;
1900 }
1901
1902 int ram_write_tracking_start(void)
1903 {
1904 assert(0);
1905 return -1;
1906 }
1907
1908 void ram_write_tracking_stop(void)
1909 {
1910 assert(0);
1911 }
1912 #endif /* defined(__linux__) */
1913
1914 /**
1915 * get_queued_page: unqueue a page from the postcopy requests
1916 *
1917 * Skips pages that are already sent (!dirty)
1918 *
1919 * Returns true if a queued page is found
1920 *
1921 * @rs: current RAM state
1922 * @pss: data about the state of the current dirty page scan
1923 */
1924 static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
1925 {
1926 RAMBlock *block;
1927 ram_addr_t offset;
1928 bool dirty;
1929
1930 do {
1931 block = unqueue_page(rs, &offset);
1932 /*
1933 * We're sending this page, and since it's postcopy nothing else
1934 * will dirty it, and we must make sure it doesn't get sent again
1935 * even if this queue request was received after the background
1936 * search already sent it.
1937 */
1938 if (block) {
1939 unsigned long page;
1940
1941 page = offset >> TARGET_PAGE_BITS;
1942 dirty = test_bit(page, block->bmap);
1943 if (!dirty) {
1944 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
1945 page);
1946 } else {
1947 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
1948 }
1949 }
1950
1951 } while (block && !dirty);
1952
1953 if (!block) {
1954 /*
1955 * Poll write faults too if background snapshot is enabled; that's
1956 * when vcpus get blocked by the write-protected pages.
1957 */
1958 block = poll_fault_page(rs, &offset);
1959 }
1960
1961 if (block) {
1962 /*
1963 * We want the background search to continue from the queued page
1964 * since the guest is likely to want other pages near to the page
1965 * it just requested.
1966 */
1967 pss->block = block;
1968 pss->page = offset >> TARGET_PAGE_BITS;
1969
1970 /*
1971 * This unqueued page would break the "one round" check, even if
1972 * it's really rare.
1973 */
1974 pss->complete_round = false;
1975 }
1976
1977 return !!block;
1978 }
1979
1980 /**
1981 * migration_page_queue_free: drop any remaining pages in the ram
1982 * request queue
1983 *
1984 * It should be empty at the end anyway, but in error cases there may
1985 * be some left. In case any page is left, we drop it.
1986 *
1987 */
1988 static void migration_page_queue_free(RAMState *rs)
1989 {
1990 struct RAMSrcPageRequest *mspr, *next_mspr;
1991 /* This queue generally should be empty - but in the case of a failed
1992 * migration might have some droppings in.
1993 */
1994 RCU_READ_LOCK_GUARD();
1995 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
1996 memory_region_unref(mspr->rb->mr);
1997 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
1998 g_free(mspr);
1999 }
2000 }
2001
2002 /**
2003 * ram_save_queue_pages: queue the page for transmission
2004 *
2005 * A request from postcopy destination for example.
2006 *
2007 * Returns zero on success or negative on error
2008 *
2009 * @rbname: Name of the RAMBlock of the request. NULL means the
2010 * same as the last one.
2011 * @start: starting address from the start of the RAMBlock
2012 * @len: length (in bytes) to send
2013 */
2014 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
2015 {
2016 RAMBlock *ramblock;
2017 RAMState *rs = ram_state;
2018
2019 ram_counters.postcopy_requests++;
2020 RCU_READ_LOCK_GUARD();
2021
2022 if (!rbname) {
2023 /* Reuse last RAMBlock */
2024 ramblock = rs->last_req_rb;
2025
2026 if (!ramblock) {
2027 /*
2028 * Shouldn't happen, we can't reuse the last RAMBlock if
2029 * it's the 1st request.
2030 */
2031 error_report("ram_save_queue_pages no previous block");
2032 return -1;
2033 }
2034 } else {
2035 ramblock = qemu_ram_block_by_name(rbname);
2036
2037 if (!ramblock) {
2038 /* We shouldn't be asked for a non-existent RAMBlock */
2039 error_report("ram_save_queue_pages no block '%s'", rbname);
2040 return -1;
2041 }
2042 rs->last_req_rb = ramblock;
2043 }
2044 trace_ram_save_queue_pages(ramblock->idstr, start, len);
2045 if (!offset_in_ramblock(ramblock, start + len - 1)) {
2046 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
2047 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
2048 __func__, start, len, ramblock->used_length);
2049 return -1;
2050 }
2051
2052 struct RAMSrcPageRequest *new_entry =
2053 g_malloc0(sizeof(struct RAMSrcPageRequest));
2054 new_entry->rb = ramblock;
2055 new_entry->offset = start;
2056 new_entry->len = len;
2057
2058 memory_region_ref(ramblock->mr);
2059 qemu_mutex_lock(&rs->src_page_req_mutex);
2060 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
2061 migration_make_urgent_request();
2062 qemu_mutex_unlock(&rs->src_page_req_mutex);
2063
2064 return 0;
2065 }
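/*
 * Typical usage (an illustrative example; the block name and values below
 * are hypothetical): when the destination faults on a page during postcopy
 * and sends a page request over the return path, the source side ends up
 * calling something like
 *
 *     ram_save_queue_pages("pc.ram", 0x200000, TARGET_PAGE_SIZE);
 *
 * which queues the request so that get_queued_page() can prioritize it over
 * the background scan.
 */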
2066
2067 static bool save_page_use_compression(RAMState *rs)
2068 {
2069 if (!migrate_use_compression()) {
2070 return false;
2071 }
2072
2073 /*
2074 * If xbzrle is enabled (e.g., after first round of migration), stop
2075 * using the data compression. In theory, xbzrle can do better than
2076 * compression.
2077 */
2078 if (rs->xbzrle_enabled) {
2079 return false;
2080 }
2081
2082 return true;
2083 }
2084
2085 /*
2086 * try to compress the page before posting it out, return true if the page
2087 * has been properly handled by compression, otherwise needs other
2088 * paths to handle it
2089 */
2090 static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
2091 {
2092 if (!save_page_use_compression(rs)) {
2093 return false;
2094 }
2095
2096 /*
2097 * When starting the process of a new block, the first page of
2098 * the block should be sent out before other pages in the same
2099 * block, and all the pages in the last block should have been sent
2100 * out. Keeping this order is important because the 'cont' flag
2101 * is used to avoid resending the block name.
2102 *
2103 * We post the first page as a normal page since compression takes
2104 * a lot of CPU resources.
2105 */
2106 if (block != rs->last_sent_block) {
2107 flush_compressed_data(rs);
2108 return false;
2109 }
2110
2111 if (compress_page_with_multi_thread(rs, block, offset) > 0) {
2112 return true;
2113 }
2114
2115 compression_counters.busy++;
2116 return false;
2117 }
2118
2119 /**
2120 * ram_save_target_page: save one target page
2121 *
2122 * Returns the number of pages written
2123 *
2124 * @rs: current RAM state
2125 * @pss: data about the page we want to send
2126 */
2127 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
2128 {
2129 RAMBlock *block = pss->block;
2130 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
2131 int res;
2132
2133 if (control_save_page(rs, block, offset, &res)) {
2134 return res;
2135 }
2136
2137 if (save_compress_page(rs, block, offset)) {
2138 return 1;
2139 }
2140
2141 res = save_zero_page(rs, block, offset);
2142 if (res > 0) {
2143 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2144 * page would be stale
2145 */
2146 if (!save_page_use_compression(rs)) {
2147 XBZRLE_cache_lock();
2148 xbzrle_cache_zero_page(rs, block->offset + offset);
2149 XBZRLE_cache_unlock();
2150 }
2151 return res;
2152 }
2153
2154 /*
2155 * Do not use multifd for:
2156 * 1. Compression, as the first page in a new block should be posted out
2157 * before sending the compressed page
2158 * 2. Postcopy, as one whole host page should be placed atomically
2159 */
2160 if (!save_page_use_compression(rs) && migrate_use_multifd()
2161 && !migration_in_postcopy()) {
2162 return ram_save_multifd_page(rs, block, offset);
2163 }
2164
2165 return ram_save_page(rs, pss);
2166 }
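/*
 * To summarize the order of the checks above: RDMA control (if active) gets
 * the first chance to handle the page, then multi-threaded compression, then
 * the zero-page check, then multifd (when compression is off and we're not
 * in postcopy), and finally the plain ram_save_page() path.
 */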
2167
2168 /**
2169 * ram_save_host_page: save a whole host page
2170 *
2171 * Starting at pss->page, send pages up to the end of the current host
2172 * page. It's valid for the initial page to point into the middle of
2173 * a host page, in which case the remainder of the host page is sent.
2174 * Only dirty target pages are sent. Note that the host page size may
2175 * be a huge page for this block.
2176 * The saving stops at the boundary of the used_length of the block
2177 * if the RAMBlock isn't a multiple of the host page size.
2178 *
2179 * Returns the number of pages written or negative on error
2180 *
2181 * @rs: current RAM state
2182 * @pss: data about the page we want to send
2183 */
2184 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
2185 {
2186 int tmppages, pages = 0;
2187 size_t pagesize_bits =
2188 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
2189 unsigned long hostpage_boundary =
2190 QEMU_ALIGN_UP(pss->page + 1, pagesize_bits);
2191 unsigned long start_page = pss->page;
2192 int res;
2193
2194 if (ramblock_is_ignored(pss->block)) {
2195 error_report("block %s should not be migrated !", pss->block->idstr);
2196 return 0;
2197 }
2198
2199 do {
2200 /* Check if the page is dirty and if so send it */
2201 if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2202 tmppages = ram_save_target_page(rs, pss);
2203 if (tmppages < 0) {
2204 return tmppages;
2205 }
2206
2207 pages += tmppages;
2208 /*
2209 * Allow rate limiting to happen in the middle of huge pages if
2210 * something is sent in the current iteration.
2211 */
2212 if (pagesize_bits > 1 && tmppages > 0) {
2213 migration_rate_limit();
2214 }
2215 }
2216 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
2217 } while ((pss->page < hostpage_boundary) &&
2218 offset_in_ramblock(pss->block,
2219 ((ram_addr_t)pss->page) << TARGET_PAGE_BITS));
2220 /* The offset we leave with is the min boundary of host page and block */
2221 pss->page = MIN(pss->page, hostpage_boundary) - 1;
2222
2223 res = ram_save_release_protection(rs, pss, start_page);
2224 return (res < 0 ? res : pages);
2225 }
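/*
 * Worked example (with assumed sizes): for a hugetlbfs-backed block with a
 * 2 MiB host page size and 4 KiB target pages, pagesize_bits is 512. If the
 * scan enters with pss->page == 1000, hostpage_boundary is
 * QEMU_ALIGN_UP(1001, 512) == 1024, so only the remaining target pages
 * 1000..1023 of that huge page are considered, and pss->page is left at
 * most at 1023 when the function returns.
 */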
2226
2227 /**
2228 * ram_find_and_save_block: finds a dirty page and sends it to f
2229 *
2230 * Called within an RCU critical section.
2231 *
2232 * Returns the number of pages written where zero means no dirty pages,
2233 * or negative on error
2234 *
2235 * @rs: current RAM state
2236 *
2237 * On systems where host-page-size > target-page-size it will send all the
2238 * pages in a host page that are dirty.
2239 */
2240 static int ram_find_and_save_block(RAMState *rs)
2241 {
2242 PageSearchStatus pss;
2243 int pages = 0;
2244 bool again, found;
2245
2246 /* No dirty page as there is zero RAM */
2247 if (!ram_bytes_total()) {
2248 return pages;
2249 }
2250
2251 pss.block = rs->last_seen_block;
2252 pss.page = rs->last_page;
2253 pss.complete_round = false;
2254
2255 if (!pss.block) {
2256 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
2257 }
2258
2259 do {
2260 again = true;
2261 found = get_queued_page(rs, &pss);
2262
2263 if (!found) {
2264 /* priority queue empty, so just search for something dirty */
2265 found = find_dirty_block(rs, &pss, &again);
2266 }
2267
2268 if (found) {
2269 pages = ram_save_host_page(rs, &pss);
2270 }
2271 } while (!pages && again);
2272
2273 rs->last_seen_block = pss.block;
2274 rs->last_page = pss.page;
2275
2276 return pages;
2277 }
2278
2279 void acct_update_position(QEMUFile *f, size_t size, bool zero)
2280 {
2281 uint64_t pages = size / TARGET_PAGE_SIZE;
2282
2283 if (zero) {
2284 ram_counters.duplicate += pages;
2285 } else {
2286 ram_counters.normal += pages;
2287 ram_counters.transferred += size;
2288 qemu_update_position(f, size);
2289 }
2290 }
2291
2292 static uint64_t ram_bytes_total_common(bool count_ignored)
2293 {
2294 RAMBlock *block;
2295 uint64_t total = 0;
2296
2297 RCU_READ_LOCK_GUARD();
2298
2299 if (count_ignored) {
2300 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2301 total += block->used_length;
2302 }
2303 } else {
2304 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2305 total += block->used_length;
2306 }
2307 }
2308 return total;
2309 }
2310
2311 uint64_t ram_bytes_total(void)
2312 {
2313 return ram_bytes_total_common(false);
2314 }
2315
2316 static void xbzrle_load_setup(void)
2317 {
2318 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2319 }
2320
2321 static void xbzrle_load_cleanup(void)
2322 {
2323 g_free(XBZRLE.decoded_buf);
2324 XBZRLE.decoded_buf = NULL;
2325 }
2326
2327 static void ram_state_cleanup(RAMState **rsp)
2328 {
2329 if (*rsp) {
2330 migration_page_queue_free(*rsp);
2331 qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2332 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2333 g_free(*rsp);
2334 *rsp = NULL;
2335 }
2336 }
2337
2338 static void xbzrle_cleanup(void)
2339 {
2340 XBZRLE_cache_lock();
2341 if (XBZRLE.cache) {
2342 cache_fini(XBZRLE.cache);
2343 g_free(XBZRLE.encoded_buf);
2344 g_free(XBZRLE.current_buf);
2345 g_free(XBZRLE.zero_target_page);
2346 XBZRLE.cache = NULL;
2347 XBZRLE.encoded_buf = NULL;
2348 XBZRLE.current_buf = NULL;
2349 XBZRLE.zero_target_page = NULL;
2350 }
2351 XBZRLE_cache_unlock();
2352 }
2353
2354 static void ram_save_cleanup(void *opaque)
2355 {
2356 RAMState **rsp = opaque;
2357 RAMBlock *block;
2358
2359 /* We don't use dirty log with background snapshots */
2360 if (!migrate_background_snapshot()) {
2361 /* the caller must hold the iothread lock or be in a BH, so there is
2362 * no write race against the migration bitmap
2363 */
2364 if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
2365 /*
2366 * do not stop dirty log without starting it, since
2367 * memory_global_dirty_log_stop will assert that
2368 * memory_global_dirty_log_start/stop are used in pairs
2369 */
2370 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
2371 }
2372 }
2373
2374 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2375 g_free(block->clear_bmap);
2376 block->clear_bmap = NULL;
2377 g_free(block->bmap);
2378 block->bmap = NULL;
2379 }
2380
2381 xbzrle_cleanup();
2382 compress_threads_save_cleanup();
2383 ram_state_cleanup(rsp);
2384 }
2385
2386 static void ram_state_reset(RAMState *rs)
2387 {
2388 rs->last_seen_block = NULL;
2389 rs->last_sent_block = NULL;
2390 rs->last_page = 0;
2391 rs->last_version = ram_list.version;
2392 rs->xbzrle_enabled = false;
2393 }
2394
2395 #define MAX_WAIT 50 /* ms, half buffered_file limit */
2396
2397 /* **** functions for postcopy ***** */
2398
2399 void ram_postcopy_migrated_memory_release(MigrationState *ms)
2400 {
2401 struct RAMBlock *block;
2402
2403 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2404 unsigned long *bitmap = block->bmap;
2405 unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2406 unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
2407
2408 while (run_start < range) {
2409 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
2410 ram_discard_range(block->idstr,
2411 ((ram_addr_t)run_start) << TARGET_PAGE_BITS,
2412 ((ram_addr_t)(run_end - run_start))
2413 << TARGET_PAGE_BITS);
2414 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2415 }
2416 }
2417 }
2418
2419 /**
2420 * postcopy_send_discard_bm_ram: discard a RAMBlock
2421 *
2422 * Returns zero on success
2423 *
2424 * Callback from postcopy_each_ram_send_discard for each RAMBlock
2425 *
2426 * @ms: current migration state
2427 * @block: RAMBlock to discard
2428 */
2429 static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
2430 {
2431 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
2432 unsigned long current;
2433 unsigned long *bitmap = block->bmap;
2434
2435 for (current = 0; current < end; ) {
2436 unsigned long one = find_next_bit(bitmap, end, current);
2437 unsigned long zero, discard_length;
2438
2439 if (one >= end) {
2440 break;
2441 }
2442
2443 zero = find_next_zero_bit(bitmap, end, one + 1);
2444
2445 if (zero >= end) {
2446 discard_length = end - one;
2447 } else {
2448 discard_length = zero - one;
2449 }
2450 postcopy_discard_send_range(ms, one, discard_length);
2451 current = one + discard_length;
2452 }
2453
2454 return 0;
2455 }
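/*
 * Worked example: with used_length covering 10 target pages and dirty bits
 * set for pages {2, 3, 4, 7, 8}, the loop above emits two discard ranges,
 * postcopy_discard_send_range(ms, 2, 3) and postcopy_discard_send_range(ms, 7, 2),
 * i.e. one (start, length) pair per run of dirty pages.
 */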
2456
2457 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block);
2458
2459 /**
2460 * postcopy_each_ram_send_discard: discard all RAMBlocks
2461 *
2462 * Returns 0 for success or negative for error
2463 *
2464 * Utility for the outgoing postcopy code.
2465 * Calls postcopy_send_discard_bm_ram for each RAMBlock
2466 * passing it bitmap indexes and name.
2467 * (qemu_ram_foreach_block ends up passing unscaled lengths
2468 * which would mean postcopy code would have to deal with target page)
2469 *
2470 * @ms: current migration state
2471 */
2472 static int postcopy_each_ram_send_discard(MigrationState *ms)
2473 {
2474 struct RAMBlock *block;
2475 int ret;
2476
2477 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2478 postcopy_discard_send_init(ms, block->idstr);
2479
2480 /*
2481 * Deal with TPS != HPS and huge pages. It discards any partially sent
2482 * host-page size chunks and marks any partially dirty host-page size
2483 * chunks as all dirty. In this case the host page is the host page
2484 * for the particular RAMBlock, i.e. it might be a huge page.
2485 */
2486 postcopy_chunk_hostpages_pass(ms, block);
2487
2488 /*
2489 * Postcopy sends chunks of bitmap over the wire, but it
2490 * just needs indexes at this point, which avoids it having
2491 * target-page-specific code.
2492 */
2493 ret = postcopy_send_discard_bm_ram(ms, block);
2494 postcopy_discard_send_finish(ms);
2495 if (ret) {
2496 return ret;
2497 }
2498 }
2499
2500 return 0;
2501 }
2502
2503 /**
2504 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
2505 *
2506 * Helper for postcopy_each_ram_send_discard(); called once per
2507 * RAMBlock to canonicalize its dirty bitmap in host-page-sized
2508 * chunks.
2509 *
2510 * Postcopy requires that all target pages in a host page are dirty or
2511 * clean, not a mix. This function canonicalizes the bitmap accordingly.
2512 *
2513 * @ms: current migration state
2514 * @block: block that contains the page we want to canonicalize
2515 */
2516 static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
2517 {
2518 RAMState *rs = ram_state;
2519 unsigned long *bitmap = block->bmap;
2520 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
2521 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2522 unsigned long run_start;
2523
2524 if (block->page_size == TARGET_PAGE_SIZE) {
2525 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2526 return;
2527 }
2528
2529 /* Find a dirty page */
2530 run_start = find_next_bit(bitmap, pages, 0);
2531
2532 while (run_start < pages) {
2533
2534 /*
2535 * If the start of this run of pages is aligned to a host page,
2536 * check whether the run also ends on a host page boundary.
2537 */
2538 if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
2539 /* Find the end of this run */
2540 run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
2541 /*
2542 * If the end isn't at the start of a host page, then the
2543 * run doesn't finish at the end of a host page
2544 * and we need to discard.
2545 */
2546 }
2547
2548 if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
2549 unsigned long page;
2550 unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
2551 host_ratio);
2552 run_start = QEMU_ALIGN_UP(run_start, host_ratio);
2553
2554 /* Clean up the bitmap */
2555 for (page = fixup_start_addr;
2556 page < fixup_start_addr + host_ratio; page++) {
2557 /*
2558 * Remark them as dirty, updating the count for any pages
2559 * that weren't previously dirty.
2560 */
2561 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
2562 }
2563 }
2564
2565 /* Find the next dirty page for the next iteration */
2566 run_start = find_next_bit(bitmap, pages, run_start);
2567 }
2568 }
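/*
 * Worked example (assuming host_ratio == 4, i.e. a host page spans 4 target
 * pages): if only target pages 6 and 7 are dirty, the run starts mid host
 * page, so fixup_start_addr becomes 4 and pages 4..7 are all marked dirty.
 * The host page [4, 8) is then either fully dirty or fully clean, which is
 * what the postcopy discard code requires.
 */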
2569
2570 /**
2571 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2572 *
2573 * Returns zero on success
2574 *
2575 * Transmit the set of pages to be discarded after precopy to the target;
2576 * these are pages that:
2577 * a) have been previously transmitted but are now dirty again
2578 * b) have never been transmitted; this ensures that any pages on the
2579 * destination that have been mapped by background tasks get discarded
2580 * (transparent huge pages are the specific concern)
2581 * Hopefully this is pretty sparse
2582 *
2583 * @ms: current migration state
2584 */
2585 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
2586 {
2587 RAMState *rs = ram_state;
2588
2589 RCU_READ_LOCK_GUARD();
2590
2591 /* This should be our last sync, the src is now paused */
2592 migration_bitmap_sync(rs);
2593
2594 /* Easiest way to make sure we don't resume in the middle of a host-page */
2595 rs->last_seen_block = NULL;
2596 rs->last_sent_block = NULL;
2597 rs->last_page = 0;
2598
2599 trace_ram_postcopy_send_discard_bitmap();
2600
2601 return postcopy_each_ram_send_discard(ms);
2602 }
2603
2604 /**
2605 * ram_discard_range: discard dirtied pages at the beginning of postcopy
2606 *
2607 * Returns zero on success
2608 *
2609 * @rbname: name of the RAMBlock of the request
2610 *
2611 * @start: start offset (in bytes) within the RAMBlock
2612 * @length: length (in bytes) to discard
2613 */
2614 int ram_discard_range(const char *rbname, uint64_t start, size_t length)
2615 {
2616 trace_ram_discard_range(rbname, start, length);
2617
2618 RCU_READ_LOCK_GUARD();
2619 RAMBlock *rb = qemu_ram_block_by_name(rbname);
2620
2621 if (!rb) {
2622 error_report("ram_discard_range: Failed to find block '%s'", rbname);
2623 return -1;
2624 }
2625
2626 /*
2627 * On the source VM, we don't need to update the received bitmap since
2628 * we don't even have one.
2629 */
2630 if (rb->receivedmap) {
2631 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
2632 length >> qemu_target_page_bits());
2633 }
2634
2635 return ram_block_discard_range(rb, start, length);
2636 }
2637
2638 /*
2639 * For every allocation, we will try not to crash the VM if the
2640 * allocation fails.
2641 */
2642 static int xbzrle_init(void)
2643 {
2644 Error *local_err = NULL;
2645
2646 if (!migrate_use_xbzrle()) {
2647 return 0;
2648 }
2649
2650 XBZRLE_cache_lock();
2651
2652 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
2653 if (!XBZRLE.zero_target_page) {
2654 error_report("%s: Error allocating zero page", __func__);
2655 goto err_out;
2656 }
2657
2658 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
2659 TARGET_PAGE_SIZE, &local_err);
2660 if (!XBZRLE.cache) {
2661 error_report_err(local_err);
2662 goto free_zero_page;
2663 }
2664
2665 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
2666 if (!XBZRLE.encoded_buf) {
2667 error_report("%s: Error allocating encoded_buf", __func__);
2668 goto free_cache;
2669 }
2670
2671 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
2672 if (!XBZRLE.current_buf) {
2673 error_report("%s: Error allocating current_buf", __func__);
2674 goto free_encoded_buf;
2675 }
2676
2677 /* We are all good */
2678 XBZRLE_cache_unlock();
2679 return 0;
2680
2681 free_encoded_buf:
2682 g_free(XBZRLE.encoded_buf);
2683 XBZRLE.encoded_buf = NULL;
2684 free_cache:
2685 cache_fini(XBZRLE.cache);
2686 XBZRLE.cache = NULL;
2687 free_zero_page:
2688 g_free(XBZRLE.zero_target_page);
2689 XBZRLE.zero_target_page = NULL;
2690 err_out:
2691 XBZRLE_cache_unlock();
2692 return -ENOMEM;
2693 }
2694
2695 static int ram_state_init(RAMState **rsp)
2696 {
2697 *rsp = g_try_new0(RAMState, 1);
2698
2699 if (!*rsp) {
2700 error_report("%s: Init ramstate fail", __func__);
2701 return -1;
2702 }
2703
2704 qemu_mutex_init(&(*rsp)->bitmap_mutex);
2705 qemu_mutex_init(&(*rsp)->src_page_req_mutex);
2706 QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
2707
2708 /*
2709 * Count the total number of pages used by ram blocks not including any
2710 * gaps due to alignment or unplugs.
2711 * This must match the initial value of the dirty bitmap.
2712 */
2713 (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
2714 ram_state_reset(*rsp);
2715
2716 return 0;
2717 }
2718
2719 static void ram_list_init_bitmaps(void)
2720 {
2721 MigrationState *ms = migrate_get_current();
2722 RAMBlock *block;
2723 unsigned long pages;
2724 uint8_t shift;
2725
2726 /* Skip setting bitmap if there is no RAM */
2727 if (ram_bytes_total()) {
2728 shift = ms->clear_bitmap_shift;
2729 if (shift > CLEAR_BITMAP_SHIFT_MAX) {
2730 error_report("clear_bitmap_shift (%u) too big, using "
2731 "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
2732 shift = CLEAR_BITMAP_SHIFT_MAX;
2733 } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
2734 error_report("clear_bitmap_shift (%u) too small, using "
2735 "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
2736 shift = CLEAR_BITMAP_SHIFT_MIN;
2737 }
2738
2739 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2740 pages = block->max_length >> TARGET_PAGE_BITS;
2741 /*
2742 * The initial dirty bitmap for migration must be set with all
2743 * ones to make sure we'll migrate every guest RAM page to
2744 * destination.
2745 * Here we set RAMBlock.bmap all to 1 because when rebegin a
2746 * new migration after a failed migration, ram_list.
2747 * dirty_memory[DIRTY_MEMORY_MIGRATION] don't include the whole
2748 * guest memory.
2749 */
2750 block->bmap = bitmap_new(pages);
2751 bitmap_set(block->bmap, 0, pages);
2752 block->clear_bmap_shift = shift;
2753 block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
2754 }
2755 }
2756 }
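/*
 * Note on sizing: bmap has one bit per target page, while clear_bmap has one
 * bit per (1 << clear_bmap_shift) target pages. Assuming the default shift
 * of 18 and 4 KiB target pages (both are configurable), one clear_bmap bit
 * covers 1 GiB of guest memory whose dirty log clearing can be deferred
 * until just before the pages are sent.
 */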
2757
2758 static void migration_bitmap_clear_discarded_pages(RAMState *rs)
2759 {
2760 unsigned long pages;
2761 RAMBlock *rb;
2762
2763 RCU_READ_LOCK_GUARD();
2764
2765 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
2766 pages = ramblock_dirty_bitmap_clear_discarded_pages(rb);
2767 rs->migration_dirty_pages -= pages;
2768 }
2769 }
2770
2771 static void ram_init_bitmaps(RAMState *rs)
2772 {
2773 /* For memory_global_dirty_log_start below. */
2774 qemu_mutex_lock_iothread();
2775 qemu_mutex_lock_ramlist();
2776
2777 WITH_RCU_READ_LOCK_GUARD() {
2778 ram_list_init_bitmaps();
2779 /* We don't use dirty log with background snapshots */
2780 if (!migrate_background_snapshot()) {
2781 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
2782 migration_bitmap_sync_precopy(rs);
2783 }
2784 }
2785 qemu_mutex_unlock_ramlist();
2786 qemu_mutex_unlock_iothread();
2787
2788 /*
2789 * After a possible first bitmap sync, fix up the initial bitmap
2790 * containing all 1s to exclude any discarded pages from migration.
2791 */
2792 migration_bitmap_clear_discarded_pages(rs);
2793 }
2794
2795 static int ram_init_all(RAMState **rsp)
2796 {
2797 if (ram_state_init(rsp)) {
2798 return -1;
2799 }
2800
2801 if (xbzrle_init()) {
2802 ram_state_cleanup(rsp);
2803 return -1;
2804 }
2805
2806 ram_init_bitmaps(*rsp);
2807
2808 return 0;
2809 }
2810
2811 static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
2812 {
2813 RAMBlock *block;
2814 uint64_t pages = 0;
2815
2816 /*
2817 * Postcopy is not using xbzrle/compression, so no need for that.
2818 * Also, since the source is already halted, we don't need to care
2819 * about dirty page logging either.
2820 */
2821
2822 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2823 pages += bitmap_count_one(block->bmap,
2824 block->used_length >> TARGET_PAGE_BITS);
2825 }
2826
2827 /* This may not be aligned with current bitmaps. Recalculate. */
2828 rs->migration_dirty_pages = pages;
2829
2830 ram_state_reset(rs);
2831
2832 /* Update RAMState cache of output QEMUFile */
2833 rs->f = out;
2834
2835 trace_ram_state_resume_prepare(pages);
2836 }
2837
2838 /*
2839 * This function clears bits of the free pages reported by the caller from the
2840 * migration dirty bitmap. @addr is the host address corresponding to the
2841 * start of the contiguous guest free pages, and @len is the total bytes of
2842 * those pages.
2843 */
2844 void qemu_guest_free_page_hint(void *addr, size_t len)
2845 {
2846 RAMBlock *block;
2847 ram_addr_t offset;
2848 size_t used_len, start, npages;
2849 MigrationState *s = migrate_get_current();
2850
2851 /* This function is currently expected to be used during live migration */
2852 if (!migration_is_setup_or_active(s->state)) {
2853 return;
2854 }
2855
2856 for (; len > 0; len -= used_len, addr += used_len) {
2857 block = qemu_ram_block_from_host(addr, false, &offset);
2858 if (unlikely(!block || offset >= block->used_length)) {
2859 /*
2860 * The implementation might not support RAMBlock resize during
2861 * live migration, but it could happen in theory with future
2862 * updates. So we add a check here to capture that case.
2863 */
2864 error_report_once("%s unexpected error", __func__);
2865 return;
2866 }
2867
2868 if (len <= block->used_length - offset) {
2869 used_len = len;
2870 } else {
2871 used_len = block->used_length - offset;
2872 }
2873
2874 start = offset >> TARGET_PAGE_BITS;
2875 npages = used_len >> TARGET_PAGE_BITS;
2876
2877 qemu_mutex_lock(&ram_state->bitmap_mutex);
2878 /*
2879 * The skipped free pages are equivalent to having been sent, from
2880 * clear_bmap's perspective, so clear the bits from the memory region
2881 * bitmap which are initially set. Otherwise those skipped pages will
2882 * be sent in the next round after syncing from the memory region bitmap.
2883 */
2884 migration_clear_memory_region_dirty_bitmap_range(block, start, npages);
2885 ram_state->migration_dirty_pages -=
2886 bitmap_count_one_with_offset(block->bmap, start, npages);
2887 bitmap_clear(block->bmap, start, npages);
2888 qemu_mutex_unlock(&ram_state->bitmap_mutex);
2889 }
2890 }
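/*
 * Example (assuming 4 KiB target pages and hypothetical values): a hint of
 * addr = block->host + 0x100000 with len = 0x40000 clears 64 bits starting
 * at bit 256 in block->bmap, and migration_dirty_pages drops by however many
 * of those bits were still set, so the reported free pages are simply never
 * sent.
 */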
2891
2892 /*
2893 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has a
2894 * long-running RCU critical section. When RCU reclaims in the code
2895 * start to become numerous it will be necessary to reduce the
2896 * granularity of these critical sections.
2897 */
2898
2899 /**
2900 * ram_save_setup: Setup RAM for migration
2901 *
2902 * Returns zero to indicate success and negative for error
2903 *
2904 * @f: QEMUFile where to send the data
2905 * @opaque: RAMState pointer
2906 */
2907 static int ram_save_setup(QEMUFile *f, void *opaque)
2908 {
2909 RAMState **rsp = opaque;
2910 RAMBlock *block;
2911
2912 if (compress_threads_save_setup()) {
2913 return -1;
2914 }
2915
2916 /* migration has already set up the bitmap, reuse it. */
2917 if (!migration_in_colo_state()) {
2918 if (ram_init_all(rsp) != 0) {
2919 compress_threads_save_cleanup();
2920 return -1;
2921 }
2922 }
2923 (*rsp)->f = f;
2924
2925 WITH_RCU_READ_LOCK_GUARD() {
2926 qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
2927
2928 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2929 qemu_put_byte(f, strlen(block->idstr));
2930 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
2931 qemu_put_be64(f, block->used_length);
2932 if (migrate_postcopy_ram() && block->page_size !=
2933 qemu_host_page_size) {
2934 qemu_put_be64(f, block->page_size);
2935 }
2936 if (migrate_ignore_shared()) {
2937 qemu_put_be64(f, block->mr->addr);
2938 }
2939 }
2940 }
2941
2942 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
2943 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
2944
2945 multifd_send_sync_main(f);
2946 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2947 qemu_fflush(f);
2948
2949 return 0;
2950 }
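/*
 * The setup stream written above is, roughly: a be64 carrying the total RAM
 * size with RAM_SAVE_FLAG_MEM_SIZE set, then for every migratable block its
 * idstr length byte, the idstr itself, its used_length, optionally its page
 * size (when postcopy is enabled and it differs from the host page size) and
 * optionally its address (with ignore-shared), terminated by a be64
 * RAM_SAVE_FLAG_EOS marker.
 */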
2951
2952 /**
2953 * ram_save_iterate: iterative stage for migration
2954 *
2955 * Returns zero to indicate success and negative for error
2956 *
2957 * @f: QEMUFile where to send the data
2958 * @opaque: RAMState pointer
2959 */
2960 static int ram_save_iterate(QEMUFile *f, void *opaque)
2961 {
2962 RAMState **temp = opaque;
2963 RAMState *rs = *temp;
2964 int ret = 0;
2965 int i;
2966 int64_t t0;
2967 int done = 0;
2968
2969 if (blk_mig_bulk_active()) {
2970 /* Avoid transferring ram during bulk phase of block migration as
2971 * the bulk phase will usually take a long time and transferring
2972 * ram updates during that time is pointless. */
2973 goto out;
2974 }
2975
2976 /*
2977 * We'll hold this lock for a while, but it's okay for two reasons.
2978 * Firstly, the only other thread that can take it is the one calling
2979 * qemu_guest_free_page_hint(), which should be rare; secondly, see
2980 * MAX_WAIT below (if curious, see also commit 4508bd9ed8053ce), which
2981 * guarantees that we release it on a regular basis.
2982 */
2983 qemu_mutex_lock(&rs->bitmap_mutex);
2984 WITH_RCU_READ_LOCK_GUARD() {
2985 if (ram_list.version != rs->last_version) {
2986 ram_state_reset(rs);
2987 }
2988
2989 /* Read version before ram_list.blocks */
2990 smp_rmb();
2991
2992 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
2993
2994 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2995 i = 0;
2996 while ((ret = qemu_file_rate_limit(f)) == 0 ||
2997 !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2998 int pages;
2999
3000 if (qemu_file_get_error(f)) {
3001 break;
3002 }
3003
3004 pages = ram_find_and_save_block(rs);
3005 /* no more pages to send */
3006 if (pages == 0) {
3007 done = 1;
3008 break;
3009 }
3010
3011 if (pages < 0) {
3012 qemu_file_set_error(f, pages);
3013 break;
3014 }
3015
3016 rs->target_page_count += pages;
3017
3018 /*
3019 * During postcopy, it is necessary to make sure one whole host
3020 * page is sent in one chunk.
3021 */
3022 if (migrate_postcopy_ram()) {
3023 flush_compressed_data(rs);
3024 }
3025
3026 /*
3027 * we want to check in the 1st loop, just in case it was the 1st
3028 * time and we had to sync the dirty bitmap.
3029 * qemu_clock_get_ns() is a bit expensive, so we only check once
3030 * every few iterations
3031 */
3032 if ((i & 63) == 0) {
3033 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
3034 1000000;
3035 if (t1 > MAX_WAIT) {
3036 trace_ram_save_iterate_big_wait(t1, i);
3037 break;
3038 }
3039 }
3040 i++;
3041 }
3042 }
3043 qemu_mutex_unlock(&rs->bitmap_mutex);
3044
3045 /*
3046 * Must occur before EOS (or any QEMUFile operation)
3047 * because of RDMA protocol.
3048 */
3049 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
3050
3051 out:
3052 if (ret >= 0
3053 && migration_is_setup_or_active(migrate_get_current()->state)) {
3054 multifd_send_sync_main(rs->f);
3055 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3056 qemu_fflush(f);
3057 ram_counters.transferred += 8;
3058
3059 ret = qemu_file_get_error(f);
3060 }
3061 if (ret < 0) {
3062 return ret;
3063 }
3064
3065 return done;
3066 }
3067
3068 /**
3069 * ram_save_complete: function called to send the remaining amount of ram
3070 *
3071 * Returns zero to indicate success or negative on error
3072 *
3073 * Called with iothread lock
3074 *
3075 * @f: QEMUFile where to send the data
3076 * @opaque: RAMState pointer
3077 */
3078 static int ram_save_complete(QEMUFile *f, void *opaque)
3079 {
3080 RAMState **temp = opaque;
3081 RAMState *rs = *temp;
3082 int ret = 0;
3083
3084 rs->last_stage = !migration_in_colo_state();
3085
3086 WITH_RCU_READ_LOCK_GUARD() {
3087 if (!migration_in_postcopy()) {
3088 migration_bitmap_sync_precopy(rs);
3089 }
3090
3091 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
3092
3093 /* try transferring iterative blocks of memory */
3094
3095 /* flush all remaining blocks regardless of rate limiting */
3096 while (true) {
3097 int pages;
3098
3099 pages = ram_find_and_save_block(rs);
3100 /* no more blocks to send */
3101 if (pages == 0) {
3102 break;
3103 }
3104 if (pages < 0) {
3105 ret = pages;
3106 break;
3107 }
3108 }
3109
3110 flush_compressed_data(rs);
3111 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
3112 }
3113
3114 if (ret >= 0) {
3115 multifd_send_sync_main(rs->f);
3116 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3117 qemu_fflush(f);
3118 }
3119
3120 return ret;
3121 }
3122
3123 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
3124 uint64_t *res_precopy_only,
3125 uint64_t *res_compatible,
3126 uint64_t *res_postcopy_only)
3127 {
3128 RAMState **temp = opaque;
3129 RAMState *rs = *temp;
3130 uint64_t remaining_size;
3131
3132 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3133
3134 if (!migration_in_postcopy() &&
3135 remaining_size < max_size) {
3136 qemu_mutex_lock_iothread();
3137 WITH_RCU_READ_LOCK_GUARD() {
3138 migration_bitmap_sync_precopy(rs);
3139 }
3140 qemu_mutex_unlock_iothread();
3141 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3142 }
3143
3144 if (migrate_postcopy_ram()) {
3145 /* We can do postcopy, and all the data is postcopiable */
3146 *res_compatible += remaining_size;
3147 } else {
3148 *res_precopy_only += remaining_size;
3149 }
3150 }
3151
3152 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3153 {
3154 unsigned int xh_len;
3155 int xh_flags;
3156 uint8_t *loaded_data;
3157
3158 /* extract RLE header */
3159 xh_flags = qemu_get_byte(f);
3160 xh_len = qemu_get_be16(f);
3161
3162 if (xh_flags != ENCODING_FLAG_XBZRLE) {
3163 error_report("Failed to load XBZRLE page - wrong compression!");
3164 return -1;
3165 }
3166
3167 if (xh_len > TARGET_PAGE_SIZE) {
3168 error_report("Failed to load XBZRLE page - len overflow!");
3169 return -1;
3170 }
3171 loaded_data = XBZRLE.decoded_buf;
3172 /* load data and decode */
3173 /* it can change loaded_data to point to an internal buffer */
3174 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
3175
3176 /* decode RLE */
3177 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
3178 TARGET_PAGE_SIZE) == -1) {
3179 error_report("Failed to load XBZRLE page - decode error!");
3180 return -1;
3181 }
3182
3183 return 0;
3184 }
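/*
 * For reference, the XBZRLE page layout consumed above is: one byte of
 * flags (expected to be ENCODING_FLAG_XBZRLE), a be16 encoded length, then
 * the encoded bytes themselves, which xbzrle_decode_buffer() applies as a
 * delta on top of the existing contents of @host.
 */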
3185
3186 /**
3187 * ram_block_from_stream: read a RAMBlock id from the migration stream
3188 *
3189 * Must be called from within a rcu critical section.
3190 *
3191 * Returns a pointer from within the RCU-protected ram_list.
3192 *
3193 * @f: QEMUFile where to read the data from
3194 * @flags: Page flags (mostly to see if it's a continuation of previous block)
3195 */
3196 static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
3197 {
3198 static RAMBlock *block;
3199 char id[256];
3200 uint8_t len;
3201
3202 if (flags & RAM_SAVE_FLAG_CONTINUE) {
3203 if (!block) {
3204 error_report("Ack, bad migration stream!");
3205 return NULL;
3206 }
3207 return block;
3208 }
3209
3210 len = qemu_get_byte(f);
3211 qemu_get_buffer(f, (uint8_t *)id, len);
3212 id[len] = 0;
3213
3214 block = qemu_ram_block_by_name(id);
3215 if (!block) {
3216 error_report("Can't find block %s", id);
3217 return NULL;
3218 }
3219
3220 if (ramblock_is_ignored(block)) {
3221 error_report("block %s should not be migrated !", id);
3222 return NULL;
3223 }
3224
3225 return block;
3226 }
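/*
 * On the wire this means: pages carrying RAM_SAVE_FLAG_CONTINUE reuse the
 * previously named block (cached in the static variable above), while other
 * pages are preceded by a one-byte idstr length and the idstr itself, which
 * is looked up with qemu_ram_block_by_name().
 */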
3227
3228 static inline void *host_from_ram_block_offset(RAMBlock *block,
3229 ram_addr_t offset)
3230 {
3231 if (!offset_in_ramblock(block, offset)) {
3232 return NULL;
3233 }
3234
3235 return block->host + offset;
3236 }
3237
3238 static void *host_page_from_ram_block_offset(RAMBlock *block,
3239 ram_addr_t offset)
3240 {
3241 /* Note: Explicitly no check against offset_in_ramblock(). */
3242 return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset),
3243 block->page_size);
3244 }
3245
3246 static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block,
3247 ram_addr_t offset)
3248 {
3249 return ((uintptr_t)block->host + offset) & (block->page_size - 1);
3250 }
3251
3252 static inline void *colo_cache_from_block_offset(RAMBlock *block,
3253 ram_addr_t offset, bool record_bitmap)
3254 {
3255 if (!offset_in_ramblock(block, offset)) {
3256 return NULL;
3257 }
3258 if (!block->colo_cache) {
3259 error_report("%s: colo_cache is NULL in block :%s",
3260 __func__, block->idstr);
3261 return NULL;
3262 }
3263
3264 /*
3265 * During a COLO checkpoint, we need a bitmap of these migrated pages.
3266 * It helps us decide which pages in the RAM cache should be flushed
3267 * into the VM's RAM later.
3268 */
3269 if (record_bitmap &&
3270 !test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
3271 ram_state->migration_dirty_pages++;
3272 }
3273 return block->colo_cache + offset;
3274 }
3275
3276 /**
3277 * ram_handle_compressed: handle the zero page case
3278 *
3279 * If a page (or a whole RDMA chunk) has been
3280 * determined to be zero, then zap it.
3281 *
3282 * @host: host address for the zero page
3283 * @ch: what the page is filled from. We only support zero
3284 * @size: size of the zero page
3285 */
3286 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
3287 {
3288 if (ch != 0 || !buffer_is_zero(host, size)) {
3289 memset(host, ch, size);
3290 }
3291 }
3292
3293 /* return the size after decompression, or negative value on error */
3294 static int
3295 qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
3296 const uint8_t *source, size_t source_len)
3297 {
3298 int err;
3299
3300 err = inflateReset(stream);
3301 if (err != Z_OK) {
3302 return -1;
3303 }
3304
3305 stream->avail_in = source_len;
3306 stream->next_in = (uint8_t *)source;
3307 stream->avail_out = dest_len;
3308 stream->next_out = dest;
3309
3310 err = inflate(stream, Z_NO_FLUSH);
3311 if (err != Z_STREAM_END) {
3312 return -1;
3313 }
3314
3315 return stream->total_out;
3316 }
3317
3318 static void *do_data_decompress(void *opaque)
3319 {
3320 DecompressParam *param = opaque;
3321 unsigned long pagesize;
3322 uint8_t *des;
3323 int len, ret;
3324
3325 qemu_mutex_lock(&param->mutex);
3326 while (!param->quit) {
3327 if (param->des) {
3328 des = param->des;
3329 len = param->len;
3330 param->des = 0;
3331 qemu_mutex_unlock(&param->mutex);
3332
3333 pagesize = TARGET_PAGE_SIZE;
3334
3335 ret = qemu_uncompress_data(&param->stream, des, pagesize,
3336 param->compbuf, len);
3337 if (ret < 0 && migrate_get_current()->decompress_error_check) {
3338 error_report("decompress data failed");
3339 qemu_file_set_error(decomp_file, ret);
3340 }
3341
3342 qemu_mutex_lock(&decomp_done_lock);
3343 param->done = true;
3344 qemu_cond_signal(&decomp_done_cond);
3345 qemu_mutex_unlock(&decomp_done_lock);
3346
3347 qemu_mutex_lock(&param->mutex);
3348 } else {
3349 qemu_cond_wait(&param->cond, &param->mutex);
3350 }
3351 }
3352 qemu_mutex_unlock(&param->mutex);
3353
3354 return NULL;
3355 }
3356
3357 static int wait_for_decompress_done(void)
3358 {
3359 int idx, thread_count;
3360
3361 if (!migrate_use_compression()) {
3362 return 0;
3363 }
3364
3365 thread_count = migrate_decompress_threads();
3366 qemu_mutex_lock(&decomp_done_lock);
3367 for (idx = 0; idx < thread_count; idx++) {
3368 while (!decomp_param[idx].done) {
3369 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3370 }
3371 }
3372 qemu_mutex_unlock(&decomp_done_lock);
3373 return qemu_file_get_error(decomp_file);
3374 }
3375
3376 static void compress_threads_load_cleanup(void)
3377 {
3378 int i, thread_count;
3379
3380 if (!migrate_use_compression()) {
3381 return;
3382 }
3383 thread_count = migrate_decompress_threads();
3384 for (i = 0; i < thread_count; i++) {
3385 /*
3386 * we use it as an indicator of whether the thread has been
3387 * properly initialized or not
3388 */
3389 if (!decomp_param[i].compbuf) {
3390 break;
3391 }
3392
3393 qemu_mutex_lock(&decomp_param[i].mutex);
3394 decomp_param[i].quit = true;
3395 qemu_cond_signal(&decomp_param[i].cond);
3396 qemu_mutex_unlock(&decomp_param[i].mutex);
3397 }
3398 for (i = 0; i < thread_count; i++) {
3399 if (!decomp_param[i].compbuf) {
3400 break;
3401 }
3402
3403 qemu_thread_join(decompress_threads + i);
3404 qemu_mutex_destroy(&decomp_param[i].mutex);
3405 qemu_cond_destroy(&decomp_param[i].cond);
3406 inflateEnd(&decomp_param[i].stream);
3407 g_free(decomp_param[i].compbuf);
3408 decomp_param[i].compbuf = NULL;
3409 }
3410 g_free(decompress_threads);
3411 g_free(decomp_param);
3412 decompress_threads = NULL;
3413 decomp_param = NULL;
3414 decomp_file = NULL;
3415 }
3416
3417 static int compress_threads_load_setup(QEMUFile *f)
3418 {
3419 int i, thread_count;
3420
3421 if (!migrate_use_compression()) {
3422 return 0;
3423 }
3424
3425 thread_count = migrate_decompress_threads();
3426 decompress_threads = g_new0(QemuThread, thread_count);
3427 decomp_param = g_new0(DecompressParam, thread_count);
3428 qemu_mutex_init(&decomp_done_lock);
3429 qemu_cond_init(&decomp_done_cond);
3430 decomp_file = f;
3431 for (i = 0; i < thread_count; i++) {
3432 if (inflateInit(&decomp_param[i].stream) != Z_OK) {
3433 goto exit;
3434 }
3435
3436 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
3437 qemu_mutex_init(&decomp_param[i].mutex);
3438 qemu_cond_init(&decomp_param[i].cond);
3439 decomp_param[i].done = true;
3440 decomp_param[i].quit = false;
3441 qemu_thread_create(decompress_threads + i, "decompress",
3442 do_data_decompress, decomp_param + i,
3443 QEMU_THREAD_JOINABLE);
3444 }
3445 return 0;
3446 exit:
3447 compress_threads_load_cleanup();
3448 return -1;
3449 }
3450
3451 static void decompress_data_with_multi_threads(QEMUFile *f,
3452 void *host, int len)
3453 {
3454 int idx, thread_count;
3455
3456 thread_count = migrate_decompress_threads();
3457 QEMU_LOCK_GUARD(&decomp_done_lock);
3458 while (true) {
3459 for (idx = 0; idx < thread_count; idx++) {
3460 if (decomp_param[idx].done) {
3461 decomp_param[idx].done = false;
3462 qemu_mutex_lock(&decomp_param[idx].mutex);
3463 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
3464 decomp_param[idx].des = host;
3465 decomp_param[idx].len = len;
3466 qemu_cond_signal(&decomp_param[idx].cond);
3467 qemu_mutex_unlock(&decomp_param[idx].mutex);
3468 break;
3469 }
3470 }
3471 if (idx < thread_count) {
3472 break;
3473 } else {
3474 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3475 }
3476 }
3477 }
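/*
 * Hand-off protocol between the load thread and the decompress threads, as
 * implemented above: the load thread claims an idle worker by clearing its
 * "done" flag, copies the compressed payload into that worker's compbuf,
 * publishes des/len under the worker's mutex and signals its condition
 * variable; do_data_decompress() then inflates into the destination page and
 * flips "done" back under decomp_done_lock, waking any waiter in
 * wait_for_decompress_done().
 */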
3478
3479 static void colo_init_ram_state(void)
3480 {
3481 ram_state_init(&ram_state);
3482 }
3483
3484 /*
3485 * COLO cache: this is for the secondary VM; we cache the whole
3486 * memory of the secondary VM. The global lock must be held when
3487 * calling this helper.
3488 */
3489 int colo_init_ram_cache(void)
3490 {
3491 RAMBlock *block;
3492
3493 WITH_RCU_READ_LOCK_GUARD() {
3494 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3495 block->colo_cache = qemu_anon_ram_alloc(block->used_length,
3496 NULL, false, false);
3497 if (!block->colo_cache) {
3498 error_report("%s: Can't alloc memory for COLO cache of block %s,"
3499 "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
3500 block->used_length);
3501 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3502 if (block->colo_cache) {
3503 qemu_anon_ram_free(block->colo_cache, block->used_length);
3504 block->colo_cache = NULL;
3505 }
3506 }
3507 return -errno;
3508 }
3509 if (!machine_dump_guest_core(current_machine)) {
3510 qemu_madvise(block->colo_cache, block->used_length,
3511 QEMU_MADV_DONTDUMP);
3512 }
3513 }
3514 }
3515
3516 /*
3517 * Record the dirty pages that are sent by the PVM; we use this dirty bitmap
3518 * to decide which pages in the cache should be flushed into the SVM's RAM.
3519 * Here we use the same name 'ram_bitmap' as for migration.
3520 */
3521 if (ram_bytes_total()) {
3522 RAMBlock *block;
3523
3524 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3525 unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
3526 block->bmap = bitmap_new(pages);
3527 }
3528 }
3529
3530 colo_init_ram_state();
3531 return 0;
3532 }
3533
3534 /* TODO: duplicated with ram_init_bitmaps */
3535 void colo_incoming_start_dirty_log(void)
3536 {
3537 RAMBlock *block = NULL;
3538 /* For memory_global_dirty_log_start below. */
3539 qemu_mutex_lock_iothread();
3540 qemu_mutex_lock_ramlist();
3541
3542 memory_global_dirty_log_sync();
3543 WITH_RCU_READ_LOCK_GUARD() {
3544 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3545 ramblock_sync_dirty_bitmap(ram_state, block);
3546 /* Discard this dirty bitmap record */
3547 bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS);
3548 }
3549 memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
3550 }
3551 ram_state->migration_dirty_pages = 0;
3552 qemu_mutex_unlock_ramlist();
3553 qemu_mutex_unlock_iothread();
3554 }
3555
3556 /* The global lock must be held to call this helper */
3557 void colo_release_ram_cache(void)
3558 {
3559 RAMBlock *block;
3560
3561 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
3562 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3563 g_free(block->bmap);
3564 block->bmap = NULL;
3565 }
3566
3567 WITH_RCU_READ_LOCK_GUARD() {
3568 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3569 if (block->colo_cache) {
3570 qemu_anon_ram_free(block->colo_cache, block->used_length);
3571 block->colo_cache = NULL;
3572 }
3573 }
3574 }
3575 ram_state_cleanup(&ram_state);
3576 }
3577
3578 /**
3579 * ram_load_setup: Setup RAM for migration incoming side
3580 *
3581 * Returns zero to indicate success and negative for error
3582 *
3583 * @f: QEMUFile where to receive the data
3584 * @opaque: RAMState pointer
3585 */
3586 static int ram_load_setup(QEMUFile *f, void *opaque)
3587 {
3588 if (compress_threads_load_setup(f)) {
3589 return -1;
3590 }
3591
3592 xbzrle_load_setup();
3593 ramblock_recv_map_init();
3594
3595 return 0;
3596 }
3597
3598 static int ram_load_cleanup(void *opaque)
3599 {
3600 RAMBlock *rb;
3601
3602 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
3603 qemu_ram_block_writeback(rb);
3604 }
3605
3606 xbzrle_load_cleanup();
3607 compress_threads_load_cleanup();
3608
3609 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
3610 g_free(rb->receivedmap);
3611 rb->receivedmap = NULL;
3612 }
3613
3614 return 0;
3615 }
3616
3617 /**
3618 * ram_postcopy_incoming_init: allocate postcopy data structures
3619 *
3620 * Returns 0 for success and negative if there was one error
3621 *
3622 * @mis: current migration incoming state
3623 *
3624 * Allocate data structures etc needed by incoming migration with
3625 * postcopy-ram. postcopy-ram's similarly named
3626 * postcopy_ram_incoming_init() does the work.
3627 */
3628 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
3629 {
3630 return postcopy_ram_incoming_init(mis);
3631 }
3632
3633 /**
3634 * ram_load_postcopy: load a page in postcopy case
3635 *
3636 * Returns 0 for success or -errno in case of error
3637 *
3638 * Called in postcopy mode by ram_load().
3639 * rcu_read_lock is taken prior to this being called.
3640 *
3641 * @f: QEMUFile where to send the data
3642 */
3643 static int ram_load_postcopy(QEMUFile *f)
3644 {
3645 int flags = 0, ret = 0;
3646 bool place_needed = false;
3647 bool matches_target_page_size = false;
3648 MigrationIncomingState *mis = migration_incoming_get_current();
3649 /* Temporary page that is later 'placed' */
3650 void *postcopy_host_page = mis->postcopy_tmp_page;
3651 void *host_page = NULL;
3652 bool all_zero = true;
3653 int target_pages = 0;
3654
3655 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
3656 ram_addr_t addr;
3657 void *page_buffer = NULL;
3658 void *place_source = NULL;
3659 RAMBlock *block = NULL;
3660 uint8_t ch;
3661 int len;
3662
3663 addr = qemu_get_be64(f);
3664
3665 /*
3666 * If qemu file error, we should stop here, and then "addr"
3667 * may be invalid
3668 */
3669 ret = qemu_file_get_error(f);
3670 if (ret) {
3671 break;
3672 }
3673
3674 flags = addr & ~TARGET_PAGE_MASK;
3675 addr &= TARGET_PAGE_MASK;
3676
3677 trace_ram_load_postcopy_loop((uint64_t)addr, flags);
3678 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
3679 RAM_SAVE_FLAG_COMPRESS_PAGE)) {
3680 block = ram_block_from_stream(f, flags);
3681 if (!block) {
3682 ret = -EINVAL;
3683 break;
3684 }
3685
3686 /*
3687 * Relying on used_length is racy and can result in false positives.
3688 * We might place pages beyond used_length in case RAM was shrunk
3689 * while in postcopy, which is fine - trying to place via
3690 * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault.
3691 */
3692 if (!block->host || addr >= block->postcopy_length) {
3693 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3694 ret = -EINVAL;
3695 break;
3696 }
3697 target_pages++;
3698 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
3699 /*
3700 * Postcopy requires that we place whole host pages atomically;
3701 * these may be huge pages for RAMBlocks that are backed by
3702 * hugetlbfs.
3703 * To make it atomic, the data is read into a temporary page
3704 * that's moved into place later.
3705 * The migration protocol uses, possibly smaller, target pages;
3706 * however, the source ensures it always sends all the components
3707 * of a host page in one chunk.
3708 */
3709 page_buffer = postcopy_host_page +
3710 host_page_offset_from_ram_block_offset(block, addr);
3711 /* If all target pages are zero then we can optimise the placement */
3712 if (target_pages == 1) {
3713 host_page = host_page_from_ram_block_offset(block, addr);
3714 } else if (host_page != host_page_from_ram_block_offset(block,
3715 addr)) {
3716 /* not the 1st TP within the HP */
3717 error_report("Non-same host page %p/%p", host_page,
3718 host_page_from_ram_block_offset(block, addr));
3719 ret = -EINVAL;
3720 break;
3721 }
3722
3723 /*
3724 * If it's the last part of a host page then we place the host
3725 * page
3726 */
3727 if (target_pages == (block->page_size / TARGET_PAGE_SIZE)) {
3728 place_needed = true;
3729 }
3730 place_source = postcopy_host_page;
3731 }
3732
3733 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
3734 case RAM_SAVE_FLAG_ZERO:
3735 ch = qemu_get_byte(f);
3736 /*
3737 * We can skip setting page_buffer when this is a zero page
3738 * and block->page_size == TARGET_PAGE_SIZE.
3739 */
3740 if (ch || !matches_target_page_size) {
3741 memset(page_buffer, ch, TARGET_PAGE_SIZE);
3742 }
3743 if (ch) {
3744 all_zero = false;
3745 }
3746 break;
3747
3748 case RAM_SAVE_FLAG_PAGE:
3749 all_zero = false;
3750 if (!matches_target_page_size) {
3751 /* For huge pages, we always use temporary buffer */
3752 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
3753 } else {
3754 /*
3755 * For small pages that match the target page size, we
3756 * avoid the qemu_file copy. Instead we directly use
3757 * the buffer of QEMUFile to place the page. Note: we
3758 * cannot do any QEMUFile operation before using that
3759 * buffer to make sure the buffer is valid when
3760 * placing the page.
3761 */
3762 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
3763 TARGET_PAGE_SIZE);
3764 }
3765 break;
3766 case RAM_SAVE_FLAG_COMPRESS_PAGE:
3767 all_zero = false;
3768 len = qemu_get_be32(f);
3769 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
3770 error_report("Invalid compressed data length: %d", len);
3771 ret = -EINVAL;
3772 break;
3773 }
3774 decompress_data_with_multi_threads(f, page_buffer, len);
3775 break;
3776
3777 case RAM_SAVE_FLAG_EOS:
3778 /* normal exit */
3779 multifd_recv_sync_main();
3780 break;
3781 default:
3782 error_report("Unknown combination of migration flags: 0x%x"
3783 " (postcopy mode)", flags);
3784 ret = -EINVAL;
3785 break;
3786 }
3787
3788 /* Got the whole host page, wait for decompress before placing. */
3789 if (place_needed) {
3790 ret |= wait_for_decompress_done();
3791 }
3792
3793 /* Detect for any possible file errors */
3794 if (!ret && qemu_file_get_error(f)) {
3795 ret = qemu_file_get_error(f);
3796 }
3797
3798 if (!ret && place_needed) {
3799 if (all_zero) {
3800 ret = postcopy_place_page_zero(mis, host_page, block);
3801 } else {
3802 ret = postcopy_place_page(mis, host_page, place_source,
3803 block);
3804 }
3805 place_needed = false;
3806 target_pages = 0;
3807 /* Assume we have a zero page until we detect something different */
3808 all_zero = true;
3809 }
3810 }
3811
3812 return ret;
3813 }
3814
3815 static bool postcopy_is_advised(void)
3816 {
3817 PostcopyState ps = postcopy_state_get();
3818 return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
3819 }
3820
3821 static bool postcopy_is_running(void)
3822 {
3823 PostcopyState ps = postcopy_state_get();
3824 return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
3825 }
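/*
 * Editor's note: the two range checks above rely on the ordering of the
 * PostcopyState enum used by postcopy_state_get(), which is roughly:
 *
 *     NONE < ADVISE < DISCARD < LISTENING < RUNNING < END
 *
 * so "advised" covers everything from ADVISE up to (but excluding) END,
 * while "running" only covers LISTENING and RUNNING.
 */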
3826
3827 /*
3828 * Flush the content of the RAM cache into the SVM's memory.
3829 * Only flush the pages that have been dirtied by the PVM, the SVM, or both.
3830 */
3831 void colo_flush_ram_cache(void)
3832 {
3833 RAMBlock *block = NULL;
3834 void *dst_host;
3835 void *src_host;
3836 unsigned long offset = 0;
3837
3838 memory_global_dirty_log_sync();
3839 WITH_RCU_READ_LOCK_GUARD() {
3840 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3841 ramblock_sync_dirty_bitmap(ram_state, block);
3842 }
3843 }
3844
3845 trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
3846 WITH_RCU_READ_LOCK_GUARD() {
3847 block = QLIST_FIRST_RCU(&ram_list.blocks);
3848
3849 while (block) {
3850 unsigned long num = 0;
3851
3852 offset = colo_bitmap_find_dirty(ram_state, block, offset, &num);
3853 if (!offset_in_ramblock(block,
3854 ((ram_addr_t)offset) << TARGET_PAGE_BITS)) {
3855 offset = 0;
3856 num = 0;
3857 block = QLIST_NEXT_RCU(block, next);
3858 } else {
3859 unsigned long i = 0;
3860
3861 for (i = 0; i < num; i++) {
3862 migration_bitmap_clear_dirty(ram_state, block, offset + i);
3863 }
3864 dst_host = block->host
3865 + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
3866 src_host = block->colo_cache
3867 + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
3868 memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num);
3869 offset += num;
3870 }
3871 }
3872 }
3873 trace_colo_flush_ram_cache_end();
3874 }
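/*
 * Editor's worked example for the flush loop above (assuming 4 KiB target
 * pages, i.e. TARGET_PAGE_BITS == 12): if colo_bitmap_find_dirty() returns
 * offset == 0x40 with num == 3, the loop clears those three dirty bits and
 * copies the range [0x40000, 0x43000) of this block from block->colo_cache
 * back into block->host with a single 12 KiB memcpy.
 */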
3875
3876 /**
3877 * ram_load_precopy: load pages in precopy case
3878 *
3879 * Returns 0 for success or -errno in case of error
3880 *
3881 * Called in precopy mode by ram_load().
3882 * rcu_read_lock is taken prior to this being called.
3883 *
3884 * @f: QEMUFile to read the pages from
3885 */
3886 static int ram_load_precopy(QEMUFile *f)
3887 {
3888 int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;
3889 /* ADVISE comes earlier; it indicates the source has the postcopy capability enabled */
3890 bool postcopy_advised = postcopy_is_advised();
3891 if (!migrate_use_compression()) {
3892 invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
3893 }
3894
3895 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
3896 ram_addr_t addr, total_ram_bytes;
3897 void *host = NULL, *host_bak = NULL;
3898 uint8_t ch;
3899
3900 /*
3901 * Yield periodically to let the main loop run, but an iteration of
3902 * the main loop is expensive, so only do it once in a while.
3903 */
3904 if ((i & 32767) == 0 && qemu_in_coroutine()) {
3905 aio_co_schedule(qemu_get_current_aio_context(),
3906 qemu_coroutine_self());
3907 qemu_coroutine_yield();
3908 }
3909 i++;
3910
3911 addr = qemu_get_be64(f);
3912 flags = addr & ~TARGET_PAGE_MASK;
3913 addr &= TARGET_PAGE_MASK;
3914
3915 if (flags & invalid_flags) {
3916 if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
3917 error_report("Received an unexpected compressed page");
3918 }
3919
3920 ret = -EINVAL;
3921 break;
3922 }
3923
3924 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
3925 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
3926 RAMBlock *block = ram_block_from_stream(f, flags);
3927
3928 host = host_from_ram_block_offset(block, addr);
3929 /*
3930 * After entering the COLO stage, we must not load pages into the
3931 * SVM's memory directly; we put them into colo_cache first.
3932 * NOTE: We need to keep a copy of the SVM's RAM in colo_cache.
3933 * Previously, all of this memory was copied in the COLO preparation
3934 * stage, which required stopping the VM and was time consuming.
3935 * We optimize this by backing up every page during the migration
3936 * process while COLO is enabled. Although this slows the migration
3937 * down somewhat, it clearly reduces the downtime caused by backing
3938 * up all of the SVM's memory in the COLO preparation stage.
3939 */
3940 if (migration_incoming_colo_enabled()) {
3941 if (migration_incoming_in_colo_state()) {
3942 /* In COLO stage, put all pages into cache temporarily */
3943 host = colo_cache_from_block_offset(block, addr, true);
3944 } else {
3945 /*
3946 * In the migration stage but before the COLO stage,
3947 * put all pages into both the cache and the SVM's memory.
3948 */
3949 host_bak = colo_cache_from_block_offset(block, addr, false);
3950 }
3951 }
3952 if (!host) {
3953 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3954 ret = -EINVAL;
3955 break;
3956 }
3957 if (!migration_incoming_in_colo_state()) {
3958 ramblock_recv_bitmap_set(block, host);
3959 }
3960
3961 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
3962 }
3963
3964 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
3965 case RAM_SAVE_FLAG_MEM_SIZE:
3966 /* Synchronize RAM block list */
3967 total_ram_bytes = addr;
3968 while (!ret && total_ram_bytes) {
3969 RAMBlock *block;
3970 char id[256];
3971 ram_addr_t length;
3972
3973 len = qemu_get_byte(f);
3974 qemu_get_buffer(f, (uint8_t *)id, len);
3975 id[len] = 0;
3976 length = qemu_get_be64(f);
3977
3978 block = qemu_ram_block_by_name(id);
3979 if (block && !qemu_ram_is_migratable(block)) {
3980 error_report("block %s should not be migrated !", id);
3981 ret = -EINVAL;
3982 } else if (block) {
3983 if (length != block->used_length) {
3984 Error *local_err = NULL;
3985
3986 ret = qemu_ram_resize(block, length,
3987 &local_err);
3988 if (local_err) {
3989 error_report_err(local_err);
3990 }
3991 }
3992 /* For postcopy we need to check hugepage sizes match */
3993 if (postcopy_advised && migrate_postcopy_ram() &&
3994 block->page_size != qemu_host_page_size) {
3995 uint64_t remote_page_size = qemu_get_be64(f);
3996 if (remote_page_size != block->page_size) {
3997 error_report("Mismatched RAM page size %s "
3998 "(local) %zu != %" PRIu64,
3999 id, block->page_size,
4000 remote_page_size);
4001 ret = -EINVAL;
4002 }
4003 }
4004 if (migrate_ignore_shared()) {
4005 hwaddr addr = qemu_get_be64(f);
4006 if (ramblock_is_ignored(block) &&
4007 block->mr->addr != addr) {
4008 error_report("Mismatched GPAs for block %s "
4009 "%" PRIu64 " != %" PRIu64,
4010 id, (uint64_t)addr,
4011 (uint64_t)block->mr->addr);
4012 ret = -EINVAL;
4013 }
4014 }
4015 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
4016 block->idstr);
4017 } else {
4018 error_report("Unknown ramblock \"%s\", cannot "
4019 "accept migration", id);
4020 ret = -EINVAL;
4021 }
4022
4023 total_ram_bytes -= length;
4024 }
4025 break;
4026
4027 case RAM_SAVE_FLAG_ZERO:
4028 ch = qemu_get_byte(f);
4029 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
4030 break;
4031
4032 case RAM_SAVE_FLAG_PAGE:
4033 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
4034 break;
4035
4036 case RAM_SAVE_FLAG_COMPRESS_PAGE:
4037 len = qemu_get_be32(f);
4038 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
4039 error_report("Invalid compressed data length: %d", len);
4040 ret = -EINVAL;
4041 break;
4042 }
4043 decompress_data_with_multi_threads(f, host, len);
4044 break;
4045
4046 case RAM_SAVE_FLAG_XBZRLE:
4047 if (load_xbzrle(f, addr, host) < 0) {
4048 error_report("Failed to decompress XBZRLE page at "
4049 RAM_ADDR_FMT, addr);
4050 ret = -EINVAL;
4051 break;
4052 }
4053 break;
4054 case RAM_SAVE_FLAG_EOS:
4055 /* normal exit */
4056 multifd_recv_sync_main();
4057 break;
4058 default:
4059 if (flags & RAM_SAVE_FLAG_HOOK) {
4060 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
4061 } else {
4062 error_report("Unknown combination of migration flags: 0x%x",
4063 flags);
4064 ret = -EINVAL;
4065 }
4066 }
4067 if (!ret) {
4068 ret = qemu_file_get_error(f);
4069 }
4070 if (!ret && host_bak) {
4071 memcpy(host_bak, host, TARGET_PAGE_SIZE);
4072 }
4073 }
4074
4075 ret |= wait_for_decompress_done();
4076 return ret;
4077 }
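/*
 * Editor's sketch of the precopy stream framing consumed above
 * (simplified, not a complete parser).  Each chunk starts with a
 * big-endian 64-bit word that packs the page address and the
 * RAM_SAVE_FLAG_* bits (for MEM_SIZE the address field carries the
 * total RAM size instead of a page address):
 *
 *     uint64_t header = qemu_get_be64(f);
 *     ram_addr_t addr = header & TARGET_PAGE_MASK;  // page-aligned offset
 *     int flags       = header & ~TARGET_PAGE_MASK; // RAM_SAVE_FLAG_* bits
 *
 *     switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
 *     case RAM_SAVE_FLAG_ZERO:          // one fill byte follows
 *     case RAM_SAVE_FLAG_PAGE:          // TARGET_PAGE_SIZE raw bytes follow
 *     case RAM_SAVE_FLAG_COMPRESS_PAGE: // be32 length + compressed data
 *     case RAM_SAVE_FLAG_XBZRLE:        // XBZRLE-encoded delta follows
 *     case RAM_SAVE_FLAG_MEM_SIZE:      // RAM block list, sent first
 *     case RAM_SAVE_FLAG_EOS:           // end of this section
 *     }
 */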
4078
4079 static int ram_load(QEMUFile *f, void *opaque, int version_id)
4080 {
4081 int ret = 0;
4082 static uint64_t seq_iter;
4083 /*
4084 * If the system is running in postcopy mode, page inserts into host
4085 * memory must be atomic.
4086 */
4087 bool postcopy_running = postcopy_is_running();
4088
4089 seq_iter++;
4090
4091 if (version_id != 4) {
4092 return -EINVAL;
4093 }
4094
4095 /*
4096 * This RCU critical section can be very long running.
4097 * When RCU reclaims in the code start to become numerous,
4098 * it will be necessary to reduce the granularity of this
4099 * critical section.
4100 */
4101 WITH_RCU_READ_LOCK_GUARD() {
4102 if (postcopy_running) {
4103 ret = ram_load_postcopy(f);
4104 } else {
4105 ret = ram_load_precopy(f);
4106 }
4107 }
4108 trace_ram_load_complete(ret, seq_iter);
4109
4110 return ret;
4111 }
4112
4113 static bool ram_has_postcopy(void *opaque)
4114 {
4115 RAMBlock *rb;
4116 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
4117 if (ramblock_is_pmem(rb)) {
4118 info_report("Block: %s, host: %p is an nvdimm memory region, postcopy "
4119 "is not supported yet!", rb->idstr, rb->host);
4120 return false;
4121 }
4122 }
4123
4124 return migrate_postcopy_ram();
4125 }
4126
4127 /* Sync all the dirty bitmaps with the destination VM. */
4128 static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
4129 {
4130 RAMBlock *block;
4131 QEMUFile *file = s->to_dst_file;
4132 int ramblock_count = 0;
4133
4134 trace_ram_dirty_bitmap_sync_start();
4135
4136 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4137 qemu_savevm_send_recv_bitmap(file, block->idstr);
4138 trace_ram_dirty_bitmap_request(block->idstr);
4139 ramblock_count++;
4140 }
4141
4142 trace_ram_dirty_bitmap_sync_wait();
4143
4144 /* Wait until all the ramblocks' dirty bitmaps are synced */
4145 while (ramblock_count--) {
4146 qemu_sem_wait(&s->rp_state.rp_sem);
4147 }
4148
4149 trace_ram_dirty_bitmap_sync_complete();
4150
4151 return 0;
4152 }
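/*
 * Editor's sketch of the recv-bitmap handshake driven by the function
 * above together with ram_dirty_bitmap_reload() below (postcopy recovery
 * path):
 *
 *   1. For every RAMBlock, the source sends a "recv bitmap" request via
 *      qemu_savevm_send_recv_bitmap().
 *   2. The destination replies on the return path with its received
 *      bitmap for that block.
 *   3. The source's return-path thread parses each reply in
 *      ram_dirty_bitmap_reload(), which posts s->rp_state.rp_sem once.
 *   4. The qemu_sem_wait() loop above consumes one post per RAMBlock, so
 *      ram_dirty_bitmap_sync_all() only returns after every bitmap has
 *      been reloaded.
 */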
4153
4154 static void ram_dirty_bitmap_reload_notify(MigrationState *s)
4155 {
4156 qemu_sem_post(&s->rp_state.rp_sem);
4157 }
4158
4159 /*
4160 * Read the received bitmap and invert it to form the initial dirty
4161 * bitmap. This is only used when a paused postcopy migration is being
4162 * resumed from a mid-point.
4163 */
4164 int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
4165 {
4166 int ret = -EINVAL;
4167 /* from_dst_file is always valid because we're within rp_thread */
4168 QEMUFile *file = s->rp_state.from_dst_file;
4169 unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
4170 uint64_t local_size = DIV_ROUND_UP(nbits, 8);
4171 uint64_t size, end_mark;
4172
4173 trace_ram_dirty_bitmap_reload_begin(block->idstr);
4174
4175 if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
4176 error_report("%s: incorrect state %s", __func__,
4177 MigrationStatus_str(s->state));
4178 return -EINVAL;
4179 }
4180
4181 /*
4182 * Note: see comments in ramblock_recv_bitmap_send() on why we
4183 * need the endianness conversion, and the paddings.
4184 */
4185 local_size = ROUND_UP(local_size, 8);
4186
4187 /* Add padding */
4188 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
4189
4190 size = qemu_get_be64(file);
4191
4192 /* The size of the bitmap should match our ramblock's */
4193 if (size != local_size) {
4194 error_report("%s: ramblock '%s' bitmap size mismatch "
4195 "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
4196 block->idstr, size, local_size);
4197 ret = -EINVAL;
4198 goto out;
4199 }
4200
4201 size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
4202 end_mark = qemu_get_be64(file);
4203
4204 ret = qemu_file_get_error(file);
4205 if (ret || size != local_size) {
4206 error_report("%s: read bitmap failed for ramblock '%s': %d"
4207 " (size 0x%"PRIx64", got: 0x%"PRIx64")",
4208 __func__, block->idstr, ret, local_size, size);
4209 ret = -EIO;
4210 goto out;
4211 }
4212
4213 if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
4214 error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIx64,
4215 __func__, block->idstr, end_mark);
4216 ret = -EINVAL;
4217 goto out;
4218 }
4219
4220 /*
4221 * Endianness conversion. We are during postcopy (though paused).
4222 * The dirty bitmap won't change. We can directly modify it.
4223 */
4224 bitmap_from_le(block->bmap, le_bitmap, nbits);
4225
4226 /*
4227 * What we received is the "received bitmap". Invert it to form
4228 * the initial dirty bitmap for this ramblock.
4229 */
4230 bitmap_complement(block->bmap, block->bmap, nbits);
4231
4232 /* Clear dirty bits of discarded ranges that we don't want to migrate. */
4233 ramblock_dirty_bitmap_clear_discarded_pages(block);
4234
4235 /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
4236 trace_ram_dirty_bitmap_reload_complete(block->idstr);
4237
4238 /*
4239 * We have successfully synced the bitmap for the current ramblock.
4240 * If this is the last one to sync, notify the main send thread.
4241 */
4242 ram_dirty_bitmap_reload_notify(s);
4243
4244 ret = 0;
4245 out:
4246 g_free(le_bitmap);
4247 return ret;
4248 }
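/*
 * Editor's worked example for the size/inversion logic above, assuming a
 * 1 GiB RAMBlock and 4 KiB target pages:
 *
 *     nbits      = 1 GiB >> 12             = 262144 bits
 *     local_size = DIV_ROUND_UP(262144, 8) = 32768 bytes (already a
 *                  multiple of 8, so ROUND_UP() leaves it unchanged)
 *
 * The destination sends its *received* bitmap (bit set == page already
 * received), so bitmap_complement() turns it into the dirty bitmap the
 * source needs: bit set == page still missing on the destination and
 * therefore to be (re)sent after the postcopy recovery.
 */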
4249
4250 static int ram_resume_prepare(MigrationState *s, void *opaque)
4251 {
4252 RAMState *rs = *(RAMState **)opaque;
4253 int ret;
4254
4255 ret = ram_dirty_bitmap_sync_all(s, rs);
4256 if (ret) {
4257 return ret;
4258 }
4259
4260 ram_state_resume_prepare(rs, s->to_dst_file);
4261
4262 return 0;
4263 }
4264
4265 static SaveVMHandlers savevm_ram_handlers = {
4266 .save_setup = ram_save_setup,
4267 .save_live_iterate = ram_save_iterate,
4268 .save_live_complete_postcopy = ram_save_complete,
4269 .save_live_complete_precopy = ram_save_complete,
4270 .has_postcopy = ram_has_postcopy,
4271 .save_live_pending = ram_save_pending,
4272 .load_state = ram_load,
4273 .save_cleanup = ram_save_cleanup,
4274 .load_setup = ram_load_setup,
4275 .load_cleanup = ram_load_cleanup,
4276 .resume_prepare = ram_resume_prepare,
4277 };
4278
4279 static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
4280 size_t old_size, size_t new_size)
4281 {
4282 PostcopyState ps = postcopy_state_get();
4283 ram_addr_t offset;
4284 RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
4285 Error *err = NULL;
4286
4287 if (ramblock_is_ignored(rb)) {
4288 return;
4289 }
4290
4291 if (!migration_is_idle()) {
4292 /*
4293 * Precopy code on the source cannot deal with the size of RAM blocks
4294 * changing at random points in time - especially after sending the
4295 * RAM block sizes in the migration stream, they must no longer change.
4296 * Abort and indicate a proper reason.
4297 */
4298 error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
4299 migration_cancel(err);
4300 error_free(err);
4301 }
4302
4303 switch (ps) {
4304 case POSTCOPY_INCOMING_ADVISE:
4305 /*
4306 * Update what ram_postcopy_incoming_init()->init_range() does at the
4307 * time postcopy was advised. Syncing RAM blocks with the source will
4308 * result in RAM resizes.
4309 */
4310 if (old_size < new_size) {
4311 if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
4312 error_report("RAM block '%s' discard of resized RAM failed",
4313 rb->idstr);
4314 }
4315 }
4316 rb->postcopy_length = new_size;
4317 break;
4318 case POSTCOPY_INCOMING_NONE:
4319 case POSTCOPY_INCOMING_RUNNING:
4320 case POSTCOPY_INCOMING_END:
4321 /*
4322 * Once our guest is running, postcopy no longer cares about
4323 * resizes. When growing, the new memory was not available on the
4324 * source, so no handler is needed.
4325 */
4326 break;
4327 default:
4328 error_report("RAM block '%s' resized during postcopy state: %d",
4329 rb->idstr, ps);
4330 exit(-1);
4331 }
4332 }
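/*
 * Editor's example for the POSTCOPY_INCOMING_ADVISE branch above: if a
 * RAM block that was 1 GiB when postcopy was advised is grown to 2 GiB,
 * ram_discard_range() is asked to drop the newly added [1 GiB, 2 GiB)
 * range, mirroring what init_range() would have done had the block
 * already had its new size, and rb->postcopy_length is updated to 2 GiB.
 */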
4333
4334 static RAMBlockNotifier ram_mig_ram_notifier = {
4335 .ram_block_resized = ram_mig_ram_block_resized,
4336 };
4337
4338 void ram_mig_init(void)
4339 {
4340 qemu_mutex_init(&XBZRLE.lock);
4341 register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
4342 ram_block_notifier_add(&ram_mig_ram_notifier);
4343 }