migration: Make compression co-work with xbzrle
[mirror_qemu.git] / arch_init.c
1 /*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24 #include <stdint.h>
25 #include <stdarg.h>
26 #include <stdlib.h>
27 #include <zlib.h>
28 #ifndef _WIN32
29 #include <sys/types.h>
30 #include <sys/mman.h>
31 #endif
32 #include "config.h"
33 #include "monitor/monitor.h"
34 #include "sysemu/sysemu.h"
35 #include "qemu/bitops.h"
36 #include "qemu/bitmap.h"
37 #include "sysemu/arch_init.h"
38 #include "audio/audio.h"
39 #include "hw/i386/pc.h"
40 #include "hw/pci/pci.h"
41 #include "hw/audio/audio.h"
42 #include "sysemu/kvm.h"
43 #include "migration/migration.h"
44 #include "hw/i386/smbios.h"
45 #include "exec/address-spaces.h"
46 #include "hw/audio/pcspk.h"
47 #include "migration/page_cache.h"
48 #include "qemu/config-file.h"
49 #include "qemu/error-report.h"
50 #include "qmp-commands.h"
51 #include "trace.h"
52 #include "exec/cpu-all.h"
53 #include "exec/ram_addr.h"
54 #include "hw/acpi/acpi.h"
55 #include "qemu/host-utils.h"
56 #include "qemu/rcu_queue.h"
57
58 #ifdef DEBUG_ARCH_INIT
59 #define DPRINTF(fmt, ...) \
60 do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
61 #else
62 #define DPRINTF(fmt, ...) \
63 do { } while (0)
64 #endif
65
66 #ifdef TARGET_SPARC
67 int graphic_width = 1024;
68 int graphic_height = 768;
69 int graphic_depth = 8;
70 #else
71 int graphic_width = 800;
72 int graphic_height = 600;
73 int graphic_depth = 32;
74 #endif
75
76
77 #if defined(TARGET_ALPHA)
78 #define QEMU_ARCH QEMU_ARCH_ALPHA
79 #elif defined(TARGET_ARM)
80 #define QEMU_ARCH QEMU_ARCH_ARM
81 #elif defined(TARGET_CRIS)
82 #define QEMU_ARCH QEMU_ARCH_CRIS
83 #elif defined(TARGET_I386)
84 #define QEMU_ARCH QEMU_ARCH_I386
85 #elif defined(TARGET_M68K)
86 #define QEMU_ARCH QEMU_ARCH_M68K
87 #elif defined(TARGET_LM32)
88 #define QEMU_ARCH QEMU_ARCH_LM32
89 #elif defined(TARGET_MICROBLAZE)
90 #define QEMU_ARCH QEMU_ARCH_MICROBLAZE
91 #elif defined(TARGET_MIPS)
92 #define QEMU_ARCH QEMU_ARCH_MIPS
93 #elif defined(TARGET_MOXIE)
94 #define QEMU_ARCH QEMU_ARCH_MOXIE
95 #elif defined(TARGET_OPENRISC)
96 #define QEMU_ARCH QEMU_ARCH_OPENRISC
97 #elif defined(TARGET_PPC)
98 #define QEMU_ARCH QEMU_ARCH_PPC
99 #elif defined(TARGET_S390X)
100 #define QEMU_ARCH QEMU_ARCH_S390X
101 #elif defined(TARGET_SH4)
102 #define QEMU_ARCH QEMU_ARCH_SH4
103 #elif defined(TARGET_SPARC)
104 #define QEMU_ARCH QEMU_ARCH_SPARC
105 #elif defined(TARGET_XTENSA)
106 #define QEMU_ARCH QEMU_ARCH_XTENSA
107 #elif defined(TARGET_UNICORE32)
108 #define QEMU_ARCH QEMU_ARCH_UNICORE32
109 #elif defined(TARGET_TRICORE)
110 #define QEMU_ARCH QEMU_ARCH_TRICORE
111 #endif
112
113 const uint32_t arch_type = QEMU_ARCH;
114 static bool mig_throttle_on;
115 static int dirty_rate_high_cnt;
116 static void check_guest_throttling(void);
117
118 static uint64_t bitmap_sync_count;
119
120 /***********************************************************/
121 /* ram save/restore */
122
123 #define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
124 #define RAM_SAVE_FLAG_COMPRESS 0x02
125 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
126 #define RAM_SAVE_FLAG_PAGE 0x08
127 #define RAM_SAVE_FLAG_EOS 0x10
128 #define RAM_SAVE_FLAG_CONTINUE 0x20
129 #define RAM_SAVE_FLAG_XBZRLE 0x40
130 /* 0x80 is reserved in migration.h; start with 0x100 next */
131 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
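/*
 * Note on the encoding (illustrative, not upstream text): RAM offsets are
 * TARGET_PAGE_SIZE aligned, so the low bits of the be64 offset written by
 * save_page_header() are free to carry the flags above. The receive side
 * in ram_load() undoes this:
 *
 *     addr  = qemu_get_be64(f);
 *     flags = addr & ~TARGET_PAGE_MASK;
 *     addr &= TARGET_PAGE_MASK;
 */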
132
133 static struct defconfig_file {
134 const char *filename;
135 /* Indicates it is a user config file (disabled by -no-user-config) */
136 bool userconfig;
137 } default_config_files[] = {
138 { CONFIG_QEMU_CONFDIR "/qemu.conf", true },
139 { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
140 { NULL }, /* end of list */
141 };
142
143 static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
144
145 int qemu_read_default_config_files(bool userconfig)
146 {
147 int ret;
148 struct defconfig_file *f;
149
150 for (f = default_config_files; f->filename; f++) {
151 if (!userconfig && f->userconfig) {
152 continue;
153 }
154 ret = qemu_read_config_file(f->filename);
155 if (ret < 0 && ret != -ENOENT) {
156 return ret;
157 }
158 }
159
160 return 0;
161 }
162
163 static inline bool is_zero_range(uint8_t *p, uint64_t size)
164 {
165 return buffer_find_nonzero_offset(p, size) == size;
166 }
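/*
 * buffer_find_nonzero_offset() returns the offset of the first non-zero
 * byte, or `size` when the whole range is zero, so the comparison above
 * is an "is this range all zeroes?" test. For example, on a freshly
 * zeroed page, is_zero_range(p, TARGET_PAGE_SIZE) returns true.
 */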
167
168 /* This struct contains the XBZRLE cache and a static page
169 used by the compression */
170 static struct {
171 /* buffer used for XBZRLE encoding */
172 uint8_t *encoded_buf;
173 /* buffer for storing page content */
174 uint8_t *current_buf;
175 /* Cache for XBZRLE, Protected by lock. */
176 PageCache *cache;
177 QemuMutex lock;
178 } XBZRLE;
179
180 /* buffer used for XBZRLE decoding */
181 static uint8_t *xbzrle_decoded_buf;
182
183 static void XBZRLE_cache_lock(void)
184 {
185 if (migrate_use_xbzrle())
186 qemu_mutex_lock(&XBZRLE.lock);
187 }
188
189 static void XBZRLE_cache_unlock(void)
190 {
191 if (migrate_use_xbzrle())
192 qemu_mutex_unlock(&XBZRLE.lock);
193 }
194
195 /*
196 * called from qmp_migrate_set_cache_size in main thread, possibly while
197 * a migration is in progress.
198 * A running migration may be using the cache and might finish during this
199 * call, hence changes to the cache are protected by XBZRLE.lock.
200 */
201 int64_t xbzrle_cache_resize(int64_t new_size)
202 {
203 PageCache *new_cache;
204 int64_t ret;
205
206 if (new_size < TARGET_PAGE_SIZE) {
207 return -1;
208 }
209
210 XBZRLE_cache_lock();
211
212 if (XBZRLE.cache != NULL) {
213 if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
214 goto out_new_size;
215 }
216 new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
217 TARGET_PAGE_SIZE);
218 if (!new_cache) {
219 error_report("Error creating cache");
220 ret = -1;
221 goto out;
222 }
223
224 cache_fini(XBZRLE.cache);
225 XBZRLE.cache = new_cache;
226 }
227
228 out_new_size:
229 ret = pow2floor(new_size);
230 out:
231 XBZRLE_cache_unlock();
232 return ret;
233 }
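/* Usage sketch (illustrative values): the new size is rounded down to a
 * power of two, so asking for 5 MiB yields a 4 MiB cache:
 */
#if 0
    int64_t got = xbzrle_cache_resize(5 * 1024 * 1024);
    /* got == 4 * 1024 * 1024 on success; -1 if the request was smaller
     * than TARGET_PAGE_SIZE or the new cache could not be allocated. */
#endif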
234
235 /* accounting for migration statistics */
236 typedef struct AccountingInfo {
237 uint64_t dup_pages;
238 uint64_t skipped_pages;
239 uint64_t norm_pages;
240 uint64_t iterations;
241 uint64_t xbzrle_bytes;
242 uint64_t xbzrle_pages;
243 uint64_t xbzrle_cache_miss;
244 double xbzrle_cache_miss_rate;
245 uint64_t xbzrle_overflows;
246 } AccountingInfo;
247
248 static AccountingInfo acct_info;
249
250 static void acct_clear(void)
251 {
252 memset(&acct_info, 0, sizeof(acct_info));
253 }
254
255 uint64_t dup_mig_bytes_transferred(void)
256 {
257 return acct_info.dup_pages * TARGET_PAGE_SIZE;
258 }
259
260 uint64_t dup_mig_pages_transferred(void)
261 {
262 return acct_info.dup_pages;
263 }
264
265 uint64_t skipped_mig_bytes_transferred(void)
266 {
267 return acct_info.skipped_pages * TARGET_PAGE_SIZE;
268 }
269
270 uint64_t skipped_mig_pages_transferred(void)
271 {
272 return acct_info.skipped_pages;
273 }
274
275 uint64_t norm_mig_bytes_transferred(void)
276 {
277 return acct_info.norm_pages * TARGET_PAGE_SIZE;
278 }
279
280 uint64_t norm_mig_pages_transferred(void)
281 {
282 return acct_info.norm_pages;
283 }
284
285 uint64_t xbzrle_mig_bytes_transferred(void)
286 {
287 return acct_info.xbzrle_bytes;
288 }
289
290 uint64_t xbzrle_mig_pages_transferred(void)
291 {
292 return acct_info.xbzrle_pages;
293 }
294
295 uint64_t xbzrle_mig_pages_cache_miss(void)
296 {
297 return acct_info.xbzrle_cache_miss;
298 }
299
300 double xbzrle_mig_cache_miss_rate(void)
301 {
302 return acct_info.xbzrle_cache_miss_rate;
303 }
304
305 uint64_t xbzrle_mig_pages_overflow(void)
306 {
307 return acct_info.xbzrle_overflows;
308 }
309
310 /* This is the last block that we have visited searching for dirty pages
311 */
312 static RAMBlock *last_seen_block;
313 /* This is the last block from where we have sent data */
314 static RAMBlock *last_sent_block;
315 static ram_addr_t last_offset;
316 static unsigned long *migration_bitmap;
317 static uint64_t migration_dirty_pages;
318 static uint32_t last_version;
319 static bool ram_bulk_stage;
320
321 struct CompressParam {
322 bool start;
323 bool done;
324 QEMUFile *file;
325 QemuMutex mutex;
326 QemuCond cond;
327 RAMBlock *block;
328 ram_addr_t offset;
329 };
330 typedef struct CompressParam CompressParam;
331
332 struct DecompressParam {
333 bool start;
334 QemuMutex mutex;
335 QemuCond cond;
336 void *des;
337 uint8_t *compbuf;
338 int len;
339 };
340 typedef struct DecompressParam DecompressParam;
341
342 static CompressParam *comp_param;
343 static QemuThread *compress_threads;
344 /* comp_done_cond is used to wake up the migration thread when
345 * one of the compression threads has finished the compression.
346 * comp_done_lock is the mutex paired with comp_done_cond.
347 */
348 static QemuMutex *comp_done_lock;
349 static QemuCond *comp_done_cond;
350 /* The empty QEMUFileOps will be used by file in CompressParam */
351 static const QEMUFileOps empty_ops = { };
352
353 static bool compression_switch;
354 static bool quit_comp_thread;
355 static bool quit_decomp_thread;
356 static DecompressParam *decomp_param;
357 static QemuThread *decompress_threads;
358 static uint8_t *compressed_data_buf;
359
360 static int do_compress_ram_page(CompressParam *param);
361
362 static void *do_data_compress(void *opaque)
363 {
364 CompressParam *param = opaque;
365
366 while (!quit_comp_thread) {
367 qemu_mutex_lock(&param->mutex);
368 /* Re-check quit_comp_thread in case
369 * terminate_compression_threads() is called just before
370 * qemu_mutex_lock(&param->mutex) and after
371 * while (!quit_comp_thread); re-checking it here makes
372 * sure the compression thread terminates as expected.
373 */
374 while (!param->start && !quit_comp_thread) {
375 qemu_cond_wait(&param->cond, &param->mutex);
376 }
377 if (!quit_comp_thread) {
378 do_compress_ram_page(param);
379 }
380 param->start = false;
381 qemu_mutex_unlock(&param->mutex);
382
383 qemu_mutex_lock(comp_done_lock);
384 param->done = true;
385 qemu_cond_signal(comp_done_cond);
386 qemu_mutex_unlock(comp_done_lock);
387 }
388
389 return NULL;
390 }
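/*
 * Hand-off sketch (informal): each worker uses two lock domains.
 * param->mutex/param->cond carry the "start" signal from the migration
 * thread (see start_compression() below); comp_done_lock/comp_done_cond
 * carry the "done" signal back (see flush_compressed_data() and
 * compress_page_with_multi_thread()):
 *
 *     migration thread                     compression thread
 *     ----------------                     ------------------
 *     done = false; start = true  ------>  wakes on param->cond,
 *     signal param->cond                   do_compress_ram_page(),
 *                                          start = false
 *     wait on comp_done_cond     <------   done = true,
 *                                          signal comp_done_cond
 */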
391
392 static inline void terminate_compression_threads(void)
393 {
394 int idx, thread_count;
395
396 thread_count = migrate_compress_threads();
397 quit_comp_thread = true;
398 for (idx = 0; idx < thread_count; idx++) {
399 qemu_mutex_lock(&comp_param[idx].mutex);
400 qemu_cond_signal(&comp_param[idx].cond);
401 qemu_mutex_unlock(&comp_param[idx].mutex);
402 }
403 }
404
405 void migrate_compress_threads_join(void)
406 {
407 int i, thread_count;
408
409 if (!migrate_use_compression()) {
410 return;
411 }
412 terminate_compression_threads();
413 thread_count = migrate_compress_threads();
414 for (i = 0; i < thread_count; i++) {
415 qemu_thread_join(compress_threads + i);
416 qemu_fclose(comp_param[i].file);
417 qemu_mutex_destroy(&comp_param[i].mutex);
418 qemu_cond_destroy(&comp_param[i].cond);
419 }
420 qemu_mutex_destroy(comp_done_lock);
421 qemu_cond_destroy(comp_done_cond);
422 g_free(compress_threads);
423 g_free(comp_param);
424 g_free(comp_done_cond);
425 g_free(comp_done_lock);
426 compress_threads = NULL;
427 comp_param = NULL;
428 comp_done_cond = NULL;
429 comp_done_lock = NULL;
430 }
431
432 void migrate_compress_threads_create(void)
433 {
434 int i, thread_count;
435
436 if (!migrate_use_compression()) {
437 return;
438 }
439 quit_comp_thread = false;
440 compression_switch = true;
441 thread_count = migrate_compress_threads();
442 compress_threads = g_new0(QemuThread, thread_count);
443 comp_param = g_new0(CompressParam, thread_count);
444 comp_done_cond = g_new0(QemuCond, 1);
445 comp_done_lock = g_new0(QemuMutex, 1);
446 qemu_cond_init(comp_done_cond);
447 qemu_mutex_init(comp_done_lock);
448 for (i = 0; i < thread_count; i++) {
449 /* comp_param[i].file is just used as a dummy buffer to save data, set
450 * its ops to empty.
451 */
452 comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
453 comp_param[i].done = true;
454 qemu_mutex_init(&comp_param[i].mutex);
455 qemu_cond_init(&comp_param[i].cond);
456 qemu_thread_create(compress_threads + i, "compress",
457 do_data_compress, comp_param + i,
458 QEMU_THREAD_JOINABLE);
459 }
460 }
461
462 /**
463 * save_page_header: Write page header to wire
464 *
465 * If this is the 1st block, it also writes the block identification
466 *
467 * Returns: Number of bytes written
468 *
469 * @f: QEMUFile where to send the data
470 * @block: block that contains the page we want to send
471 * @offset: offset inside the block for the page
472 * in the lower bits, it contains flags
473 */
474 static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
475 {
476 size_t size;
477
478 qemu_put_be64(f, offset);
479 size = 8;
480
481 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
482 qemu_put_byte(f, strlen(block->idstr));
483 qemu_put_buffer(f, (uint8_t *)block->idstr,
484 strlen(block->idstr));
485 size += 1 + strlen(block->idstr);
486 }
487 return size;
488 }
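/*
 * Wire layout produced above (sketch): one be64 word holding
 * (page offset | flags), optionally followed by the RAMBlock name when
 * this is the first page sent from a new block:
 *
 *     +--------------------+----------+---------------------+
 *     | be64 offset|flags  | u8 idlen | idstr (idlen bytes) |
 *     +--------------------+----------+---------------------+
 *                           \_ omitted if RAM_SAVE_FLAG_CONTINUE _/
 */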
489
490 /* Update the xbzrle cache to reflect a page that's been sent as all 0.
491 * The important thing is that a stale (not-yet-0'd) page be replaced
492 * by the new data.
493 * As a bonus, if the page wasn't in the cache it gets added so that
494 * when a small write is made into the 0'd page it is sent via XBZRLE
495 */
496 static void xbzrle_cache_zero_page(ram_addr_t current_addr)
497 {
498 if (ram_bulk_stage || !migrate_use_xbzrle()) {
499 return;
500 }
501
502 /* We don't care if this fails to allocate a new cache page
503 * as long as it updates an old one */
504 cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
505 bitmap_sync_count);
506 }
507
508 #define ENCODING_FLAG_XBZRLE 0x1
509
510 /**
511 * save_xbzrle_page: compress and send current page
512 *
513 * Returns: 1 means that we wrote the page
514 * 0 means that page is identical to the one already sent
515 * -1 means that xbzrle would be longer than normal
516 *
517 * @f: QEMUFile where to send the data
518 * @current_data:
519 * @current_addr:
520 * @block: block that contains the page we want to send
521 * @offset: offset inside the block for the page
522 * @last_stage: if we are at the completion stage
523 * @bytes_transferred: increase it with the number of transferred bytes
524 */
525 static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
526 ram_addr_t current_addr, RAMBlock *block,
527 ram_addr_t offset, bool last_stage,
528 uint64_t *bytes_transferred)
529 {
530 int encoded_len = 0, bytes_xbzrle;
531 uint8_t *prev_cached_page;
532
533 if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
534 acct_info.xbzrle_cache_miss++;
535 if (!last_stage) {
536 if (cache_insert(XBZRLE.cache, current_addr, *current_data,
537 bitmap_sync_count) == -1) {
538 return -1;
539 } else {
540 /* update *current_data when the page has been
541 inserted into cache */
542 *current_data = get_cached_data(XBZRLE.cache, current_addr);
543 }
544 }
545 return -1;
546 }
547
548 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
549
550 /* save current buffer into memory */
551 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
552
553 /* XBZRLE encoding (if there is no overflow) */
554 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
555 TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
556 TARGET_PAGE_SIZE);
557 if (encoded_len == 0) {
558 DPRINTF("Skipping unmodified page\n");
559 return 0;
560 } else if (encoded_len == -1) {
561 DPRINTF("Overflow\n");
562 acct_info.xbzrle_overflows++;
563 /* update data in the cache */
564 if (!last_stage) {
565 memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
566 *current_data = prev_cached_page;
567 }
568 return -1;
569 }
570
571 /* Update the cache so the next encode diffs against the data we just sent */
572 if (!last_stage) {
573 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
574 }
575
576 /* Send XBZRLE based compressed page */
577 bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
578 qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
579 qemu_put_be16(f, encoded_len);
580 qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
581 bytes_xbzrle += encoded_len + 1 + 2;
582 acct_info.xbzrle_pages++;
583 acct_info.xbzrle_bytes += bytes_xbzrle;
584 *bytes_transferred += bytes_xbzrle;
585
586 return 1;
587 }
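/*
 * Stream produced for a successfully encoded page (decoded again in
 * load_xbzrle()):
 *
 *     save_page_header()     be64 offset | RAM_SAVE_FLAG_XBZRLE
 *     qemu_put_byte()        u8   ENCODING_FLAG_XBZRLE (0x1)
 *     qemu_put_be16()        be16 encoded_len
 *     qemu_put_buffer()      encoded_len bytes of XBZRLE data
 *
 * hence the "+ encoded_len + 1 + 2" accounting above.
 */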
588
589 static inline
590 ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
591 ram_addr_t start)
592 {
593 unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
594 unsigned long nr = base + (start >> TARGET_PAGE_BITS);
595 uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
596 unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);
597
598 unsigned long next;
599
600 if (ram_bulk_stage && nr > base) {
601 next = nr + 1;
602 } else {
603 next = find_next_bit(migration_bitmap, size, nr);
604 }
605
606 if (next < size) {
607 clear_bit(next, migration_bitmap);
608 migration_dirty_pages--;
609 }
610 return (next - base) << TARGET_PAGE_BITS;
611 }
612
613 static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
614 {
615 bool ret;
616 int nr = addr >> TARGET_PAGE_BITS;
617
618 ret = test_and_set_bit(nr, migration_bitmap);
619
620 if (!ret) {
621 migration_dirty_pages++;
622 }
623 return ret;
624 }
625
626 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
627 {
628 ram_addr_t addr;
629 unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
630
631 /* Is the start address aligned to the start of a bitmap word? */
632 if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
633 int k;
634 int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
635 unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
636
637 for (k = page; k < page + nr; k++) {
638 if (src[k]) {
639 unsigned long new_dirty;
640 new_dirty = ~migration_bitmap[k];
641 migration_bitmap[k] |= src[k];
642 new_dirty &= src[k];
643 migration_dirty_pages += ctpopl(new_dirty);
644 src[k] = 0;
645 }
646 }
647 } else {
648 for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
649 if (cpu_physical_memory_get_dirty(start + addr,
650 TARGET_PAGE_SIZE,
651 DIRTY_MEMORY_MIGRATION)) {
652 cpu_physical_memory_reset_dirty(start + addr,
653 TARGET_PAGE_SIZE,
654 DIRTY_MEMORY_MIGRATION);
655 migration_bitmap_set_dirty(start + addr);
656 }
657 }
658 }
659 }
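/*
 * Worked example of the word-wise merge above (illustrative values):
 * with migration_bitmap[k] = 0b0011 and src[k] = 0b0110,
 *
 *     new_dirty = ~0b0011 & 0b0110      = 0b0100  (one newly dirty page)
 *     migration_bitmap[k] |= src[k]    -> 0b0111
 *     migration_dirty_pages += ctpopl(0b0100)     (+= 1)
 *     src[k] = 0
 */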
660
661
662 /* Fix me: there are too many global variables used in the migration process. */
663 static int64_t start_time;
664 static int64_t bytes_xfer_prev;
665 static int64_t num_dirty_pages_period;
666
667 static void migration_bitmap_sync_init(void)
668 {
669 start_time = 0;
670 bytes_xfer_prev = 0;
671 num_dirty_pages_period = 0;
672 }
673
674 /* Called with iothread lock held, to protect ram_list.dirty_memory[] */
675 static void migration_bitmap_sync(void)
676 {
677 RAMBlock *block;
678 uint64_t num_dirty_pages_init = migration_dirty_pages;
679 MigrationState *s = migrate_get_current();
680 int64_t end_time;
681 int64_t bytes_xfer_now;
682 static uint64_t xbzrle_cache_miss_prev;
683 static uint64_t iterations_prev;
684
685 bitmap_sync_count++;
686
687 if (!bytes_xfer_prev) {
688 bytes_xfer_prev = ram_bytes_transferred();
689 }
690
691 if (!start_time) {
692 start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
693 }
694
695 trace_migration_bitmap_sync_start();
696 address_space_sync_dirty_bitmap(&address_space_memory);
697
698 rcu_read_lock();
699 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
700 migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
701 }
702 rcu_read_unlock();
703
704 trace_migration_bitmap_sync_end(migration_dirty_pages
705 - num_dirty_pages_init);
706 num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
707 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
708
709 /* more than 1 second = 1000 milliseconds */
710 if (end_time > start_time + 1000) {
711 if (migrate_auto_converge()) {
712 /* The following detection logic can be refined later. For now:
713 Check to see if the bytes dirtied exceed 50% of the approx.
714 amount of bytes that just got transferred since the last time we
715 were in this routine. If that happens >N times (for now N==4)
716 we turn on the throttle down logic */
717 bytes_xfer_now = ram_bytes_transferred();
718 if (s->dirty_pages_rate &&
719 (num_dirty_pages_period * TARGET_PAGE_SIZE >
720 (bytes_xfer_now - bytes_xfer_prev)/2) &&
721 (dirty_rate_high_cnt++ > 4)) {
722 trace_migration_throttle();
723 mig_throttle_on = true;
724 dirty_rate_high_cnt = 0;
725 }
726 bytes_xfer_prev = bytes_xfer_now;
727 } else {
728 mig_throttle_on = false;
729 }
730 if (migrate_use_xbzrle()) {
731 if (iterations_prev != 0) {
732 acct_info.xbzrle_cache_miss_rate =
733 (double)(acct_info.xbzrle_cache_miss -
734 xbzrle_cache_miss_prev) /
735 (acct_info.iterations - iterations_prev);
736 }
737 iterations_prev = acct_info.iterations;
738 xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
739 }
740 s->dirty_pages_rate = num_dirty_pages_period * 1000
741 / (end_time - start_time);
742 s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
743 start_time = end_time;
744 num_dirty_pages_period = 0;
745 s->dirty_sync_count = bitmap_sync_count;
746 }
747 }
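/*
 * Example of the rate computation above (hypothetical numbers): if
 * 12800 pages were dirtied over a 2000 ms window, then
 * dirty_pages_rate = 12800 * 1000 / 2000 = 6400 pages/s and
 * dirty_bytes_rate = 6400 * TARGET_PAGE_SIZE bytes/s.
 */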
748
749 /**
750 * save_zero_page: Send the zero page to the stream
751 *
752 * Returns: Number of pages written.
753 *
754 * @f: QEMUFile where to send the data
755 * @block: block that contains the page we want to send
756 * @offset: offset inside the block for the page
757 * @p: pointer to the page
758 * @bytes_transferred: increase it with the number of transferred bytes
759 */
760 static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
761 uint8_t *p, uint64_t *bytes_transferred)
762 {
763 int pages = -1;
764
765 if (is_zero_range(p, TARGET_PAGE_SIZE)) {
766 acct_info.dup_pages++;
767 *bytes_transferred += save_page_header(f, block,
768 offset | RAM_SAVE_FLAG_COMPRESS);
769 qemu_put_byte(f, 0);
770 *bytes_transferred += 1;
771 pages = 1;
772 }
773
774 return pages;
775 }
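/*
 * A zero page therefore costs only a page header plus one fill byte on
 * the wire:
 *
 *     save_page_header(f, block, offset | RAM_SAVE_FLAG_COMPRESS);
 *     qemu_put_byte(f, 0);   // fill byte, expanded by ram_handle_compressed()
 */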
776
777 /**
778 * ram_save_page: Send the given page to the stream
779 *
780 * Returns: Number of pages written.
781 *
782 * @f: QEMUFile where to send the data
783 * @block: block that contains the page we want to send
784 * @offset: offset inside the block for the page
785 * @last_stage: if we are at the completion stage
786 * @bytes_transferred: increase it with the number of transferred bytes
787 */
788 static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
789 bool last_stage, uint64_t *bytes_transferred)
790 {
791 int pages = -1;
792 uint64_t bytes_xmit;
793 ram_addr_t current_addr;
794 MemoryRegion *mr = block->mr;
795 uint8_t *p;
796 int ret;
797 bool send_async = true;
798
799 p = memory_region_get_ram_ptr(mr) + offset;
800
801 /* When in doubt, send the page as normal */
802 bytes_xmit = 0;
803 ret = ram_control_save_page(f, block->offset,
804 offset, TARGET_PAGE_SIZE, &bytes_xmit);
805 if (bytes_xmit) {
806 *bytes_transferred += bytes_xmit;
807 pages = 1;
808 }
809
810 XBZRLE_cache_lock();
811
812 current_addr = block->offset + offset;
813
814 if (block == last_sent_block) {
815 offset |= RAM_SAVE_FLAG_CONTINUE;
816 }
817 if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
818 if (ret != RAM_SAVE_CONTROL_DELAYED) {
819 if (bytes_xmit > 0) {
820 acct_info.norm_pages++;
821 } else if (bytes_xmit == 0) {
822 acct_info.dup_pages++;
823 }
824 }
825 } else {
826 pages = save_zero_page(f, block, offset, p, bytes_transferred);
827 if (pages > 0) {
828 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
829 * page would be stale
830 */
831 xbzrle_cache_zero_page(current_addr);
832 } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
833 pages = save_xbzrle_page(f, &p, current_addr, block,
834 offset, last_stage, bytes_transferred);
835 if (!last_stage) {
836 /* Can't send this cached data async, since the cache page
837 * might get updated before it gets to the wire
838 */
839 send_async = false;
840 }
841 }
842 }
843
844 /* XBZRLE overflow or normal page */
845 if (pages == -1) {
846 *bytes_transferred += save_page_header(f, block,
847 offset | RAM_SAVE_FLAG_PAGE);
848 if (send_async) {
849 qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
850 } else {
851 qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
852 }
853 *bytes_transferred += TARGET_PAGE_SIZE;
854 pages = 1;
855 acct_info.norm_pages++;
856 }
857
858 XBZRLE_cache_unlock();
859
860 return pages;
861 }
862
863 static int do_compress_ram_page(CompressParam *param)
864 {
865 int bytes_sent, blen;
866 uint8_t *p;
867 RAMBlock *block = param->block;
868 ram_addr_t offset = param->offset;
869
870 p = memory_region_get_ram_ptr(block->mr) + (offset & TARGET_PAGE_MASK);
871
872 bytes_sent = save_page_header(param->file, block, offset |
873 RAM_SAVE_FLAG_COMPRESS_PAGE);
874 blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE,
875 migrate_compress_level());
876 bytes_sent += blen;
877
878 return bytes_sent;
879 }
880
881 static inline void start_compression(CompressParam *param)
882 {
883 param->done = false;
884 qemu_mutex_lock(&param->mutex);
885 param->start = true;
886 qemu_cond_signal(&param->cond);
887 qemu_mutex_unlock(&param->mutex);
888 }
889
890
891 static uint64_t bytes_transferred;
892
893 static void flush_compressed_data(QEMUFile *f)
894 {
895 int idx, len, thread_count;
896
897 if (!migrate_use_compression()) {
898 return;
899 }
900 thread_count = migrate_compress_threads();
901 for (idx = 0; idx < thread_count; idx++) {
902 if (!comp_param[idx].done) {
903 qemu_mutex_lock(comp_done_lock);
904 while (!comp_param[idx].done && !quit_comp_thread) {
905 qemu_cond_wait(comp_done_cond, comp_done_lock);
906 }
907 qemu_mutex_unlock(comp_done_lock);
908 }
909 if (!quit_comp_thread) {
910 len = qemu_put_qemu_file(f, comp_param[idx].file);
911 bytes_transferred += len;
912 }
913 }
914 }
915
916 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
917 ram_addr_t offset)
918 {
919 param->block = block;
920 param->offset = offset;
921 }
922
923 static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
924 ram_addr_t offset,
925 uint64_t *bytes_transferred)
926 {
927 int idx, thread_count, bytes_xmit = -1, pages = -1;
928
929 thread_count = migrate_compress_threads();
930 qemu_mutex_lock(comp_done_lock);
931 while (true) {
932 for (idx = 0; idx < thread_count; idx++) {
933 if (comp_param[idx].done) {
934 bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
935 set_compress_params(&comp_param[idx], block, offset);
936 start_compression(&comp_param[idx]);
937 pages = 1;
938 acct_info.norm_pages++;
939 *bytes_transferred += bytes_xmit;
940 break;
941 }
942 }
943 if (pages > 0) {
944 break;
945 } else {
946 qemu_cond_wait(comp_done_cond, comp_done_lock);
947 }
948 }
949 qemu_mutex_unlock(comp_done_lock);
950
951 return pages;
952 }
953
954 /**
955 * ram_save_compressed_page: compress the given page and send it to the stream
956 *
957 * Returns: Number of pages written.
958 *
959 * @f: QEMUFile where to send the data
960 * @block: block that contains the page we want to send
961 * @offset: offset inside the block for the page
962 * @last_stage: if we are at the completion stage
963 * @bytes_transferred: increase it with the number of transferred bytes
964 */
965 static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block,
966 ram_addr_t offset, bool last_stage,
967 uint64_t *bytes_transferred)
968 {
969 int pages = -1;
970 uint64_t bytes_xmit;
971 MemoryRegion *mr = block->mr;
972 uint8_t *p;
973 int ret;
974
975 p = memory_region_get_ram_ptr(mr) + offset;
976
977 bytes_xmit = 0;
978 ret = ram_control_save_page(f, block->offset,
979 offset, TARGET_PAGE_SIZE, &bytes_xmit);
980 if (bytes_xmit) {
981 *bytes_transferred += bytes_xmit;
982 pages = 1;
983 }
984 if (block == last_sent_block) {
985 offset |= RAM_SAVE_FLAG_CONTINUE;
986 }
987 if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
988 if (ret != RAM_SAVE_CONTROL_DELAYED) {
989 if (bytes_xmit > 0) {
990 acct_info.norm_pages++;
991 } else if (bytes_xmit == 0) {
992 acct_info.dup_pages++;
993 }
994 }
995 } else {
996 /* When starting on a new block, the first page of the block
997 * should be sent out before the other pages in the same block,
998 * and all the pages in the previous block should have been sent
999 * out already. Keeping this order is important, because the
1000 * 'cont' flag is used to avoid resending the block name.
1001 */
1002 if (block != last_sent_block) {
1003 flush_compressed_data(f);
1004 pages = save_zero_page(f, block, offset, p, bytes_transferred);
1005 if (pages == -1) {
1006 set_compress_params(&comp_param[0], block, offset);
1007 /* Use the qemu thread to compress the data to make sure the
1008 * first page is sent out before other pages
1009 */
1010 bytes_xmit = do_compress_ram_page(&comp_param[0]);
1011 acct_info.norm_pages++;
1012 qemu_put_qemu_file(f, comp_param[0].file);
1013 *bytes_transferred += bytes_xmit;
1014 pages = 1;
1015 }
1016 } else {
1017 pages = save_zero_page(f, block, offset, p, bytes_transferred);
1018 if (pages == -1) {
1019 pages = compress_page_with_multi_thread(f, block, offset,
1020 bytes_transferred);
1021 }
1022 }
1023 }
1024
1025 return pages;
1026 }
1027
1028 /**
1029 * ram_find_and_save_block: Finds a dirty page and sends it to f
1030 *
1031 * Called within an RCU critical section.
1032 *
1033 * Returns: The number of pages written
1034 * 0 means no dirty pages
1035 *
1036 * @f: QEMUFile where to send the data
1037 * @last_stage: if we are at the completion stage
1038 * @bytes_transferred: increase it with the number of transferred bytes
1039 */
1040
1041 static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
1042 uint64_t *bytes_transferred)
1043 {
1044 RAMBlock *block = last_seen_block;
1045 ram_addr_t offset = last_offset;
1046 bool complete_round = false;
1047 int pages = 0;
1048 MemoryRegion *mr;
1049
1050 if (!block)
1051 block = QLIST_FIRST_RCU(&ram_list.blocks);
1052
1053 while (true) {
1054 mr = block->mr;
1055 offset = migration_bitmap_find_and_reset_dirty(mr, offset);
1056 if (complete_round && block == last_seen_block &&
1057 offset >= last_offset) {
1058 break;
1059 }
1060 if (offset >= block->used_length) {
1061 offset = 0;
1062 block = QLIST_NEXT_RCU(block, next);
1063 if (!block) {
1064 block = QLIST_FIRST_RCU(&ram_list.blocks);
1065 complete_round = true;
1066 ram_bulk_stage = false;
1067 if (migrate_use_xbzrle()) {
1068 /* If xbzrle is on, stop using the data compression at this
1069 * point. In theory, xbzrle can do better than compression.
1070 */
1071 flush_compressed_data(f);
1072 compression_switch = false;
1073 }
1074 }
1075 } else {
1076 if (compression_switch && migrate_use_compression()) {
1077 pages = ram_save_compressed_page(f, block, offset, last_stage,
1078 bytes_transferred);
1079 } else {
1080 pages = ram_save_page(f, block, offset, last_stage,
1081 bytes_transferred);
1082 }
1083
1084 /* if page is unmodified, continue to the next */
1085 if (pages > 0) {
1086 last_sent_block = block;
1087 break;
1088 }
1089 }
1090 }
1091
1092 last_seen_block = block;
1093 last_offset = offset;
1094
1095 return pages;
1096 }
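/*
 * Scan-order sketch: the search resumes from (last_seen_block,
 * last_offset), walks each block's part of the dirty bitmap, wraps from
 * the last block back to the first (ending the bulk stage), and only
 * terminates after one complete round without finding a dirty page.
 */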
1097
1098 void acct_update_position(QEMUFile *f, size_t size, bool zero)
1099 {
1100 uint64_t pages = size / TARGET_PAGE_SIZE;
1101 if (zero) {
1102 acct_info.dup_pages += pages;
1103 } else {
1104 acct_info.norm_pages += pages;
1105 bytes_transferred += size;
1106 qemu_update_position(f, size);
1107 }
1108 }
1109
1110 static ram_addr_t ram_save_remaining(void)
1111 {
1112 return migration_dirty_pages;
1113 }
1114
1115 uint64_t ram_bytes_remaining(void)
1116 {
1117 return ram_save_remaining() * TARGET_PAGE_SIZE;
1118 }
1119
1120 uint64_t ram_bytes_transferred(void)
1121 {
1122 return bytes_transferred;
1123 }
1124
1125 uint64_t ram_bytes_total(void)
1126 {
1127 RAMBlock *block;
1128 uint64_t total = 0;
1129
1130 rcu_read_lock();
1131 QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
1132 total += block->used_length;
1133 rcu_read_unlock();
1134 return total;
1135 }
1136
1137 void free_xbzrle_decoded_buf(void)
1138 {
1139 g_free(xbzrle_decoded_buf);
1140 xbzrle_decoded_buf = NULL;
1141 }
1142
1143 static void migration_end(void)
1144 {
1145 if (migration_bitmap) {
1146 memory_global_dirty_log_stop();
1147 g_free(migration_bitmap);
1148 migration_bitmap = NULL;
1149 }
1150
1151 XBZRLE_cache_lock();
1152 if (XBZRLE.cache) {
1153 cache_fini(XBZRLE.cache);
1154 g_free(XBZRLE.encoded_buf);
1155 g_free(XBZRLE.current_buf);
1156 XBZRLE.cache = NULL;
1157 XBZRLE.encoded_buf = NULL;
1158 XBZRLE.current_buf = NULL;
1159 }
1160 XBZRLE_cache_unlock();
1161 }
1162
1163 static void ram_migration_cancel(void *opaque)
1164 {
1165 migration_end();
1166 }
1167
1168 static void reset_ram_globals(void)
1169 {
1170 last_seen_block = NULL;
1171 last_sent_block = NULL;
1172 last_offset = 0;
1173 last_version = ram_list.version;
1174 ram_bulk_stage = true;
1175 }
1176
1177 #define MAX_WAIT 50 /* ms, half buffered_file limit */
1178
1179
1180 /* Each of ram_save_setup, ram_save_iterate and ram_save_complete has a
1181 * long-running RCU critical section. When RCU reclaims in the code
1182 * start to become numerous, it will be necessary to reduce the
1183 * granularity of these critical sections.
1184 */
1185
1186 static int ram_save_setup(QEMUFile *f, void *opaque)
1187 {
1188 RAMBlock *block;
1189 int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
1190
1191 mig_throttle_on = false;
1192 dirty_rate_high_cnt = 0;
1193 bitmap_sync_count = 0;
1194 migration_bitmap_sync_init();
1195
1196 if (migrate_use_xbzrle()) {
1197 XBZRLE_cache_lock();
1198 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
1199 TARGET_PAGE_SIZE,
1200 TARGET_PAGE_SIZE);
1201 if (!XBZRLE.cache) {
1202 XBZRLE_cache_unlock();
1203 error_report("Error creating cache");
1204 return -1;
1205 }
1206 XBZRLE_cache_unlock();
1207
1208 /* We prefer not to abort if there is no memory */
1209 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
1210 if (!XBZRLE.encoded_buf) {
1211 error_report("Error allocating encoded_buf");
1212 return -1;
1213 }
1214
1215 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
1216 if (!XBZRLE.current_buf) {
1217 error_report("Error allocating current_buf");
1218 g_free(XBZRLE.encoded_buf);
1219 XBZRLE.encoded_buf = NULL;
1220 return -1;
1221 }
1222
1223 acct_clear();
1224 }
1225
1226 /* iothread lock needed for ram_list.dirty_memory[] */
1227 qemu_mutex_lock_iothread();
1228 qemu_mutex_lock_ramlist();
1229 rcu_read_lock();
1230 bytes_transferred = 0;
1231 reset_ram_globals();
1232
1233 ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
1234 migration_bitmap = bitmap_new(ram_bitmap_pages);
1235 bitmap_set(migration_bitmap, 0, ram_bitmap_pages);
1236
1237 /*
1238 * Count the total number of pages used by ram blocks not including any
1239 * gaps due to alignment or unplugs.
1240 */
1241 migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
1242
1243 memory_global_dirty_log_start();
1244 migration_bitmap_sync();
1245 qemu_mutex_unlock_ramlist();
1246 qemu_mutex_unlock_iothread();
1247
1248 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
1249
1250 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1251 qemu_put_byte(f, strlen(block->idstr));
1252 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
1253 qemu_put_be64(f, block->used_length);
1254 }
1255
1256 rcu_read_unlock();
1257
1258 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
1259 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
1260
1261 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
1262
1263 return 0;
1264 }
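/*
 * Setup-stage stream layout (consumed by the RAM_SAVE_FLAG_MEM_SIZE
 * branch of ram_load()):
 *
 *     be64  total ram size | RAM_SAVE_FLAG_MEM_SIZE
 *     per block: u8 idlen, idstr, be64 used_length
 *     be64  RAM_SAVE_FLAG_EOS
 */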
1265
1266 static int ram_save_iterate(QEMUFile *f, void *opaque)
1267 {
1268 int ret;
1269 int i;
1270 int64_t t0;
1271 int pages_sent = 0;
1272
1273 rcu_read_lock();
1274 if (ram_list.version != last_version) {
1275 reset_ram_globals();
1276 }
1277
1278 /* Read version before ram_list.blocks */
1279 smp_rmb();
1280
1281 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
1282
1283 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1284 i = 0;
1285 while ((ret = qemu_file_rate_limit(f)) == 0) {
1286 int pages;
1287
1288 pages = ram_find_and_save_block(f, false, &bytes_transferred);
1289 /* no more pages to send */
1290 if (pages == 0) {
1291 break;
1292 }
1293 pages_sent += pages;
1294 acct_info.iterations++;
1295 check_guest_throttling();
1296 /* we want to check in the 1st loop, just in case it was the 1st time
1297 and we had to sync the dirty bitmap.
1298 qemu_clock_get_ns() is a bit expensive, so we only check every few
1299 iterations
1300 */
1301 if ((i & 63) == 0) {
1302 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
1303 if (t1 > MAX_WAIT) {
1304 DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
1305 t1, i);
1306 break;
1307 }
1308 }
1309 i++;
1310 }
1311 flush_compressed_data(f);
1312 rcu_read_unlock();
1313
1314 /*
1315 * Must occur before EOS (or any QEMUFile operation)
1316 * because of RDMA protocol.
1317 */
1318 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
1319
1320 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
1321 bytes_transferred += 8;
1322
1323 ret = qemu_file_get_error(f);
1324 if (ret < 0) {
1325 return ret;
1326 }
1327
1328 return pages_sent;
1329 }
1330
1331 /* Called with iothread lock */
1332 static int ram_save_complete(QEMUFile *f, void *opaque)
1333 {
1334 rcu_read_lock();
1335
1336 migration_bitmap_sync();
1337
1338 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
1339
1340 /* try transferring iterative blocks of memory */
1341
1342 /* flush all remaining blocks regardless of rate limiting */
1343 while (true) {
1344 int pages;
1345
1346 pages = ram_find_and_save_block(f, true, &bytes_transferred);
1347 /* no more blocks to send */
1348 if (pages == 0) {
1349 break;
1350 }
1351 }
1352
1353 flush_compressed_data(f);
1354 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
1355 migration_end();
1356
1357 rcu_read_unlock();
1358 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
1359
1360 return 0;
1361 }
1362
1363 static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
1364 {
1365 uint64_t remaining_size;
1366
1367 remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
1368
1369 if (remaining_size < max_size) {
1370 qemu_mutex_lock_iothread();
1371 rcu_read_lock();
1372 migration_bitmap_sync();
1373 rcu_read_unlock();
1374 qemu_mutex_unlock_iothread();
1375 remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
1376 }
1377 return remaining_size;
1378 }
1379
1380 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
1381 {
1382 unsigned int xh_len;
1383 int xh_flags;
1384
1385 if (!xbzrle_decoded_buf) {
1386 xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
1387 }
1388
1389 /* extract RLE header */
1390 xh_flags = qemu_get_byte(f);
1391 xh_len = qemu_get_be16(f);
1392
1393 if (xh_flags != ENCODING_FLAG_XBZRLE) {
1394 error_report("Failed to load XBZRLE page - wrong compression!");
1395 return -1;
1396 }
1397
1398 if (xh_len > TARGET_PAGE_SIZE) {
1399 error_report("Failed to load XBZRLE page - len overflow!");
1400 return -1;
1401 }
1402 /* load data and decode */
1403 qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);
1404
1405 /* decode RLE */
1406 if (xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
1407 TARGET_PAGE_SIZE) == -1) {
1408 error_report("Failed to load XBZRLE page - decode error!");
1409 return -1;
1410 }
1411
1412 return 0;
1413 }
1414
1415 /* Must be called from within an RCU critical section.
1416 * Returns a pointer from within the RCU-protected ram_list.
1417 */
1418 static inline void *host_from_stream_offset(QEMUFile *f,
1419 ram_addr_t offset,
1420 int flags)
1421 {
1422 static RAMBlock *block = NULL;
1423 char id[256];
1424 uint8_t len;
1425
1426 if (flags & RAM_SAVE_FLAG_CONTINUE) {
1427 if (!block || block->max_length <= offset) {
1428 error_report("Ack, bad migration stream!");
1429 return NULL;
1430 }
1431
1432 return memory_region_get_ram_ptr(block->mr) + offset;
1433 }
1434
1435 len = qemu_get_byte(f);
1436 qemu_get_buffer(f, (uint8_t *)id, len);
1437 id[len] = 0;
1438
1439 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1440 if (!strncmp(id, block->idstr, sizeof(id)) &&
1441 block->max_length > offset) {
1442 return memory_region_get_ram_ptr(block->mr) + offset;
1443 }
1444 }
1445
1446 error_report("Can't find block %s!", id);
1447 return NULL;
1448 }
1449
1450 /*
1451 * If a page (or a whole RDMA chunk) has been
1452 * determined to be zero, then zap it.
1453 */
1454 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
1455 {
1456 if (ch != 0 || !is_zero_range(host, size)) {
1457 memset(host, ch, size);
1458 }
1459 }
1460
1461 static void *do_data_decompress(void *opaque)
1462 {
1463 while (!quit_decomp_thread) {
1464 /* To be done */
1465 }
1466
1467 return NULL;
1468 }
1469
1470 void migrate_decompress_threads_create(void)
1471 {
1472 int i, thread_count;
1473
1474 thread_count = migrate_decompress_threads();
1475 decompress_threads = g_new0(QemuThread, thread_count);
1476 decomp_param = g_new0(DecompressParam, thread_count);
1477 compressed_data_buf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
1478 quit_decomp_thread = false;
1479 for (i = 0; i < thread_count; i++) {
1480 qemu_mutex_init(&decomp_param[i].mutex);
1481 qemu_cond_init(&decomp_param[i].cond);
1482 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
1483 qemu_thread_create(decompress_threads + i, "decompress",
1484 do_data_decompress, decomp_param + i,
1485 QEMU_THREAD_JOINABLE);
1486 }
1487 }
1488
1489 void migrate_decompress_threads_join(void)
1490 {
1491 int i, thread_count;
1492
1493 quit_decomp_thread = true;
1494 thread_count = migrate_decompress_threads();
1495 for (i = 0; i < thread_count; i++) {
1496 qemu_thread_join(decompress_threads + i);
1497 qemu_mutex_destroy(&decomp_param[i].mutex);
1498 qemu_cond_destroy(&decomp_param[i].cond);
1499 g_free(decomp_param[i].compbuf);
1500 }
1501 g_free(decompress_threads);
1502 g_free(decomp_param);
1503 g_free(compressed_data_buf);
1504 decompress_threads = NULL;
1505 decomp_param = NULL;
1506 compressed_data_buf = NULL;
1507 }
1508
1509 static void decompress_data_with_multi_threads(uint8_t *compbuf,
1510 void *host, int len)
1511 {
1512 /* To be done */
1513 }
1514
1515 static int ram_load(QEMUFile *f, void *opaque, int version_id)
1516 {
1517 int flags = 0, ret = 0;
1518 static uint64_t seq_iter;
1519 int len = 0;
1520
1521 seq_iter++;
1522
1523 if (version_id != 4) {
1524 ret = -EINVAL;
1525 }
1526
1527 /* This RCU critical section can be very long running.
1528 * When RCU reclaims in the code start to become numerous,
1529 * it will be necessary to reduce the granularity of this
1530 * critical section.
1531 */
1532 rcu_read_lock();
1533 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
1534 ram_addr_t addr, total_ram_bytes;
1535 void *host;
1536 uint8_t ch;
1537
1538 addr = qemu_get_be64(f);
1539 flags = addr & ~TARGET_PAGE_MASK;
1540 addr &= TARGET_PAGE_MASK;
1541
1542 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
1543 case RAM_SAVE_FLAG_MEM_SIZE:
1544 /* Synchronize RAM block list */
1545 total_ram_bytes = addr;
1546 while (!ret && total_ram_bytes) {
1547 RAMBlock *block;
1548 uint8_t len;
1549 char id[256];
1550 ram_addr_t length;
1551
1552 len = qemu_get_byte(f);
1553 qemu_get_buffer(f, (uint8_t *)id, len);
1554 id[len] = 0;
1555 length = qemu_get_be64(f);
1556
1557 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1558 if (!strncmp(id, block->idstr, sizeof(id))) {
1559 if (length != block->used_length) {
1560 Error *local_err = NULL;
1561
1562 ret = qemu_ram_resize(block->offset, length, &local_err);
1563 if (local_err) {
1564 error_report_err(local_err);
1565 }
1566 }
1567 break;
1568 }
1569 }
1570
1571 if (!block) {
1572 error_report("Unknown ramblock \"%s\", cannot "
1573 "accept migration", id);
1574 ret = -EINVAL;
1575 }
1576
1577 total_ram_bytes -= length;
1578 }
1579 break;
1580 case RAM_SAVE_FLAG_COMPRESS:
1581 host = host_from_stream_offset(f, addr, flags);
1582 if (!host) {
1583 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
1584 ret = -EINVAL;
1585 break;
1586 }
1587 ch = qemu_get_byte(f);
1588 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
1589 break;
1590 case RAM_SAVE_FLAG_PAGE:
1591 host = host_from_stream_offset(f, addr, flags);
1592 if (!host) {
1593 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
1594 ret = -EINVAL;
1595 break;
1596 }
1597 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
1598 break;
1599 case RAM_SAVE_FLAG_COMPRESS_PAGE:
1600 host = host_from_stream_offset(f, addr, flags);
1601 if (!host) {
1602 error_report("Invalid RAM offset " RAM_ADDR_FMT, addr);
1603 ret = -EINVAL;
1604 break;
1605 }
1606
1607 len = qemu_get_be32(f);
1608 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
1609 error_report("Invalid compressed data length: %d", len);
1610 ret = -EINVAL;
1611 break;
1612 }
1613 qemu_get_buffer(f, compressed_data_buf, len);
1614 decompress_data_with_multi_threads(compressed_data_buf, host, len);
1615 break;
1616 case RAM_SAVE_FLAG_XBZRLE:
1617 host = host_from_stream_offset(f, addr, flags);
1618 if (!host) {
1619 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
1620 ret = -EINVAL;
1621 break;
1622 }
1623 if (load_xbzrle(f, addr, host) < 0) {
1624 error_report("Failed to decompress XBZRLE page at "
1625 RAM_ADDR_FMT, addr);
1626 ret = -EINVAL;
1627 break;
1628 }
1629 break;
1630 case RAM_SAVE_FLAG_EOS:
1631 /* normal exit */
1632 break;
1633 default:
1634 if (flags & RAM_SAVE_FLAG_HOOK) {
1635 ram_control_load_hook(f, flags);
1636 } else {
1637 error_report("Unknown combination of migration flags: %#x",
1638 flags);
1639 ret = -EINVAL;
1640 }
1641 }
1642 if (!ret) {
1643 ret = qemu_file_get_error(f);
1644 }
1645 }
1646
1647 rcu_read_unlock();
1648 DPRINTF("Completed load of VM with exit code %d seq iteration "
1649 "%" PRIu64 "\n", ret, seq_iter);
1650 return ret;
1651 }
1652
1653 static SaveVMHandlers savevm_ram_handlers = {
1654 .save_live_setup = ram_save_setup,
1655 .save_live_iterate = ram_save_iterate,
1656 .save_live_complete = ram_save_complete,
1657 .save_live_pending = ram_save_pending,
1658 .load_state = ram_load,
1659 .cancel = ram_migration_cancel,
1660 };
1661
1662 void ram_mig_init(void)
1663 {
1664 qemu_mutex_init(&XBZRLE.lock);
1665 register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
1666 }
1667
1668 struct soundhw {
1669 const char *name;
1670 const char *descr;
1671 int enabled;
1672 int isa;
1673 union {
1674 int (*init_isa) (ISABus *bus);
1675 int (*init_pci) (PCIBus *bus);
1676 } init;
1677 };
1678
1679 static struct soundhw soundhw[9];
1680 static int soundhw_count;
1681
1682 void isa_register_soundhw(const char *name, const char *descr,
1683 int (*init_isa)(ISABus *bus))
1684 {
1685 assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
1686 soundhw[soundhw_count].name = name;
1687 soundhw[soundhw_count].descr = descr;
1688 soundhw[soundhw_count].isa = 1;
1689 soundhw[soundhw_count].init.init_isa = init_isa;
1690 soundhw_count++;
1691 }
1692
1693 void pci_register_soundhw(const char *name, const char *descr,
1694 int (*init_pci)(PCIBus *bus))
1695 {
1696 assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
1697 soundhw[soundhw_count].name = name;
1698 soundhw[soundhw_count].descr = descr;
1699 soundhw[soundhw_count].isa = 0;
1700 soundhw[soundhw_count].init.init_pci = init_pci;
1701 soundhw_count++;
1702 }
1703
1704 void select_soundhw(const char *optarg)
1705 {
1706 struct soundhw *c;
1707
1708 if (is_help_option(optarg)) {
1709 show_valid_cards:
1710
1711 if (soundhw_count) {
1712 printf("Valid sound card names (comma separated):\n");
1713 for (c = soundhw; c->name; ++c) {
1714 printf ("%-11s %s\n", c->name, c->descr);
1715 }
1716 printf("\n-soundhw all will enable all of the above\n");
1717 } else {
1718 printf("Machine has no user-selectable audio hardware "
1719 "(it may or may not have always-present audio hardware).\n");
1720 }
1721 exit(!is_help_option(optarg));
1722 }
1723 else {
1724 size_t l;
1725 const char *p;
1726 char *e;
1727 int bad_card = 0;
1728
1729 if (!strcmp(optarg, "all")) {
1730 for (c = soundhw; c->name; ++c) {
1731 c->enabled = 1;
1732 }
1733 return;
1734 }
1735
1736 p = optarg;
1737 while (*p) {
1738 e = strchr(p, ',');
1739 l = !e ? strlen(p) : (size_t) (e - p);
1740
1741 for (c = soundhw; c->name; ++c) {
1742 if (!strncmp(c->name, p, l) && !c->name[l]) {
1743 c->enabled = 1;
1744 break;
1745 }
1746 }
1747
1748 if (!c->name) {
1749 if (l > 80) {
1750 error_report("Unknown sound card name (too big to show)");
1751 }
1752 else {
1753 error_report("Unknown sound card name `%.*s'",
1754 (int) l, p);
1755 }
1756 bad_card = 1;
1757 }
1758 p += l + (e != NULL);
1759 }
1760
1761 if (bad_card) {
1762 goto show_valid_cards;
1763 }
1764 }
1765 }
1766
1767 void audio_init(void)
1768 {
1769 struct soundhw *c;
1770 ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
1771 PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);
1772
1773 for (c = soundhw; c->name; ++c) {
1774 if (c->enabled) {
1775 if (c->isa) {
1776 if (!isa_bus) {
1777 error_report("ISA bus not available for %s", c->name);
1778 exit(1);
1779 }
1780 c->init.init_isa(isa_bus);
1781 } else {
1782 if (!pci_bus) {
1783 error_report("PCI bus not available for %s", c->name);
1784 exit(1);
1785 }
1786 c->init.init_pci(pci_bus);
1787 }
1788 }
1789 }
1790 }
1791
1792 int qemu_uuid_parse(const char *str, uint8_t *uuid)
1793 {
1794 int ret;
1795
1796 if (strlen(str) != 36) {
1797 return -1;
1798 }
1799
1800 ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
1801 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
1802 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
1803 &uuid[15]);
1804
1805 if (ret != 16) {
1806 return -1;
1807 }
1808 return 0;
1809 }
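/* Usage sketch (hypothetical UUID string): */
#if 0
    uint8_t uuid[16];
    if (qemu_uuid_parse("550e8400-e29b-41d4-a716-446655440000", uuid) < 0) {
        error_report("invalid UUID");
    }
#endif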
1810
1811 void do_acpitable_option(const QemuOpts *opts)
1812 {
1813 #ifdef TARGET_I386
1814 Error *err = NULL;
1815
1816 acpi_table_add(opts, &err);
1817 if (err) {
1818 error_report("Wrong acpi table provided: %s",
1819 error_get_pretty(err));
1820 error_free(err);
1821 exit(1);
1822 }
1823 #endif
1824 }
1825
1826 void do_smbios_option(QemuOpts *opts)
1827 {
1828 #ifdef TARGET_I386
1829 smbios_entry_add(opts);
1830 #endif
1831 }
1832
1833 void cpudef_init(void)
1834 {
1835 #if defined(cpudef_setup)
1836 cpudef_setup(); /* parse cpu definitions in target config file */
1837 #endif
1838 }
1839
1840 int kvm_available(void)
1841 {
1842 #ifdef CONFIG_KVM
1843 return 1;
1844 #else
1845 return 0;
1846 #endif
1847 }
1848
1849 int xen_available(void)
1850 {
1851 #ifdef CONFIG_XEN
1852 return 1;
1853 #else
1854 return 0;
1855 #endif
1856 }
1857
1858
1859 TargetInfo *qmp_query_target(Error **errp)
1860 {
1861 TargetInfo *info = g_malloc0(sizeof(*info));
1862
1863 info->arch = g_strdup(TARGET_NAME);
1864
1865 return info;
1866 }
1867
1868 /* Stub function that gets run on the vcpu when it's brought out of the
1869 VM to run inside qemu via async_run_on_cpu() */
1870 static void mig_sleep_cpu(void *opq)
1871 {
1872 qemu_mutex_unlock_iothread();
1873 g_usleep(30*1000);
1874 qemu_mutex_lock_iothread();
1875 }
1876
1877 /* To reduce the dirty rate, explicitly disallow the VCPUs from spending
1878 much time in the VM. The migration thread will try to catch up.
1879 The workload will experience a performance drop.
1880 */
1881 static void mig_throttle_guest_down(void)
1882 {
1883 CPUState *cpu;
1884
1885 qemu_mutex_lock_iothread();
1886 CPU_FOREACH(cpu) {
1887 async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
1888 }
1889 qemu_mutex_unlock_iothread();
1890 }
1891
1892 static void check_guest_throttling(void)
1893 {
1894 static int64_t t0;
1895 int64_t t1;
1896
1897 if (!mig_throttle_on) {
1898 return;
1899 }
1900
1901 if (!t0) {
1902 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1903 return;
1904 }
1905
1906 t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1907
1908 /* If it has been more than 40 ms since the last time the guest
1909 * was throttled then do it again.
1910 */
1911 if (40 < (t1-t0)/1000000) {
1912 mig_throttle_guest_down();
1913 t0 = t1;
1914 }
1915 }
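/*
 * Net effect (approximate): while mig_throttle_on is set, each VCPU is
 * put to sleep for 30 ms at most once every 40 ms, capping the guest at
 * roughly 10/40 = 25% of CPU time so the migration thread can catch up.
 */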