/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif


#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;
static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);

static uint64_t bitmap_sync_count;

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; the next free flag starts at 0x100 */

static struct defconfig_file {
    const char *filename;
    /* Indicates this is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};

static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];

int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

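/* Returns true if the 'size' bytes at 'p' are all zero: the first non-zero
 * offset is then past the end of the buffer. */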
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_find_nonzero_offset(p, size) == size;
}

/* Struct containing the XBZRLE cache and the static buffers used for
   compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, protected by lock. */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;

/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}

/*
 * Called from qmp_migrate_set_cache_size in the main thread, possibly while
 * a migration is in progress.
 * A running migration may be using the cache and might finish during this
 * call, hence changes to the cache are protected by the XBZRLE.lock mutex.
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    double xbzrle_cache_miss_rate;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

double xbzrle_mig_cache_miss_rate(void)
{
    return acct_info.xbzrle_cache_miss_rate;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}

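/* Write the header for one RAM page: the page offset with 'cont' and 'flag'
 * ORed into the low bits, followed (only when this is the first page sent
 * from this block, i.e. !cont) by the length-prefixed block idstr.
 * Returns the number of header bytes written. */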
static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size;

    qemu_put_be64(f, offset | cont | flag);
    size = 8;

    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}

/* This is the last block that we have visited searching for dirty pages */
static RAMBlock *last_seen_block;
/* This is the last block from which we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;

/* Update the xbzrle cache to reflect a page that has been sent as all zeros.
 * The important thing is that a stale (not-yet-zeroed) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added, so that when
 * a small write is later made into the zeroed page it can be sent with
 * XBZRLE.
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    if (ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE);
}

#define ENCODING_FLAG_XBZRLE 0x1

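/* Try to send the page at 'current_addr' using XBZRLE delta compression.
 * Returns the number of bytes written on success, 0 if the page was
 * unmodified (nothing needs to be sent), or -1 if the page could not be
 * sent with XBZRLE (cache miss or encoding overflow) and the caller
 * should fall back to sending a normal page. */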
static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        acct_info.xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into the cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save the current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* Update the cache so the next encoding diffs against the data that
     * the destination now holds for this page */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send the XBZRLE-compressed page */
    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}

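/* Find the next dirty page in 'mr' at or after 'start', clear its bit in
 * the migration bitmap, and return its offset within the region.  During
 * the bulk stage every page is dirty, so (except for the region's first
 * page) the bitmap search can be skipped and we simply advance one page. */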
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}

static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}

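/* Pull the dirty bits for [start, start + length) from the memory API's
 * DIRTY_MEMORY_MIGRATION bitmap into the migration bitmap.  When the range
 * starts on a bitmap-word boundary, whole words can be ORed in at a time
 * and the newly dirtied pages counted with ctpopl(); otherwise fall back
 * to testing and resetting one page at a time. */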
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~migration_bitmap[k];
                migration_bitmap[k] |= src[k];
                new_dirty &= src[k];
                migration_dirty_pages += ctpopl(new_dirty);
                src[k] = 0;
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                cpu_physical_memory_reset_dirty(start + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
                migration_bitmap_set_dirty(start + addr);
            }
        }
    }
}


/* Needs iothread lock! */

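/* Sync the migration bitmap with the dirty memory tracked by the memory
 * API, accumulate the per-period dirty page count, and once per second
 * update the dirty page rate, the XBZRLE cache miss rate, and (when
 * auto-converge is enabled) the guest-throttling decision. */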
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t bytes_xfer_prev;
    static int64_t num_dirty_pages_period;
    int64_t end_time;
    int64_t bytes_xfer_now;
    static uint64_t xbzrle_cache_miss_prev;
    static uint64_t iterations_prev;

    bitmap_sync_count++;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->mr->ram_addr, block->length);
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later.  For now:
               check whether the bytes dirtied in this period exceed half of
               the bytes transferred since the last time we were in this
               routine.  If that happens more than N times (for now N == 4)
               we turn on the throttle-down logic. */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                    (bytes_xfer_now - bytes_xfer_prev) / 2) &&
                (dirty_rate_high_cnt++ > 4)) {
                trace_migration_throttle();
                mig_throttle_on = true;
                dirty_rate_high_cnt = 0;
            }
            bytes_xfer_prev = bytes_xfer_now;
        } else {
            mig_throttle_on = false;
        }
        if (migrate_use_xbzrle()) {
            if (iterations_prev != 0) {
                acct_info.xbzrle_cache_miss_rate =
                    (double)(acct_info.xbzrle_cache_miss -
                             xbzrle_cache_miss_prev) /
                    (acct_info.iterations - iterations_prev);
            }
            iterations_prev = acct_info.iterations;
            xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
        s->dirty_sync_count = bitmap_sync_count;
    }
}

/*
 * ram_save_block: Writes a page of memory to the stream f
 *
 * Returns:  The number of bytes written.
 *           0 means no dirty pages
 */

static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int bytes_sent = 0;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block) {
        block = QTAILQ_FIRST(&ram_list.blocks);
    }

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            int ret;
            uint8_t *p;
            bool send_async = true;
            int cont = (block == last_sent_block) ?
                RAM_SAVE_FLAG_CONTINUE : 0;

            p = memory_region_get_ram_ptr(mr) + offset;

            /* When in doubt, send the page as a normal page */
            bytes_sent = -1;
            ret = ram_control_save_page(f, block->offset,
                                        offset, TARGET_PAGE_SIZE, &bytes_sent);

            XBZRLE_cache_lock();

            current_addr = block->offset + offset;
            if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
                if (ret != RAM_SAVE_CONTROL_DELAYED) {
                    if (bytes_sent > 0) {
                        acct_info.norm_pages++;
                    } else if (bytes_sent == 0) {
                        acct_info.dup_pages++;
                    }
                }
            } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
                acct_info.dup_pages++;
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, 0);
                bytes_sent++;
                /* Must let xbzrle know, otherwise a previous (now 0'd) cached
                 * page would be stale
                 */
                xbzrle_cache_zero_page(current_addr);
            } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
                bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    /* Can't send this cached data async, since the cache page
                     * might get updated before it gets to the wire
                     */
                    send_async = false;
                }
            }

            /* XBZRLE overflow or normal page */
            if (bytes_sent == -1) {
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_PAGE);
                if (send_async) {
                    qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
                } else {
                    qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
                }
                bytes_sent += TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            XBZRLE_cache_unlock();
            /* if the page was unmodified, continue to the next one */
            if (bytes_sent > 0) {
                last_sent_block = block;
                break;
            }
        }
    }
    last_seen_block = block;
    last_offset = offset;

    return bytes_sent;
}

static uint64_t bytes_transferred;

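/* Account for pages that were transferred outside the normal save path
 * (presumably by a ram_control/RDMA-style hook): bump the duplicate or
 * normal page counters and advance the stream position accordingly. */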
void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }

    return total;
}

void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}

static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
    XBZRLE_cache_unlock();
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

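/* Set up RAM migration: allocate the XBZRLE cache and work buffers if
 * XBZRLE is enabled, create the migration bitmap with every page marked
 * dirty, start dirty logging, and write the list of RAM blocks (idstr and
 * length) to the stream. */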
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */

    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;
    bitmap_sync_count = 0;

    if (migrate_use_xbzrle()) {
        XBZRLE_cache_lock();
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            XBZRLE_cache_unlock();
            error_report("Error creating cache");
            return -1;
        }
        XBZRLE_cache_unlock();

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            error_report("Error allocating encoded_buf");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            error_report("Error allocating current_buf");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
    migration_bitmap = bitmap_new(ram_bitmap_pages);
    bitmap_set(migration_bitmap, 0, ram_bitmap_pages);

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    migration_dirty_pages = 0;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        uint64_t block_pages;

        block_pages = block->length >> TARGET_PAGE_BITS;
        migration_dirty_pages += block_pages;
    }

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_mutex_unlock_ramlist();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

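/* One iteration of the live phase: keep sending dirty pages until the
 * bandwidth limit is reached, no dirty pages remain, or more than MAX_WAIT
 * ms have been spent in this call.  Returns the number of page bytes sent
 * (0 if nothing was dirty) or a negative error from the stream. */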
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int total_sent = 0;

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more dirty pages to send */
        if (bytes_sent == 0) {
            break;
        }
        total_sent += bytes_sent;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check on the first iteration too, in case it was the
           first time through and we had to sync the dirty bitmap.
           qemu_clock_get_ns() is a bit expensive, so we only check every
           64 iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
                1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    qemu_mutex_unlock_ramlist();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    bytes_transferred += total_sent;

    /*
     * Do not count these 8 bytes into total_sent, so that we can
     * return 0 if no page had been dirtied.
     */
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return total_sent;
}

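/* Final stage of migration: do one last bitmap sync and flush every
 * remaining dirty page, ignoring the rate limit. */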
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    qemu_mutex_lock_ramlist();
    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, true);
        /* no more dirty pages to send */
        if (bytes_sent == 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }

    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

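/* Estimate how much dirty data is still to be sent.  If the estimate drops
 * below max_size, resync the dirty bitmap (under the iothread lock) so the
 * caller decides about convergence with fresh numbers. */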
static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        migration_bitmap_sync();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}

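/* Load one XBZRLE-encoded page into 'host'.  The wire format, as written
 * by save_xbzrle_page, is:
 *
 *   byte   flags        (must be ENCODING_FLAG_XBZRLE)
 *   be16   encoded_len
 *   bytes  encoded data (encoded_len bytes)
 *
 * Returns 0 on success, -1 on a malformed or undecodable page. */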
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}

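/* Translate a (block, offset) pair from the stream into a host pointer.
 * With RAM_SAVE_FLAG_CONTINUE the page belongs to the same block as the
 * previous one; otherwise a length-prefixed block idstr follows and is
 * looked up in the RAM block list. */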
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id))) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}

/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            /* Synchronize RAM block list */
            char id[256];
            ram_addr_t length;
            ram_addr_t total_ram_bytes = addr;

            while (total_ram_bytes) {
                RAMBlock *block;
                uint8_t len;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                    if (!strncmp(id, block->idstr, sizeof(id))) {
                        if (block->length != length) {
                            fprintf(stderr,
                                    "Length mismatch: %s: " RAM_ADDR_FMT
                                    " in != " RAM_ADDR_FMT "\n", id, length,
                                    block->length);
                            ret = -EINVAL;
                            goto done;
                        }
                        break;
                    }
                }

                if (!block) {
                    fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                            "accept migration\n", id);
                    ret = -EINVAL;
                    goto done;
                }

                total_ram_bytes -= length;
            }
        }

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                ret = -EINVAL;
                goto done;
            }
        } else if (flags & RAM_SAVE_FLAG_HOOK) {
            ram_control_load_hook(f, flags);
        }
        error = qemu_file_get_error(f);
        if (error) {
            ret = error;
            goto done;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

done:
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}

static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

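/* Initialize the XBZRLE cache lock and register the RAM savevm handlers
 * (section "ram", version 4). */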
void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
}

struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[9];
static int soundhw_count;

void isa_register_soundhw(const char *name, const char *descr,
                          int (*init_isa)(ISABus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 1;
    soundhw[soundhw_count].init.init_isa = init_isa;
    soundhw_count++;
}

void pci_register_soundhw(const char *name, const char *descr,
                          int (*init_pci)(PCIBus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 0;
    soundhw[soundhw_count].init.init_pci = init_pci;
    soundhw_count++;
}

void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

        if (soundhw_count) {
            printf("Valid sound card names (comma separated):\n");
            for (c = soundhw; c->name; ++c) {
                printf("%-11s %s\n", c->name, c->descr);
            }
            printf("\n-soundhw all will enable all of the above\n");
        } else {
            printf("Machine has no user-selectable audio hardware "
                   "(it may or may not have always-present audio hardware).\n");
        }
        exit(!is_help_option(optarg));
    } else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    fprintf(stderr,
                            "Unknown sound card name (too big to show)\n");
                } else {
                    fprintf(stderr, "Unknown sound card name `%.*s'\n",
                            (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}

void audio_init(void)
{
    struct soundhw *c;
    ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
    PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (!isa_bus) {
                    fprintf(stderr, "ISA bus not available for %s\n", c->name);
                    exit(1);
                }
                c->init.init_isa(isa_bus);
            } else {
                if (!pci_bus) {
                    fprintf(stderr, "PCI bus not available for %s\n", c->name);
                    exit(1);
                }
                c->init.init_pci(pci_bus);
            }
        }
    }
}

int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
    return 0;
}

void do_acpitable_option(const QemuOpts *opts)
{
#ifdef TARGET_I386
    Error *err = NULL;

    acpi_table_add(opts, &err);
    if (err) {
        error_report("Wrong acpi table provided: %s",
                     error_get_pretty(err));
        error_free(err);
        exit(1);
    }
#endif
}

void do_smbios_option(QemuOpts *opts)
{
#ifdef TARGET_I386
    smbios_entry_add(opts);
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int tcg_available(void)
{
    return 1;
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}


TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = g_strdup(TARGET_NAME);

    return info;
}

/* Stub function that gets run on the vcpu when it is brought out of the
   VM to run inside qemu via async_run_on_cpu() */
static void mig_sleep_cpu(void *opq)
{
    qemu_mutex_unlock_iothread();
    g_usleep(30*1000);
    qemu_mutex_lock_iothread();
}

/* To reduce the dirty rate, explicitly disallow the VCPUs from spending
   much time in the VM.  The migration thread will try to catch up.
   The workload will experience a performance drop.
*/
static void mig_throttle_guest_down(void)
{
    CPUState *cpu;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
    }
    qemu_mutex_unlock_iothread();
}

static void check_guest_throttling(void)
{
    static int64_t t0;
    int64_t t1;

    if (!mig_throttle_on) {
        return;
    }

    if (!t0) {
        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        return;
    }

    t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    /* If it has been more than 40 ms since the last time the guest
     * was throttled then do it again.
     */
    if (40 < (t1 - t0) / 1000000) {
        mig_throttle_guest_down();
        t0 = t1;
    }
}