Commit | Line | Data |
---|---|---|
ad96090a BS |
1 | /* |
2 | * QEMU System Emulator | |
3 | * | |
4 | * Copyright (c) 2003-2008 Fabrice Bellard | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
7 | * of this software and associated documentation files (the "Software"), to deal | |
8 | * in the Software without restriction, including without limitation the rights | |
9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
10 | * copies of the Software, and to permit persons to whom the Software is | |
11 | * furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
22 | * THE SOFTWARE. | |
23 | */ | |
24 | #include <stdint.h> | |
25 | #include <stdarg.h> | |
b2e0a138 | 26 | #include <stdlib.h> |
3fcb38c2 | 27 | #include <zlib.h> |
ad96090a | 28 | #ifndef _WIN32 |
1c47cb16 | 29 | #include <sys/types.h> |
ad96090a BS |
30 | #include <sys/mman.h> |
31 | #endif | |
32 | #include "config.h" | |
83c9089e | 33 | #include "monitor/monitor.h" |
9c17d615 | 34 | #include "sysemu/sysemu.h" |
1de7afc9 PB |
35 | #include "qemu/bitops.h" |
36 | #include "qemu/bitmap.h" | |
9c17d615 | 37 | #include "sysemu/arch_init.h" |
ad96090a | 38 | #include "audio/audio.h" |
0d09e41a | 39 | #include "hw/i386/pc.h" |
a2cb15b0 | 40 | #include "hw/pci/pci.h" |
0d09e41a | 41 | #include "hw/audio/audio.h" |
9c17d615 | 42 | #include "sysemu/kvm.h" |
caf71f86 | 43 | #include "migration/migration.h" |
0d09e41a | 44 | #include "hw/i386/smbios.h" |
022c62cb | 45 | #include "exec/address-spaces.h" |
0d09e41a | 46 | #include "hw/audio/pcspk.h" |
caf71f86 | 47 | #include "migration/page_cache.h" |
1de7afc9 | 48 | #include "qemu/config-file.h" |
d97326ee | 49 | #include "qemu/error-report.h" |
99afc91d | 50 | #include "qmp-commands.h" |
3c12193d | 51 | #include "trace.h" |
0d6d3c87 | 52 | #include "exec/cpu-all.h" |
12291ec1 | 53 | #include "exec/ram_addr.h" |
0445259b | 54 | #include "hw/acpi/acpi.h" |
aa8dc044 | 55 | #include "qemu/host-utils.h" |
0dc3f44a | 56 | #include "qemu/rcu_queue.h" |
ad96090a | 57 | |
3a697f69 OW |
58 | #ifdef DEBUG_ARCH_INIT |
59 | #define DPRINTF(fmt, ...) \ | |
60 | do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0) | |
61 | #else | |
62 | #define DPRINTF(fmt, ...) \ | |
63 | do { } while (0) | |
64 | #endif | |
65 | ||
ad96090a BS |
66 | #ifdef TARGET_SPARC |
67 | int graphic_width = 1024; | |
68 | int graphic_height = 768; | |
69 | int graphic_depth = 8; | |
70 | #else | |
71 | int graphic_width = 800; | |
72 | int graphic_height = 600; | |
f1ff0e89 | 73 | int graphic_depth = 32; |
ad96090a BS |
74 | #endif |
75 | ||
ad96090a BS |
76 | |
77 | #if defined(TARGET_ALPHA) | |
78 | #define QEMU_ARCH QEMU_ARCH_ALPHA | |
79 | #elif defined(TARGET_ARM) | |
80 | #define QEMU_ARCH QEMU_ARCH_ARM | |
81 | #elif defined(TARGET_CRIS) | |
82 | #define QEMU_ARCH QEMU_ARCH_CRIS | |
83 | #elif defined(TARGET_I386) | |
84 | #define QEMU_ARCH QEMU_ARCH_I386 | |
85 | #elif defined(TARGET_M68K) | |
86 | #define QEMU_ARCH QEMU_ARCH_M68K | |
81ea0e13 MW |
87 | #elif defined(TARGET_LM32) |
88 | #define QEMU_ARCH QEMU_ARCH_LM32 | |
ad96090a BS |
89 | #elif defined(TARGET_MICROBLAZE) |
90 | #define QEMU_ARCH QEMU_ARCH_MICROBLAZE | |
91 | #elif defined(TARGET_MIPS) | |
92 | #define QEMU_ARCH QEMU_ARCH_MIPS | |
d15a9c23 AG |
93 | #elif defined(TARGET_MOXIE) |
94 | #define QEMU_ARCH QEMU_ARCH_MOXIE | |
e67db06e JL |
95 | #elif defined(TARGET_OPENRISC) |
96 | #define QEMU_ARCH QEMU_ARCH_OPENRISC | |
ad96090a BS |
97 | #elif defined(TARGET_PPC) |
98 | #define QEMU_ARCH QEMU_ARCH_PPC | |
99 | #elif defined(TARGET_S390X) | |
100 | #define QEMU_ARCH QEMU_ARCH_S390X | |
101 | #elif defined(TARGET_SH4) | |
102 | #define QEMU_ARCH QEMU_ARCH_SH4 | |
103 | #elif defined(TARGET_SPARC) | |
104 | #define QEMU_ARCH QEMU_ARCH_SPARC | |
2328826b MF |
105 | #elif defined(TARGET_XTENSA) |
106 | #define QEMU_ARCH QEMU_ARCH_XTENSA | |
4f23a1e6 GX |
107 | #elif defined(TARGET_UNICORE32) |
108 | #define QEMU_ARCH QEMU_ARCH_UNICORE32 | |
48e06fe0 BK |
109 | #elif defined(TARGET_TRICORE) |
110 | #define QEMU_ARCH QEMU_ARCH_TRICORE | |
ad96090a BS |
111 | #endif |
112 | ||
113 | const uint32_t arch_type = QEMU_ARCH; | |
7ca1dfad CV |
114 | static bool mig_throttle_on; |
115 | static int dirty_rate_high_cnt; | |
116 | static void check_guest_throttling(void); | |
ad96090a | 117 | |
71411d35 C |
118 | static uint64_t bitmap_sync_count; |
119 | ||
ad96090a BS |
120 | /***********************************************************/ |
121 | /* ram save/restore */ | |
122 | ||
d20878d2 YT |
123 | #define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */ |
124 | #define RAM_SAVE_FLAG_COMPRESS 0x02 | |
125 | #define RAM_SAVE_FLAG_MEM_SIZE 0x04 | |
126 | #define RAM_SAVE_FLAG_PAGE 0x08 | |
127 | #define RAM_SAVE_FLAG_EOS 0x10 | |
128 | #define RAM_SAVE_FLAG_CONTINUE 0x20 | |
17ad9b35 | 129 | #define RAM_SAVE_FLAG_XBZRLE 0x40 |
0033b8b4 | 130 | /* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
3fcb38c2 | 131 | #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100 |
ad96090a | 132 | |
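Because page offsets on the wire are always TARGET_PAGE_SIZE aligned, the low bits of the be64 offset field are free to carry the RAM_SAVE_FLAG_* bits above, and ram_load() strips them back out with TARGET_PAGE_MASK. A minimal sketch of that packing convention (variable names are illustrative; the same idiom appears in save_page_header() and ram_load() below):

```c
/* save side: the header word is the page offset with flags OR'd in */
uint64_t packed = offset | RAM_SAVE_FLAG_PAGE;

/* load side: split the word back into flags and a page-aligned offset */
int flags       = packed & ~TARGET_PAGE_MASK;
ram_addr_t addr = packed & TARGET_PAGE_MASK;
```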
756557de EH |
133 | static struct defconfig_file { |
134 | const char *filename; | |
f29a5614 EH |
135 | /* Indicates it is a user config file (disabled by -no-user-config) */
136 | bool userconfig; | |
756557de | 137 | } default_config_files[] = { |
f29a5614 | 138 | { CONFIG_QEMU_CONFDIR "/qemu.conf", true }, |
2e59915d | 139 | { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true }, |
756557de EH |
140 | { NULL }, /* end of list */ |
141 | }; | |
142 | ||
6d3cb1f9 | 143 | static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE]; |
756557de | 144 | |
f29a5614 | 145 | int qemu_read_default_config_files(bool userconfig) |
b5a8fe5e EH |
146 | { |
147 | int ret; | |
756557de | 148 | struct defconfig_file *f; |
b5a8fe5e | 149 | |
756557de | 150 | for (f = default_config_files; f->filename; f++) { |
f29a5614 EH |
151 | if (!userconfig && f->userconfig) { |
152 | continue; | |
153 | } | |
756557de EH |
154 | ret = qemu_read_config_file(f->filename); |
155 | if (ret < 0 && ret != -ENOENT) { | |
156 | return ret; | |
157 | } | |
b5a8fe5e | 158 | } |
4d8b3c63 | 159 | |
b5a8fe5e EH |
160 | return 0; |
161 | } | |
162 | ||
dc3c26a4 | 163 | static inline bool is_zero_range(uint8_t *p, uint64_t size) |
ad96090a | 164 | { |
dc3c26a4 | 165 | return buffer_find_nonzero_offset(p, size) == size; |
ad96090a BS |
166 | } |
167 | ||
17ad9b35 OW |
168 | /* This struct contains the XBZRLE cache and a static page
169 | used for the compression */ | |
170 | static struct { | |
171 | /* buffer used for XBZRLE encoding */ | |
172 | uint8_t *encoded_buf; | |
173 | /* buffer for storing page content */ | |
174 | uint8_t *current_buf; | |
fd8cec93 | 175 | /* Cache for XBZRLE, Protected by lock. */ |
17ad9b35 | 176 | PageCache *cache; |
fd8cec93 | 177 | QemuMutex lock; |
d97326ee DDAG |
178 | } XBZRLE; |
179 | ||
905f26f2 GA |
180 | /* buffer used for XBZRLE decoding */ |
181 | static uint8_t *xbzrle_decoded_buf; | |
9e1ba4cc | 182 | |
fd8cec93 GA |
183 | static void XBZRLE_cache_lock(void) |
184 | { | |
185 | if (migrate_use_xbzrle()) | |
186 | qemu_mutex_lock(&XBZRLE.lock); | |
187 | } | |
188 | ||
189 | static void XBZRLE_cache_unlock(void) | |
190 | { | |
191 | if (migrate_use_xbzrle()) | |
192 | qemu_mutex_unlock(&XBZRLE.lock); | |
193 | } | |
194 | ||
d97326ee DDAG |
195 | /* |
196 | * called from qmp_migrate_set_cache_size in main thread, possibly while | |
197 | * a migration is in progress. | |
198 | * A running migration may be using the cache and might finish during this | |
199 | * call, hence changes to the cache are protected by XBZRLE.lock(). | |
200 | */ | |
9e1ba4cc OW |
201 | int64_t xbzrle_cache_resize(int64_t new_size) |
202 | { | |
d97326ee DDAG |
203 | PageCache *new_cache; |
204 | int64_t ret; | |
fd8cec93 | 205 | |
c91e681a OW |
206 | if (new_size < TARGET_PAGE_SIZE) { |
207 | return -1; | |
208 | } | |
209 | ||
d97326ee DDAG |
210 | XBZRLE_cache_lock(); |
211 | ||
9e1ba4cc | 212 | if (XBZRLE.cache != NULL) { |
fd8cec93 | 213 | if (pow2floor(new_size) == migrate_xbzrle_cache_size()) { |
d97326ee | 214 | goto out_new_size; |
fd8cec93 GA |
215 | } |
216 | new_cache = cache_init(new_size / TARGET_PAGE_SIZE, | |
217 | TARGET_PAGE_SIZE); | |
218 | if (!new_cache) { | |
d97326ee DDAG |
219 | error_report("Error creating cache"); |
220 | ret = -1; | |
221 | goto out; | |
fd8cec93 | 222 | } |
fd8cec93 | 223 | |
d97326ee DDAG |
224 | cache_fini(XBZRLE.cache); |
225 | XBZRLE.cache = new_cache; | |
9e1ba4cc | 226 | } |
fd8cec93 | 227 | |
d97326ee DDAG |
228 | out_new_size: |
229 | ret = pow2floor(new_size); | |
230 | out: | |
231 | XBZRLE_cache_unlock(); | |
232 | return ret; | |
9e1ba4cc OW |
233 | } |
234 | ||
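Note that the size actually used is pow2floor() of the request, so callers should treat the return value, not the argument, as authoritative. A hedged usage sketch (the 70 MiB figure is illustrative only):

```c
/* Ask for ~70 MiB; the cache is sized to pow2floor(70 MiB) = 64 MiB
 * and that value is returned.  A negative return means the resize
 * was rejected (request too small, or cache_init() failed). */
int64_t actual = xbzrle_cache_resize(70 * 1024 * 1024);
if (actual < 0) {
    /* report the failure to the caller / QMP client */
}
```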
004d4c10 OW |
235 | /* accounting for migration statistics */ |
236 | typedef struct AccountingInfo { | |
237 | uint64_t dup_pages; | |
f1c72795 | 238 | uint64_t skipped_pages; |
004d4c10 OW |
239 | uint64_t norm_pages; |
240 | uint64_t iterations; | |
f36d55af OW |
241 | uint64_t xbzrle_bytes; |
242 | uint64_t xbzrle_pages; | |
243 | uint64_t xbzrle_cache_miss; | |
8bc39233 | 244 | double xbzrle_cache_miss_rate; |
f36d55af | 245 | uint64_t xbzrle_overflows; |
004d4c10 OW |
246 | } AccountingInfo; |
247 | ||
248 | static AccountingInfo acct_info; | |
249 | ||
250 | static void acct_clear(void) | |
251 | { | |
252 | memset(&acct_info, 0, sizeof(acct_info)); | |
253 | } | |
254 | ||
255 | uint64_t dup_mig_bytes_transferred(void) | |
256 | { | |
257 | return acct_info.dup_pages * TARGET_PAGE_SIZE; | |
258 | } | |
259 | ||
260 | uint64_t dup_mig_pages_transferred(void) | |
261 | { | |
262 | return acct_info.dup_pages; | |
263 | } | |
264 | ||
f1c72795 PL |
265 | uint64_t skipped_mig_bytes_transferred(void) |
266 | { | |
267 | return acct_info.skipped_pages * TARGET_PAGE_SIZE; | |
268 | } | |
269 | ||
270 | uint64_t skipped_mig_pages_transferred(void) | |
271 | { | |
272 | return acct_info.skipped_pages; | |
273 | } | |
274 | ||
004d4c10 OW |
275 | uint64_t norm_mig_bytes_transferred(void) |
276 | { | |
277 | return acct_info.norm_pages * TARGET_PAGE_SIZE; | |
278 | } | |
279 | ||
280 | uint64_t norm_mig_pages_transferred(void) | |
281 | { | |
282 | return acct_info.norm_pages; | |
283 | } | |
284 | ||
f36d55af OW |
285 | uint64_t xbzrle_mig_bytes_transferred(void) |
286 | { | |
287 | return acct_info.xbzrle_bytes; | |
288 | } | |
289 | ||
290 | uint64_t xbzrle_mig_pages_transferred(void) | |
291 | { | |
292 | return acct_info.xbzrle_pages; | |
293 | } | |
294 | ||
295 | uint64_t xbzrle_mig_pages_cache_miss(void) | |
296 | { | |
297 | return acct_info.xbzrle_cache_miss; | |
298 | } | |
299 | ||
8bc39233 C |
300 | double xbzrle_mig_cache_miss_rate(void) |
301 | { | |
302 | return acct_info.xbzrle_cache_miss_rate; | |
303 | } | |
304 | ||
f36d55af OW |
305 | uint64_t xbzrle_mig_pages_overflow(void) |
306 | { | |
307 | return acct_info.xbzrle_overflows; | |
308 | } | |
309 | ||
73bab2fc JQ |
310 | /* This is the last block that we have visited searching for dirty pages
311 | */ | |
312 | static RAMBlock *last_seen_block; | |
313 | /* This is the last block from where we have sent data */ | |
314 | static RAMBlock *last_sent_block; | |
315 | static ram_addr_t last_offset; | |
316 | static unsigned long *migration_bitmap; | |
317 | static uint64_t migration_dirty_pages; | |
318 | static uint32_t last_version; | |
319 | static bool ram_bulk_stage; | |
320 | ||
8706d2d5 | 321 | struct CompressParam { |
474ddaf6 LL |
322 | bool start; |
323 | bool done; | |
324 | QEMUFile *file; | |
325 | QemuMutex mutex; | |
326 | QemuCond cond; | |
327 | RAMBlock *block; | |
328 | ram_addr_t offset; | |
8706d2d5 LL |
329 | }; |
330 | typedef struct CompressParam CompressParam; | |
331 | ||
3fcb38c2 LL |
332 | struct DecompressParam { |
333 | /* To be done */ | |
334 | }; | |
335 | typedef struct DecompressParam DecompressParam; | |
336 | ||
8706d2d5 LL |
337 | static CompressParam *comp_param; |
338 | static QemuThread *compress_threads; | |
474ddaf6 LL |
339 | /* comp_done_cond is used to wake up the migration thread when |
340 | * one of the compression threads has finished the compression. | |
341 | * comp_done_lock is used in conjunction with comp_done_cond. | |
342 | */ | |
343 | static QemuMutex *comp_done_lock; | |
344 | static QemuCond *comp_done_cond; | |
345 | /* The empty QEMUFileOps will be used by file in CompressParam */ | |
346 | static const QEMUFileOps empty_ops = { }; | |
8706d2d5 | 347 | static bool quit_comp_thread; |
3fcb38c2 LL |
348 | static bool quit_decomp_thread; |
349 | static DecompressParam *decomp_param; | |
350 | static QemuThread *decompress_threads; | |
351 | static uint8_t *compressed_data_buf; | |
8706d2d5 LL |
352 | |
353 | static void *do_data_compress(void *opaque) | |
354 | { | |
355 | while (!quit_comp_thread) { | |
356 | ||
357 | /* To be done */ | |
358 | ||
359 | } | |
360 | ||
361 | return NULL; | |
362 | } | |
363 | ||
364 | static inline void terminate_compression_threads(void) | |
365 | { | |
366 | quit_comp_thread = true; | |
367 | ||
368 | /* To be done */ | |
369 | } | |
370 | ||
371 | void migrate_compress_threads_join(void) | |
372 | { | |
373 | int i, thread_count; | |
374 | ||
375 | if (!migrate_use_compression()) { | |
376 | return; | |
377 | } | |
378 | terminate_compression_threads(); | |
379 | thread_count = migrate_compress_threads(); | |
380 | for (i = 0; i < thread_count; i++) { | |
381 | qemu_thread_join(compress_threads + i); | |
474ddaf6 LL |
382 | qemu_fclose(comp_param[i].file); |
383 | qemu_mutex_destroy(&comp_param[i].mutex); | |
384 | qemu_cond_destroy(&comp_param[i].cond); | |
8706d2d5 | 385 | } |
474ddaf6 LL |
386 | qemu_mutex_destroy(comp_done_lock); |
387 | qemu_cond_destroy(comp_done_cond); | |
8706d2d5 LL |
388 | g_free(compress_threads); |
389 | g_free(comp_param); | |
474ddaf6 LL |
390 | g_free(comp_done_cond); |
391 | g_free(comp_done_lock); | |
8706d2d5 LL |
392 | compress_threads = NULL; |
393 | comp_param = NULL; | |
474ddaf6 LL |
394 | comp_done_cond = NULL; |
395 | comp_done_lock = NULL; | |
8706d2d5 LL |
396 | } |
397 | ||
398 | void migrate_compress_threads_create(void) | |
399 | { | |
400 | int i, thread_count; | |
401 | ||
402 | if (!migrate_use_compression()) { | |
403 | return; | |
404 | } | |
405 | quit_comp_thread = false; | |
406 | thread_count = migrate_compress_threads(); | |
407 | compress_threads = g_new0(QemuThread, thread_count); | |
408 | comp_param = g_new0(CompressParam, thread_count); | |
474ddaf6 LL |
409 | comp_done_cond = g_new0(QemuCond, 1); |
410 | comp_done_lock = g_new0(QemuMutex, 1); | |
411 | qemu_cond_init(comp_done_cond); | |
412 | qemu_mutex_init(comp_done_lock); | |
8706d2d5 | 413 | for (i = 0; i < thread_count; i++) { |
474ddaf6 LL |
414 | /* comp_param[i].file is just used as a dummy buffer to save data; set
415 | * its ops to empty. | |
416 | */ | |
417 | comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops); | |
418 | qemu_mutex_init(&comp_param[i].mutex); | |
419 | qemu_cond_init(&comp_param[i].cond); | |
8706d2d5 LL |
420 | qemu_thread_create(compress_threads + i, "compress", |
421 | do_data_compress, comp_param + i, | |
422 | QEMU_THREAD_JOINABLE); | |
423 | } | |
424 | } | |
425 | ||
f6f14c58 JQ |
426 | /** |
427 | * save_page_header: Write page header to wire | |
428 | * | |
429 | * If this is the 1st block, it also writes the block identification | |
430 | * | |
431 | * Returns: Number of bytes written | |
432 | * | |
433 | * @f: QEMUFile where to send the data | |
434 | * @block: block that contains the page we want to send | |
435 | * @offset: offset inside the block for the page | |
436 | * in the lower bits, it contains flags | |
437 | */ | |
438 | static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset) | |
0c51f43d | 439 | { |
3f7d7b09 JQ |
440 | size_t size; |
441 | ||
f6f14c58 | 442 | qemu_put_be64(f, offset); |
3f7d7b09 | 443 | size = 8; |
0c51f43d | 444 | |
43edc0ed | 445 | if (!(offset & RAM_SAVE_FLAG_CONTINUE)) { |
3f7d7b09 JQ |
446 | qemu_put_byte(f, strlen(block->idstr)); |
447 | qemu_put_buffer(f, (uint8_t *)block->idstr, | |
448 | strlen(block->idstr)); | |
449 | size += 1 + strlen(block->idstr); | |
450 | } | |
451 | return size; | |
0c51f43d OW |
452 | } |
453 | ||
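For reference, the wire format this function produces (and that ram_load() below reads back) is:

```c
/* save_page_header() wire layout:
 *
 *   be64  offset within the RAMBlock, OR'd with RAM_SAVE_FLAG_* bits
 *   -- only when RAM_SAVE_FLAG_CONTINUE is not set --
 *   u8    strlen(block->idstr)
 *   n     block->idstr bytes (not NUL terminated)
 */
```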
6d3cb1f9 DDAG |
454 | /* Update the xbzrle cache to reflect a page that's been sent as all 0. |
455 | * The important thing is that a stale (not-yet-0'd) page be replaced | |
456 | * by the new data. | |
457 | * As a bonus, if the page wasn't in the cache it gets added so that | |
458 | * when a small write is made into the 0'd page it can be sent via XBZRLE | |
459 | */ | |
460 | static void xbzrle_cache_zero_page(ram_addr_t current_addr) | |
461 | { | |
462 | if (ram_bulk_stage || !migrate_use_xbzrle()) { | |
463 | return; | |
464 | } | |
465 | ||
466 | /* We don't care if this fails to allocate a new cache page | |
467 | * as long as it updates an old one */ | |
27af7d6e C |
468 | cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE, |
469 | bitmap_sync_count); | |
6d3cb1f9 DDAG |
470 | } |
471 | ||
17ad9b35 OW |
472 | #define ENCODING_FLAG_XBZRLE 0x1 |
473 | ||
f4be0f75 JQ |
474 | /** |
475 | * save_xbzrle_page: compress and send current page | |
476 | * | |
477 | * Returns: 1 means that we wrote the page | |
478 | * 0 means that page is identical to the one already sent | |
479 | * -1 means that xbzrle would be longer than normal | |
480 | * | |
481 | * @f: QEMUFile where to send the data | |
482 | * @current_data: | |
483 | * @current_addr: | |
484 | * @block: block that contains the page we want to send | |
485 | * @offset: offset inside the block for the page | |
486 | * @last_stage: if we are at the completion stage | |
487 | * @bytes_transferred: increase it with the number of transferred bytes | |
488 | */ | |
1534ee93 | 489 | static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data, |
17ad9b35 | 490 | ram_addr_t current_addr, RAMBlock *block, |
73bab2fc | 491 | ram_addr_t offset, bool last_stage, |
f4be0f75 | 492 | uint64_t *bytes_transferred) |
17ad9b35 | 493 | { |
f4be0f75 | 494 | int encoded_len = 0, bytes_xbzrle; |
17ad9b35 OW |
495 | uint8_t *prev_cached_page; |
496 | ||
27af7d6e | 497 | if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) { |
1534ee93 | 498 | acct_info.xbzrle_cache_miss++; |
dd051c72 | 499 | if (!last_stage) { |
27af7d6e C |
500 | if (cache_insert(XBZRLE.cache, current_addr, *current_data, |
501 | bitmap_sync_count) == -1) { | |
89db9987 | 502 | return -1; |
1534ee93 C |
503 | } else { |
504 | /* update *current_data when the page has been | |
505 | inserted into cache */ | |
506 | *current_data = get_cached_data(XBZRLE.cache, current_addr); | |
89db9987 | 507 | } |
dd051c72 | 508 | } |
17ad9b35 OW |
509 | return -1; |
510 | } | |
511 | ||
512 | prev_cached_page = get_cached_data(XBZRLE.cache, current_addr); | |
513 | ||
514 | /* save current buffer into memory */ | |
1534ee93 | 515 | memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE); |
17ad9b35 OW |
516 | |
517 | /* XBZRLE encoding (if there is no overflow) */ | |
518 | encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf, | |
519 | TARGET_PAGE_SIZE, XBZRLE.encoded_buf, | |
520 | TARGET_PAGE_SIZE); | |
521 | if (encoded_len == 0) { | |
522 | DPRINTF("Skipping unmodified page\n"); | |
523 | return 0; | |
524 | } else if (encoded_len == -1) { | |
525 | DPRINTF("Overflow\n"); | |
f36d55af | 526 | acct_info.xbzrle_overflows++; |
17ad9b35 | 527 | /* update data in the cache */ |
1534ee93 C |
528 | if (!last_stage) { |
529 | memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE); | |
530 | *current_data = prev_cached_page; | |
531 | } | |
17ad9b35 OW |
532 | return -1; |
533 | } | |
534 | ||
535 | /* we need to update the data in the cache, so both sides keep the same data */
dd051c72 JQ |
536 | if (!last_stage) { |
537 | memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE); | |
538 | } | |
17ad9b35 OW |
539 | |
540 | /* Send XBZRLE based compressed page */ | |
f6f14c58 | 541 | bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE); |
17ad9b35 OW |
542 | qemu_put_byte(f, ENCODING_FLAG_XBZRLE); |
543 | qemu_put_be16(f, encoded_len); | |
544 | qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len); | |
f4be0f75 | 545 | bytes_xbzrle += encoded_len + 1 + 2; |
f36d55af | 546 | acct_info.xbzrle_pages++; |
f4be0f75 JQ |
547 | acct_info.xbzrle_bytes += bytes_xbzrle; |
548 | *bytes_transferred += bytes_xbzrle; | |
17ad9b35 | 549 | |
f4be0f75 | 550 | return 1; |
17ad9b35 OW |
551 | } |
552 | ||
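A sketch of the encode/decode round trip, using the same xbzrle_encode_buffer()/xbzrle_decode_buffer() calls as above and in load_xbzrle() below (buffer names are illustrative):

```c
uint8_t old_page[TARGET_PAGE_SIZE];  /* copy both sides already share */
uint8_t new_page[TARGET_PAGE_SIZE];  /* current guest contents */
uint8_t encoded[TARGET_PAGE_SIZE];

int len = xbzrle_encode_buffer(old_page, new_page, TARGET_PAGE_SIZE,
                               encoded, TARGET_PAGE_SIZE);
if (len > 0) {
    /* the destination applies the delta on top of its old copy ... */
    xbzrle_decode_buffer(encoded, len, old_page, TARGET_PAGE_SIZE);
    /* ... after which old_page matches new_page */
}
/* len == 0: page unchanged, nothing to send;
 * len == -1: delta would exceed a page, fall back to a raw page */
```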
4c8ae0f6 JQ |
553 | static inline |
554 | ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr, | |
555 | ram_addr_t start) | |
69268cde | 556 | { |
4c8ae0f6 JQ |
557 | unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS; |
558 | unsigned long nr = base + (start >> TARGET_PAGE_BITS); | |
0851c9f7 MT |
559 | uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr)); |
560 | unsigned long size = base + (mr_size >> TARGET_PAGE_BITS); | |
c6bf8e0e | 561 | |
70c8652b PL |
562 | unsigned long next; |
563 | ||
564 | if (ram_bulk_stage && nr > base) { | |
565 | next = nr + 1; | |
566 | } else { | |
567 | next = find_next_bit(migration_bitmap, size, nr); | |
568 | } | |
69268cde | 569 | |
4c8ae0f6 JQ |
570 | if (next < size) { |
571 | clear_bit(next, migration_bitmap); | |
c6bf8e0e | 572 | migration_dirty_pages--; |
69268cde | 573 | } |
4c8ae0f6 | 574 | return (next - base) << TARGET_PAGE_BITS; |
69268cde JQ |
575 | } |
576 | ||
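A note on the bulk-stage shortcut above: during the bulk stage every page is still dirty by construction, so once the scan position is past the start of the block the code can take next = nr + 1 directly instead of paying for a find_next_bit() walk; the real bitmap search only becomes necessary after ram_find_and_save_block() clears ram_bulk_stage at the end of the first full pass.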
791fa2a2 | 577 | static inline bool migration_bitmap_set_dirty(ram_addr_t addr) |
e44d26c8 | 578 | { |
c6bf8e0e | 579 | bool ret; |
791fa2a2 | 580 | int nr = addr >> TARGET_PAGE_BITS; |
e44d26c8 | 581 | |
c6bf8e0e JQ |
582 | ret = test_and_set_bit(nr, migration_bitmap); |
583 | ||
584 | if (!ret) { | |
585 | migration_dirty_pages++; | |
e44d26c8 | 586 | } |
c6bf8e0e | 587 | return ret; |
e44d26c8 JQ |
588 | } |
589 | ||
791fa2a2 JQ |
590 | static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length) |
591 | { | |
592 | ram_addr_t addr; | |
aa8dc044 JQ |
593 | unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); |
594 | ||
595 | /* start address is aligned at the start of a word? */ | |
596 | if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) { | |
597 | int k; | |
598 | int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS); | |
599 | unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]; | |
600 | ||
601 | for (k = page; k < page + nr; k++) { | |
602 | if (src[k]) { | |
603 | unsigned long new_dirty; | |
604 | new_dirty = ~migration_bitmap[k]; | |
605 | migration_bitmap[k] |= src[k]; | |
606 | new_dirty &= src[k]; | |
607 | migration_dirty_pages += ctpopl(new_dirty); | |
608 | src[k] = 0; | |
609 | } | |
610 | } | |
611 | } else { | |
612 | for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) { | |
613 | if (cpu_physical_memory_get_dirty(start + addr, | |
614 | TARGET_PAGE_SIZE, | |
615 | DIRTY_MEMORY_MIGRATION)) { | |
616 | cpu_physical_memory_reset_dirty(start + addr, | |
617 | TARGET_PAGE_SIZE, | |
618 | DIRTY_MEMORY_MIGRATION); | |
619 | migration_bitmap_set_dirty(start + addr); | |
620 | } | |
791fa2a2 JQ |
621 | } |
622 | } | |
623 | } | |
624 | ||
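The word-at-a-time fast path above counts newly dirtied pages by masking out bits that were already pending. A worked example on a single bitmap word (shown with 4 bits for brevity):

```c
/* src[k]              = 0b1100   pages 2,3 dirtied since last sync
 * migration_bitmap[k] = 0b0100   page 2 was already pending
 *
 * new_dirty  = ~migration_bitmap[k];            // -> ...1011
 * migration_bitmap[k] |= src[k];                // -> 0b1100
 * new_dirty &= src[k];                          // -> 0b1000, one new page
 * migration_dirty_pages += ctpopl(new_dirty);   // -> +1
 */
```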
625 | ||
6c1b663c C |
626 | /* Fix me: there are too many global variables used in migration process. */ |
627 | static int64_t start_time; | |
628 | static int64_t bytes_xfer_prev; | |
629 | static int64_t num_dirty_pages_period; | |
630 | ||
631 | static void migration_bitmap_sync_init(void) | |
632 | { | |
633 | start_time = 0; | |
634 | bytes_xfer_prev = 0; | |
635 | num_dirty_pages_period = 0; | |
636 | } | |
32c835ba | 637 | |
ae3a7047 | 638 | /* Called with iothread lock held, to protect ram_list.dirty_memory[] */ |
dd2df737 JQ |
639 | static void migration_bitmap_sync(void) |
640 | { | |
c6bf8e0e | 641 | RAMBlock *block; |
c6bf8e0e | 642 | uint64_t num_dirty_pages_init = migration_dirty_pages; |
8d017193 | 643 | MigrationState *s = migrate_get_current(); |
8d017193 | 644 | int64_t end_time; |
7ca1dfad | 645 | int64_t bytes_xfer_now; |
8bc39233 C |
646 | static uint64_t xbzrle_cache_miss_prev; |
647 | static uint64_t iterations_prev; | |
7ca1dfad | 648 | |
71411d35 C |
649 | bitmap_sync_count++; |
650 | ||
7ca1dfad CV |
651 | if (!bytes_xfer_prev) { |
652 | bytes_xfer_prev = ram_bytes_transferred(); | |
653 | } | |
8d017193 JQ |
654 | |
655 | if (!start_time) { | |
bc72ad67 | 656 | start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); |
8d017193 | 657 | } |
3c12193d JQ |
658 | |
659 | trace_migration_bitmap_sync_start(); | |
1d671369 | 660 | address_space_sync_dirty_bitmap(&address_space_memory); |
c6bf8e0e | 661 | |
0dc3f44a MD |
662 | rcu_read_lock(); |
663 | QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { | |
9b8424d5 | 664 | migration_bitmap_sync_range(block->mr->ram_addr, block->used_length); |
c6bf8e0e | 665 | } |
0dc3f44a MD |
666 | rcu_read_unlock(); |
667 | ||
c6bf8e0e | 668 | trace_migration_bitmap_sync_end(migration_dirty_pages |
3c12193d | 669 | - num_dirty_pages_init); |
8d017193 | 670 | num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init; |
bc72ad67 | 671 | end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); |
8d017193 JQ |
672 | |
673 | /* more than 1 second = 1000 milliseconds */ | |
674 | if (end_time > start_time + 1000) { | |
7ca1dfad CV |
675 | if (migrate_auto_converge()) { |
676 | /* The following detection logic can be refined later. For now: | |
677 | check whether the bytes dirtied exceed 50% of the approximate
678 | number of bytes that just got transferred since the last time we
679 | were in this routine. If that happens >N times (for now N==4)
680 | we turn on the throttle-down logic */
681 | bytes_xfer_now = ram_bytes_transferred(); | |
682 | if (s->dirty_pages_rate && | |
683 | (num_dirty_pages_period * TARGET_PAGE_SIZE > | |
684 | (bytes_xfer_now - bytes_xfer_prev)/2) && | |
685 | (dirty_rate_high_cnt++ > 4)) { | |
686 | trace_migration_throttle(); | |
687 | mig_throttle_on = true; | |
688 | dirty_rate_high_cnt = 0; | |
689 | } | |
690 | bytes_xfer_prev = bytes_xfer_now; | |
691 | } else { | |
692 | mig_throttle_on = false; | |
693 | } | |
8bc39233 C |
694 | if (migrate_use_xbzrle()) { |
695 | if (iterations_prev != 0) { | |
696 | acct_info.xbzrle_cache_miss_rate = | |
697 | (double)(acct_info.xbzrle_cache_miss - | |
698 | xbzrle_cache_miss_prev) / | |
699 | (acct_info.iterations - iterations_prev); | |
700 | } | |
701 | iterations_prev = acct_info.iterations; | |
702 | xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss; | |
703 | } | |
8d017193 JQ |
704 | s->dirty_pages_rate = num_dirty_pages_period * 1000 |
705 | / (end_time - start_time); | |
90f8ae72 | 706 | s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE; |
8d017193 JQ |
707 | start_time = end_time; |
708 | num_dirty_pages_period = 0; | |
58570ed8 | 709 | s->dirty_sync_count = bitmap_sync_count; |
8d017193 | 710 | } |
dd2df737 JQ |
711 | } |
712 | ||
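In concrete terms, the auto-converge test above fires when, over a roughly one-second window, the bytes dirtied exceed half of the bytes transferred, and that has held on more than four consecutive syncs. Worked numbers (illustrative only, 4 KiB target pages assumed):

```c
/* num_dirty_pages_period = 40000 pages
 *   -> 40000 * 4096 bytes ~= 164 MB dirtied this window
 * bytes_xfer_now - bytes_xfer_prev = 200 MB transferred
 *   -> 164 MB > 200 MB / 2, so the window counts;
 * once dirty_rate_high_cnt exceeds 4, mig_throttle_on is set and
 * check_guest_throttling() starts slowing the vCPUs down. */
```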
87cf878b | 713 | /** |
14bcfdc7 DDAG |
714 | * ram_save_page: Send the given page to the stream |
715 | * | |
87cf878b JQ |
716 | * Returns: Number of pages written. |
717 | * | |
718 | * @f: QEMUFile where to send the data | |
719 | * @block: block that contains the page we want to send | |
720 | * @offset: offset inside the block for the page | |
721 | * @last_stage: if we are at the completion stage | |
722 | * @bytes_transferred: increase it with the number of transferred bytes | |
14bcfdc7 DDAG |
723 | */ |
724 | static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset, | |
87cf878b | 725 | bool last_stage, uint64_t *bytes_transferred) |
14bcfdc7 | 726 | { |
87cf878b | 727 | int pages = -1; |
6e1dea46 | 728 | uint64_t bytes_xmit; |
14bcfdc7 DDAG |
729 | ram_addr_t current_addr; |
730 | MemoryRegion *mr = block->mr; | |
731 | uint8_t *p; | |
732 | int ret; | |
733 | bool send_async = true; | |
734 | ||
14bcfdc7 DDAG |
735 | p = memory_region_get_ram_ptr(mr) + offset; |
736 | ||
737 | /* When in doubt, send the page as normal */
6e1dea46 | 738 | bytes_xmit = 0; |
14bcfdc7 | 739 | ret = ram_control_save_page(f, block->offset, |
6e1dea46 JQ |
740 | offset, TARGET_PAGE_SIZE, &bytes_xmit); |
741 | if (bytes_xmit) { | |
87cf878b JQ |
742 | *bytes_transferred += bytes_xmit; |
743 | pages = 1; | |
6e1dea46 | 744 | } |
14bcfdc7 DDAG |
745 | |
746 | XBZRLE_cache_lock(); | |
747 | ||
748 | current_addr = block->offset + offset; | |
43edc0ed JQ |
749 | |
750 | if (block == last_sent_block) { | |
751 | offset |= RAM_SAVE_FLAG_CONTINUE; | |
752 | } | |
14bcfdc7 DDAG |
753 | if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { |
754 | if (ret != RAM_SAVE_CONTROL_DELAYED) { | |
6e1dea46 | 755 | if (bytes_xmit > 0) { |
14bcfdc7 | 756 | acct_info.norm_pages++; |
6e1dea46 | 757 | } else if (bytes_xmit == 0) { |
14bcfdc7 DDAG |
758 | acct_info.dup_pages++; |
759 | } | |
760 | } | |
761 | } else if (is_zero_range(p, TARGET_PAGE_SIZE)) { | |
762 | acct_info.dup_pages++; | |
f6f14c58 JQ |
763 | *bytes_transferred += save_page_header(f, block, |
764 | offset | RAM_SAVE_FLAG_COMPRESS); | |
14bcfdc7 | 765 | qemu_put_byte(f, 0); |
87cf878b JQ |
766 | *bytes_transferred += 1; |
767 | pages = 1; | |
14bcfdc7 DDAG |
768 | /* Must let xbzrle know, otherwise a previous (now 0'd) cached |
769 | * page would be stale | |
770 | */ | |
771 | xbzrle_cache_zero_page(current_addr); | |
772 | } else if (!ram_bulk_stage && migrate_use_xbzrle()) { | |
f4be0f75 | 773 | pages = save_xbzrle_page(f, &p, current_addr, block, |
73bab2fc | 774 | offset, last_stage, bytes_transferred); |
14bcfdc7 DDAG |
775 | if (!last_stage) { |
776 | /* Can't send this cached data async, since the cache page | |
777 | * might get updated before it gets to the wire | |
778 | */ | |
779 | send_async = false; | |
780 | } | |
781 | } | |
782 | ||
783 | /* XBZRLE overflow or normal page */ | |
87cf878b | 784 | if (pages == -1) { |
f6f14c58 JQ |
785 | *bytes_transferred += save_page_header(f, block, |
786 | offset | RAM_SAVE_FLAG_PAGE); | |
14bcfdc7 DDAG |
787 | if (send_async) { |
788 | qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE); | |
789 | } else { | |
790 | qemu_put_buffer(f, p, TARGET_PAGE_SIZE); | |
791 | } | |
87cf878b JQ |
792 | *bytes_transferred += TARGET_PAGE_SIZE; |
793 | pages = 1; | |
14bcfdc7 DDAG |
794 | acct_info.norm_pages++; |
795 | } | |
796 | ||
797 | XBZRLE_cache_unlock(); | |
798 | ||
87cf878b | 799 | return pages; |
14bcfdc7 DDAG |
800 | } |
801 | ||
8706d2d5 LL |
802 | /** |
803 | * ram_save_compressed_page: compress the given page and send it to the stream | |
804 | * | |
805 | * Returns: Number of pages written. | |
806 | * | |
807 | * @f: QEMUFile where to send the data | |
808 | * @block: block that contains the page we want to send | |
809 | * @offset: offset inside the block for the page | |
810 | * @last_stage: if we are at the completion stage | |
811 | * @bytes_transferred: increase it with the number of transferred bytes | |
812 | */ | |
813 | static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block, | |
814 | ram_addr_t offset, bool last_stage, | |
815 | uint64_t *bytes_transferred) | |
816 | { | |
817 | int pages = -1; | |
818 | ||
819 | /* To be done*/ | |
820 | ||
821 | return pages; | |
822 | } | |
823 | ||
0fcd8d31 JQ |
824 | /** |
825 | * ram_find_and_save_block: Finds a dirty page and sends it to f | |
6c779f22 | 826 | * |
0dc3f44a MD |
827 | * Called within an RCU critical section. |
828 | * | |
0fcd8d31 | 829 | * Returns: The number of pages written |
b823ceaa | 830 | * 0 means no dirty pages |
0fcd8d31 JQ |
831 | * |
832 | * @f: QEMUFile where to send the data | |
833 | * @last_stage: if we are at the completion stage | |
834 | * @bytes_transferred: increase it with the number of transferred bytes | |
6c779f22 OW |
835 | */ |
836 | ||
0fcd8d31 JQ |
837 | static int ram_find_and_save_block(QEMUFile *f, bool last_stage, |
838 | uint64_t *bytes_transferred) | |
ad96090a | 839 | { |
b23a9a5c | 840 | RAMBlock *block = last_seen_block; |
e44359c3 | 841 | ram_addr_t offset = last_offset; |
4c8ae0f6 | 842 | bool complete_round = false; |
87cf878b | 843 | int pages = 0; |
71c510e2 | 844 | MemoryRegion *mr; |
ad96090a | 845 | |
e44359c3 | 846 | if (!block) |
0dc3f44a | 847 | block = QLIST_FIRST_RCU(&ram_list.blocks); |
e44359c3 | 848 | |
4c8ae0f6 | 849 | while (true) { |
71c510e2 | 850 | mr = block->mr; |
4c8ae0f6 JQ |
851 | offset = migration_bitmap_find_and_reset_dirty(mr, offset); |
852 | if (complete_round && block == last_seen_block && | |
853 | offset >= last_offset) { | |
854 | break; | |
855 | } | |
9b8424d5 | 856 | if (offset >= block->used_length) { |
4c8ae0f6 | 857 | offset = 0; |
0dc3f44a | 858 | block = QLIST_NEXT_RCU(block, next); |
4c8ae0f6 | 859 | if (!block) { |
0dc3f44a | 860 | block = QLIST_FIRST_RCU(&ram_list.blocks); |
4c8ae0f6 | 861 | complete_round = true; |
78d07ae7 | 862 | ram_bulk_stage = false; |
4c8ae0f6 JQ |
863 | } |
864 | } else { | |
8706d2d5 LL |
865 | if (migrate_use_compression()) { |
866 | pages = ram_save_compressed_page(f, block, offset, last_stage, | |
867 | bytes_transferred); | |
868 | } else { | |
869 | pages = ram_save_page(f, block, offset, last_stage, | |
870 | bytes_transferred); | |
871 | } | |
17ad9b35 | 872 | |
17ad9b35 | 873 | /* if page is unmodified, continue to the next */ |
87cf878b | 874 | if (pages > 0) { |
43edc0ed | 875 | last_sent_block = block; |
17ad9b35 OW |
876 | break; |
877 | } | |
ad96090a | 878 | } |
4c8ae0f6 | 879 | } |
ae3a7047 | 880 | |
b23a9a5c | 881 | last_seen_block = block; |
e44359c3 | 882 | last_offset = offset; |
0fcd8d31 | 883 | |
87cf878b | 884 | return pages; |
ad96090a BS |
885 | } |
886 | ||
887 | static uint64_t bytes_transferred; | |
888 | ||
2b0ce079 MH |
889 | void acct_update_position(QEMUFile *f, size_t size, bool zero) |
890 | { | |
891 | uint64_t pages = size / TARGET_PAGE_SIZE; | |
892 | if (zero) { | |
893 | acct_info.dup_pages += pages; | |
894 | } else { | |
895 | acct_info.norm_pages += pages; | |
896 | bytes_transferred += size; | |
897 | qemu_update_position(f, size); | |
898 | } | |
899 | } | |
900 | ||
ad96090a BS |
901 | static ram_addr_t ram_save_remaining(void) |
902 | { | |
c6bf8e0e | 903 | return migration_dirty_pages; |
ad96090a BS |
904 | } |
905 | ||
906 | uint64_t ram_bytes_remaining(void) | |
907 | { | |
908 | return ram_save_remaining() * TARGET_PAGE_SIZE; | |
909 | } | |
910 | ||
911 | uint64_t ram_bytes_transferred(void) | |
912 | { | |
913 | return bytes_transferred; | |
914 | } | |
915 | ||
916 | uint64_t ram_bytes_total(void) | |
917 | { | |
d17b5288 AW |
918 | RAMBlock *block; |
919 | uint64_t total = 0; | |
920 | ||
0dc3f44a MD |
921 | rcu_read_lock(); |
922 | QLIST_FOREACH_RCU(block, &ram_list.blocks, next) | |
9b8424d5 | 923 | total += block->used_length; |
0dc3f44a | 924 | rcu_read_unlock(); |
d17b5288 | 925 | return total; |
ad96090a BS |
926 | } |
927 | ||
905f26f2 GA |
928 | void free_xbzrle_decoded_buf(void) |
929 | { | |
930 | g_free(xbzrle_decoded_buf); | |
931 | xbzrle_decoded_buf = NULL; | |
932 | } | |
933 | ||
8e21cd32 OW |
934 | static void migration_end(void) |
935 | { | |
244eaa75 PB |
936 | if (migration_bitmap) { |
937 | memory_global_dirty_log_stop(); | |
938 | g_free(migration_bitmap); | |
939 | migration_bitmap = NULL; | |
940 | } | |
17ad9b35 | 941 | |
fd8cec93 | 942 | XBZRLE_cache_lock(); |
244eaa75 | 943 | if (XBZRLE.cache) { |
17ad9b35 | 944 | cache_fini(XBZRLE.cache); |
17ad9b35 OW |
945 | g_free(XBZRLE.encoded_buf); |
946 | g_free(XBZRLE.current_buf); | |
17ad9b35 | 947 | XBZRLE.cache = NULL; |
f6c6483b OW |
948 | XBZRLE.encoded_buf = NULL; |
949 | XBZRLE.current_buf = NULL; | |
17ad9b35 | 950 | } |
fd8cec93 | 951 | XBZRLE_cache_unlock(); |
8e21cd32 OW |
952 | } |
953 | ||
9b5bfab0 JQ |
954 | static void ram_migration_cancel(void *opaque) |
955 | { | |
956 | migration_end(); | |
957 | } | |
958 | ||
5a170775 JQ |
959 | static void reset_ram_globals(void) |
960 | { | |
b23a9a5c | 961 | last_seen_block = NULL; |
5f718a15 | 962 | last_sent_block = NULL; |
5a170775 | 963 | last_offset = 0; |
f798b07f | 964 | last_version = ram_list.version; |
78d07ae7 | 965 | ram_bulk_stage = true; |
5a170775 JQ |
966 | } |
967 | ||
4508bd9e JQ |
968 | #define MAX_WAIT 50 /* ms, half buffered_file limit */ |
969 | ||
0dc3f44a MD |
970 | |
971 | /* Each of ram_save_setup, ram_save_iterate and ram_save_complete has | |
972 | * a long-running RCU critical section. When RCU reclaims in the code
973 | * start to become numerous it will be necessary to reduce the | |
974 | * granularity of these critical sections. | |
975 | */ | |
976 | ||
d1315aac | 977 | static int ram_save_setup(QEMUFile *f, void *opaque) |
ad96090a | 978 | { |
d1315aac | 979 | RAMBlock *block; |
e30d1d8c | 980 | int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */ |
c6bf8e0e | 981 | |
7ca1dfad CV |
982 | mig_throttle_on = false; |
983 | dirty_rate_high_cnt = 0; | |
71411d35 | 984 | bitmap_sync_count = 0; |
6c1b663c | 985 | migration_bitmap_sync_init(); |
ad96090a | 986 | |
17ad9b35 | 987 | if (migrate_use_xbzrle()) { |
d97326ee | 988 | XBZRLE_cache_lock(); |
17ad9b35 OW |
989 | XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() / |
990 | TARGET_PAGE_SIZE, | |
991 | TARGET_PAGE_SIZE); | |
992 | if (!XBZRLE.cache) { | |
d97326ee DDAG |
993 | XBZRLE_cache_unlock(); |
994 | error_report("Error creating cache"); | |
17ad9b35 OW |
995 | return -1; |
996 | } | |
d97326ee | 997 | XBZRLE_cache_unlock(); |
a17b2fd3 OW |
998 | |
999 | /* We prefer not to abort if there is no memory */ | |
1000 | XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE); | |
1001 | if (!XBZRLE.encoded_buf) { | |
d97326ee | 1002 | error_report("Error allocating encoded_buf"); |
a17b2fd3 OW |
1003 | return -1; |
1004 | } | |
1005 | ||
1006 | XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE); | |
1007 | if (!XBZRLE.current_buf) { | |
d97326ee | 1008 | error_report("Error allocating current_buf"); |
a17b2fd3 OW |
1009 | g_free(XBZRLE.encoded_buf); |
1010 | XBZRLE.encoded_buf = NULL; | |
1011 | return -1; | |
1012 | } | |
1013 | ||
004d4c10 | 1014 | acct_clear(); |
17ad9b35 OW |
1015 | } |
1016 | ||
ae3a7047 | 1017 | /* iothread lock needed for ram_list.dirty_memory[] */ |
9b095037 PB |
1018 | qemu_mutex_lock_iothread(); |
1019 | qemu_mutex_lock_ramlist(); | |
0dc3f44a | 1020 | rcu_read_lock(); |
9b095037 PB |
1021 | bytes_transferred = 0; |
1022 | reset_ram_globals(); | |
1023 | ||
e30d1d8c DDAG |
1024 | ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS; |
1025 | migration_bitmap = bitmap_new(ram_bitmap_pages); | |
1026 | bitmap_set(migration_bitmap, 0, ram_bitmap_pages); | |
1027 | ||
1028 | /* | |
1029 | * Count the total number of pages used by ram blocks not including any | |
1030 | * gaps due to alignment or unplugs. | |
1031 | */ | |
f54a235f | 1032 | migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS; |
e30d1d8c | 1033 | |
d1315aac | 1034 | memory_global_dirty_log_start(); |
c6bf8e0e | 1035 | migration_bitmap_sync(); |
0dc3f44a | 1036 | qemu_mutex_unlock_ramlist(); |
9b095037 | 1037 | qemu_mutex_unlock_iothread(); |
ad96090a | 1038 | |
d1315aac | 1039 | qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE); |
97ab12d4 | 1040 | |
0dc3f44a | 1041 | QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { |
d1315aac JQ |
1042 | qemu_put_byte(f, strlen(block->idstr)); |
1043 | qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr)); | |
9b8424d5 | 1044 | qemu_put_be64(f, block->used_length); |
ad96090a BS |
1045 | } |
1046 | ||
0dc3f44a | 1047 | rcu_read_unlock(); |
0033b8b4 MH |
1048 | |
1049 | ram_control_before_iterate(f, RAM_CONTROL_SETUP); | |
1050 | ram_control_after_iterate(f, RAM_CONTROL_SETUP); | |
1051 | ||
d1315aac JQ |
1052 | qemu_put_be64(f, RAM_SAVE_FLAG_EOS); |
1053 | ||
1054 | return 0; | |
1055 | } | |
1056 | ||
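Putting the pieces together, the setup section this function emits looks like this on the wire (RDMA hooks aside):

```c
/* ram_save_setup() stream layout:
 *
 *   be64  ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE
 *   for each RAMBlock:
 *     u8    strlen(block->idstr)
 *     n     block->idstr bytes
 *     be64  block->used_length
 *   be64  RAM_SAVE_FLAG_EOS
 */
```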
16310a3c | 1057 | static int ram_save_iterate(QEMUFile *f, void *opaque) |
d1315aac | 1058 | { |
d1315aac JQ |
1059 | int ret; |
1060 | int i; | |
e4ed1541 | 1061 | int64_t t0; |
0fcd8d31 | 1062 | int pages_sent = 0; |
d1315aac | 1063 | |
0dc3f44a | 1064 | rcu_read_lock(); |
f798b07f UD |
1065 | if (ram_list.version != last_version) { |
1066 | reset_ram_globals(); | |
1067 | } | |
1068 | ||
0dc3f44a MD |
1069 | /* Read version before ram_list.blocks */ |
1070 | smp_rmb(); | |
1071 | ||
0033b8b4 MH |
1072 | ram_control_before_iterate(f, RAM_CONTROL_ROUND); |
1073 | ||
bc72ad67 | 1074 | t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); |
4508bd9e | 1075 | i = 0; |
2975725f | 1076 | while ((ret = qemu_file_rate_limit(f)) == 0) { |
0fcd8d31 | 1077 | int pages; |
ad96090a | 1078 | |
0fcd8d31 JQ |
1079 | pages = ram_find_and_save_block(f, false, &bytes_transferred); |
1080 | /* no more pages to send */
1081 | if (pages == 0) { | |
ad96090a BS |
1082 | break; |
1083 | } | |
0fcd8d31 | 1084 | pages_sent += pages; |
004d4c10 | 1085 | acct_info.iterations++; |
7ca1dfad | 1086 | check_guest_throttling(); |
4508bd9e JQ |
1087 | /* we want to check in the 1st loop, just in case it was the 1st time |
1088 | and we had to sync the dirty bitmap. | |
1089 | qemu_get_clock_ns() is a bit expensive, so we only check every few
1090 | iterations
1091 | */ | |
1092 | if ((i & 63) == 0) { | |
bc72ad67 | 1093 | uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000; |
4508bd9e | 1094 | if (t1 > MAX_WAIT) { |
ef37a699 | 1095 | DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n", |
4508bd9e JQ |
1096 | t1, i); |
1097 | break; | |
1098 | } | |
1099 | } | |
1100 | i++; | |
ad96090a | 1101 | } |
0dc3f44a | 1102 | rcu_read_unlock(); |
fb3409de | 1103 | |
0033b8b4 MH |
1104 | /* |
1105 | * Must occur before EOS (or any QEMUFile operation) | |
1106 | * because of RDMA protocol. | |
1107 | */ | |
1108 | ram_control_after_iterate(f, RAM_CONTROL_ROUND); | |
1109 | ||
6cd0beda LL |
1110 | qemu_put_be64(f, RAM_SAVE_FLAG_EOS); |
1111 | bytes_transferred += 8; | |
1112 | ||
1113 | ret = qemu_file_get_error(f); | |
2975725f JQ |
1114 | if (ret < 0) { |
1115 | return ret; | |
1116 | } | |
1117 | ||
0fcd8d31 | 1118 | return pages_sent; |
16310a3c JQ |
1119 | } |
1120 | ||
ae3a7047 | 1121 | /* Called with iothread lock */ |
16310a3c JQ |
1122 | static int ram_save_complete(QEMUFile *f, void *opaque) |
1123 | { | |
0dc3f44a MD |
1124 | rcu_read_lock(); |
1125 | ||
9c339485 | 1126 | migration_bitmap_sync(); |
b2a8658e | 1127 | |
0033b8b4 MH |
1128 | ram_control_before_iterate(f, RAM_CONTROL_FINISH); |
1129 | ||
ad96090a | 1130 | /* try transferring iterative blocks of memory */ |
3a697f69 | 1131 | |
16310a3c | 1132 | /* flush all remaining blocks regardless of rate limiting */ |
6c779f22 | 1133 | while (true) { |
0fcd8d31 | 1134 | int pages; |
3fc250b4 | 1135 | |
0fcd8d31 | 1136 | pages = ram_find_and_save_block(f, true, &bytes_transferred); |
6c779f22 | 1137 | /* no more blocks to send */
0fcd8d31 | 1138 | if (pages == 0) { |
6c779f22 | 1139 | break; |
ad96090a | 1140 | } |
ad96090a | 1141 | } |
0033b8b4 MH |
1142 | |
1143 | ram_control_after_iterate(f, RAM_CONTROL_FINISH); | |
244eaa75 | 1144 | migration_end(); |
ad96090a | 1145 | |
0dc3f44a | 1146 | rcu_read_unlock(); |
ad96090a BS |
1147 | qemu_put_be64(f, RAM_SAVE_FLAG_EOS); |
1148 | ||
5b3c9638 | 1149 | return 0; |
ad96090a BS |
1150 | } |
1151 | ||
e4ed1541 JQ |
1152 | static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size) |
1153 | { | |
1154 | uint64_t remaining_size; | |
1155 | ||
1156 | remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE; | |
1157 | ||
1158 | if (remaining_size < max_size) { | |
32c835ba | 1159 | qemu_mutex_lock_iothread(); |
0dc3f44a | 1160 | rcu_read_lock(); |
e4ed1541 | 1161 | migration_bitmap_sync(); |
0dc3f44a | 1162 | rcu_read_unlock(); |
32c835ba | 1163 | qemu_mutex_unlock_iothread(); |
e4ed1541 JQ |
1164 | remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE; |
1165 | } | |
1166 | return remaining_size; | |
1167 | } | |
1168 | ||
17ad9b35 OW |
1169 | static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host) |
1170 | { | |
17ad9b35 OW |
1171 | unsigned int xh_len; |
1172 | int xh_flags; | |
1173 | ||
905f26f2 GA |
1174 | if (!xbzrle_decoded_buf) { |
1175 | xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE); | |
17ad9b35 OW |
1176 | } |
1177 | ||
1178 | /* extract RLE header */ | |
1179 | xh_flags = qemu_get_byte(f); | |
1180 | xh_len = qemu_get_be16(f); | |
1181 | ||
1182 | if (xh_flags != ENCODING_FLAG_XBZRLE) { | |
0971f1be | 1183 | error_report("Failed to load XBZRLE page - wrong compression!"); |
17ad9b35 OW |
1184 | return -1; |
1185 | } | |
1186 | ||
1187 | if (xh_len > TARGET_PAGE_SIZE) { | |
0971f1be | 1188 | error_report("Failed to load XBZRLE page - len overflow!"); |
17ad9b35 OW |
1189 | return -1; |
1190 | } | |
1191 | /* load data and decode */ | |
905f26f2 | 1192 | qemu_get_buffer(f, xbzrle_decoded_buf, xh_len); |
17ad9b35 OW |
1193 | |
1194 | /* decode RLE */ | |
fb626663 CG |
1195 | if (xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host, |
1196 | TARGET_PAGE_SIZE) == -1) { | |
0971f1be | 1197 | error_report("Failed to load XBZRLE page - decode error!"); |
fb626663 | 1198 | return -1; |
17ad9b35 OW |
1199 | } |
1200 | ||
fb626663 | 1201 | return 0; |
17ad9b35 OW |
1202 | } |
1203 | ||
0dc3f44a MD |
1204 | /* Must be called from within an RCU critical section.
1205 | * Returns a pointer from within the RCU-protected ram_list. | |
1206 | */ | |
a55bbe31 AW |
1207 | static inline void *host_from_stream_offset(QEMUFile *f, |
1208 | ram_addr_t offset, | |
1209 | int flags) | |
1210 | { | |
1211 | static RAMBlock *block = NULL; | |
1212 | char id[256]; | |
1213 | uint8_t len; | |
1214 | ||
1215 | if (flags & RAM_SAVE_FLAG_CONTINUE) { | |
9b8424d5 | 1216 | if (!block || block->max_length <= offset) { |
0971f1be | 1217 | error_report("Ack, bad migration stream!"); |
a55bbe31 AW |
1218 | return NULL; |
1219 | } | |
1220 | ||
dc94a7ed | 1221 | return memory_region_get_ram_ptr(block->mr) + offset; |
a55bbe31 AW |
1222 | } |
1223 | ||
1224 | len = qemu_get_byte(f); | |
1225 | qemu_get_buffer(f, (uint8_t *)id, len); | |
1226 | id[len] = 0; | |
1227 | ||
0dc3f44a | 1228 | QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { |
9b8424d5 MT |
1229 | if (!strncmp(id, block->idstr, sizeof(id)) && |
1230 | block->max_length > offset) { | |
dc94a7ed | 1231 | return memory_region_get_ram_ptr(block->mr) + offset; |
0be839a2 | 1232 | } |
a55bbe31 AW |
1233 | } |
1234 | ||
0971f1be | 1235 | error_report("Can't find block %s!", id); |
a55bbe31 AW |
1236 | return NULL; |
1237 | } | |
1238 | ||
44c3b58c MH |
1239 | /* |
1240 | * If a page (or a whole RDMA chunk) has been | |
1241 | * determined to be zero, then zap it. | |
1242 | */ | |
1243 | void ram_handle_compressed(void *host, uint8_t ch, uint64_t size) | |
1244 | { | |
d613a56f | 1245 | if (ch != 0 || !is_zero_range(host, size)) { |
44c3b58c | 1246 | memset(host, ch, size); |
44c3b58c MH |
1247 | } |
1248 | } | |
1249 | ||
3fcb38c2 LL |
1250 | static void *do_data_decompress(void *opaque) |
1251 | { | |
1252 | while (!quit_decomp_thread) { | |
1253 | /* To be done */ | |
1254 | } | |
1255 | ||
1256 | return NULL; | |
1257 | } | |
1258 | ||
1259 | void migrate_decompress_threads_create(void) | |
1260 | { | |
1261 | int i, thread_count; | |
1262 | ||
1263 | thread_count = migrate_decompress_threads(); | |
1264 | decompress_threads = g_new0(QemuThread, thread_count); | |
1265 | decomp_param = g_new0(DecompressParam, thread_count); | |
1266 | compressed_data_buf = g_malloc0(compressBound(TARGET_PAGE_SIZE)); | |
1267 | quit_decomp_thread = false; | |
1268 | for (i = 0; i < thread_count; i++) { | |
1269 | qemu_thread_create(decompress_threads + i, "decompress", | |
1270 | do_data_decompress, decomp_param + i, | |
1271 | QEMU_THREAD_JOINABLE); | |
1272 | } | |
1273 | } | |
1274 | ||
1275 | void migrate_decompress_threads_join(void) | |
1276 | { | |
1277 | int i, thread_count; | |
1278 | ||
1279 | quit_decomp_thread = true; | |
1280 | thread_count = migrate_decompress_threads(); | |
1281 | for (i = 0; i < thread_count; i++) { | |
1282 | qemu_thread_join(decompress_threads + i); | |
1283 | } | |
1284 | g_free(decompress_threads); | |
1285 | g_free(decomp_param); | |
1286 | g_free(compressed_data_buf); | |
1287 | decompress_threads = NULL; | |
1288 | decomp_param = NULL; | |
1289 | compressed_data_buf = NULL; | |
1290 | } | |
1291 | ||
1292 | static void decompress_data_with_multi_threads(uint8_t *compbuf, | |
1293 | void *host, int len) | |
1294 | { | |
1295 | /* To be done */ | |
1296 | } | |
1297 | ||
7908c78d | 1298 | static int ram_load(QEMUFile *f, void *opaque, int version_id) |
ad96090a | 1299 | { |
5b0e9dd4 | 1300 | int flags = 0, ret = 0; |
3a697f69 | 1301 | static uint64_t seq_iter; |
3fcb38c2 | 1302 | int len = 0; |
3a697f69 OW |
1303 | |
1304 | seq_iter++; | |
ad96090a | 1305 | |
21a246a4 | 1306 | if (version_id != 4) { |
4798fe55 | 1307 | ret = -EINVAL; |
ad96090a BS |
1308 | } |
1309 | ||
0dc3f44a MD |
1310 | /* This RCU critical section can be very long running. |
1311 | * When RCU reclaims in the code start to become numerous, | |
1312 | * it will be necessary to reduce the granularity of this | |
1313 | * critical section. | |
1314 | */ | |
1315 | rcu_read_lock(); | |
5b0e9dd4 PL |
1316 | while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) { |
1317 | ram_addr_t addr, total_ram_bytes; | |
1318 | void *host; | |
1319 | uint8_t ch; | |
ad96090a | 1320 | |
5b0e9dd4 | 1321 | addr = qemu_get_be64(f); |
ad96090a BS |
1322 | flags = addr & ~TARGET_PAGE_MASK; |
1323 | addr &= TARGET_PAGE_MASK; | |
1324 | ||
5b0e9dd4 PL |
1325 | switch (flags & ~RAM_SAVE_FLAG_CONTINUE) { |
1326 | case RAM_SAVE_FLAG_MEM_SIZE: | |
21a246a4 | 1327 | /* Synchronize RAM block list */ |
5b0e9dd4 PL |
1328 | total_ram_bytes = addr; |
1329 | while (!ret && total_ram_bytes) { | |
21a246a4 C |
1330 | RAMBlock *block; |
1331 | uint8_t len; | |
5b0e9dd4 PL |
1332 | char id[256]; |
1333 | ram_addr_t length; | |
21a246a4 C |
1334 | |
1335 | len = qemu_get_byte(f); | |
1336 | qemu_get_buffer(f, (uint8_t *)id, len); | |
1337 | id[len] = 0; | |
1338 | length = qemu_get_be64(f); | |
1339 | ||
0dc3f44a | 1340 | QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { |
21a246a4 | 1341 | if (!strncmp(id, block->idstr, sizeof(id))) { |
b0cc3f83 MT |
1342 | if (length != block->used_length) { |
1343 | Error *local_err = NULL; | |
1344 | ||
1345 | ret = qemu_ram_resize(block->offset, length, &local_err); | |
1346 | if (local_err) { | |
565f65d2 | 1347 | error_report_err(local_err); |
b0cc3f83 | 1348 | } |
97ab12d4 | 1349 | } |
21a246a4 | 1350 | break; |
97ab12d4 | 1351 | } |
21a246a4 | 1352 | } |
97ab12d4 | 1353 | |
21a246a4 | 1354 | if (!block) { |
0971f1be LT |
1355 | error_report("Unknown ramblock \"%s\", cannot " |
1356 | "accept migration", id); | |
21a246a4 | 1357 | ret = -EINVAL; |
db80face | 1358 | } |
21a246a4 C |
1359 | |
1360 | total_ram_bytes -= length; | |
ad96090a | 1361 | } |
5b0e9dd4 PL |
1362 | break; |
1363 | case RAM_SAVE_FLAG_COMPRESS: | |
f09f2189 | 1364 | host = host_from_stream_offset(f, addr, flags); |
492fb99c | 1365 | if (!host) { |
db80face | 1366 | error_report("Illegal RAM offset " RAM_ADDR_FMT, addr); |
4798fe55 | 1367 | ret = -EINVAL; |
db80face | 1368 | break; |
492fb99c | 1369 | } |
97ab12d4 | 1370 | ch = qemu_get_byte(f); |
44c3b58c | 1371 | ram_handle_compressed(host, ch, TARGET_PAGE_SIZE); |
5b0e9dd4 PL |
1372 | break; |
1373 | case RAM_SAVE_FLAG_PAGE: | |
f09f2189 | 1374 | host = host_from_stream_offset(f, addr, flags); |
0ff1f9f5 | 1375 | if (!host) { |
db80face | 1376 | error_report("Illegal RAM offset " RAM_ADDR_FMT, addr); |
4798fe55 | 1377 | ret = -EINVAL; |
db80face | 1378 | break; |
0ff1f9f5 | 1379 | } |
97ab12d4 | 1380 | qemu_get_buffer(f, host, TARGET_PAGE_SIZE); |
5b0e9dd4 | 1381 | break; |
3fcb38c2 LL |
1382 | case RAM_SAVE_FLAG_COMPRESS_PAGE: |
1383 | host = host_from_stream_offset(f, addr, flags); | |
1384 | if (!host) { | |
1385 | error_report("Invalid RAM offset " RAM_ADDR_FMT, addr); | |
1386 | ret = -EINVAL; | |
1387 | break; | |
1388 | } | |
1389 | ||
1390 | len = qemu_get_be32(f); | |
1391 | if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) { | |
1392 | error_report("Invalid compressed data length: %d", len); | |
1393 | ret = -EINVAL; | |
1394 | break; | |
1395 | } | |
1396 | qemu_get_buffer(f, compressed_data_buf, len); | |
1397 | decompress_data_with_multi_threads(compressed_data_buf, host, len); | |
1398 | break; | |
5b0e9dd4 PL |
1399 | case RAM_SAVE_FLAG_XBZRLE: |
1400 | host = host_from_stream_offset(f, addr, flags); | |
17ad9b35 | 1401 | if (!host) { |
db80face | 1402 | error_report("Illegal RAM offset " RAM_ADDR_FMT, addr); |
4798fe55 | 1403 | ret = -EINVAL; |
db80face | 1404 | break; |
17ad9b35 | 1405 | } |
17ad9b35 | 1406 | if (load_xbzrle(f, addr, host) < 0) { |
db80face PL |
1407 | error_report("Failed to decompress XBZRLE page at " |
1408 | RAM_ADDR_FMT, addr); | |
17ad9b35 | 1409 | ret = -EINVAL; |
db80face | 1410 | break; |
17ad9b35 | 1411 | } |
db80face | 1412 | break; |
5b0e9dd4 PL |
1413 | case RAM_SAVE_FLAG_EOS: |
1414 | /* normal exit */ | |
db80face | 1415 | break; |
5b0e9dd4 PL |
1416 | default: |
1417 | if (flags & RAM_SAVE_FLAG_HOOK) { | |
1418 | ram_control_load_hook(f, flags); | |
1419 | } else { | |
1420 | error_report("Unknown combination of migration flags: %#x", | |
1421 | flags); | |
1422 | ret = -EINVAL; | |
1423 | } | |
1424 | } | |
1425 | if (!ret) { | |
1426 | ret = qemu_file_get_error(f); | |
ad96090a | 1427 | } |
db80face | 1428 | } |
ad96090a | 1429 | |
0dc3f44a | 1430 | rcu_read_unlock(); |
ef37a699 IM |
1431 | DPRINTF("Completed load of VM with exit code %d seq iteration " |
1432 | "%" PRIu64 "\n", ret, seq_iter); | |
3a697f69 | 1433 | return ret; |
ad96090a BS |
1434 | } |
1435 | ||
0d6ab3ab | 1436 | static SaveVMHandlers savevm_ram_handlers = { |
d1315aac | 1437 | .save_live_setup = ram_save_setup, |
16310a3c JQ |
1438 | .save_live_iterate = ram_save_iterate, |
1439 | .save_live_complete = ram_save_complete, | |
e4ed1541 | 1440 | .save_live_pending = ram_save_pending, |
7908c78d | 1441 | .load_state = ram_load, |
9b5bfab0 | 1442 | .cancel = ram_migration_cancel, |
7908c78d JQ |
1443 | }; |
1444 | ||
0d6ab3ab DDAG |
1445 | void ram_mig_init(void) |
1446 | { | |
d97326ee | 1447 | qemu_mutex_init(&XBZRLE.lock); |
0d6ab3ab DDAG |
1448 | register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL); |
1449 | } | |
1450 | ||
0dfa5ef9 IY |
1451 | struct soundhw { |
1452 | const char *name; | |
1453 | const char *descr; | |
1454 | int enabled; | |
1455 | int isa; | |
1456 | union { | |
4a0f031d | 1457 | int (*init_isa) (ISABus *bus); |
0dfa5ef9 IY |
1458 | int (*init_pci) (PCIBus *bus); |
1459 | } init; | |
1460 | }; | |
1461 | ||
36cd6f6f PB |
1462 | static struct soundhw soundhw[9]; |
1463 | static int soundhw_count; | |
ad96090a | 1464 | |
36cd6f6f PB |
1465 | void isa_register_soundhw(const char *name, const char *descr, |
1466 | int (*init_isa)(ISABus *bus)) | |
1467 | { | |
1468 | assert(soundhw_count < ARRAY_SIZE(soundhw) - 1); | |
1469 | soundhw[soundhw_count].name = name; | |
1470 | soundhw[soundhw_count].descr = descr; | |
1471 | soundhw[soundhw_count].isa = 1; | |
1472 | soundhw[soundhw_count].init.init_isa = init_isa; | |
1473 | soundhw_count++; | |
1474 | } | |
ad96090a | 1475 | |
36cd6f6f PB |
1476 | void pci_register_soundhw(const char *name, const char *descr, |
1477 | int (*init_pci)(PCIBus *bus)) | |
1478 | { | |
1479 | assert(soundhw_count < ARRAY_SIZE(soundhw) - 1); | |
1480 | soundhw[soundhw_count].name = name; | |
1481 | soundhw[soundhw_count].descr = descr; | |
1482 | soundhw[soundhw_count].isa = 0; | |
1483 | soundhw[soundhw_count].init.init_pci = init_pci; | |
1484 | soundhw_count++; | |
1485 | } | |
ad96090a BS |
1486 | |
1487 | void select_soundhw(const char *optarg) | |
1488 | { | |
1489 | struct soundhw *c; | |
1490 | ||
c8057f95 | 1491 | if (is_help_option(optarg)) { |
ad96090a BS |
1492 | show_valid_cards: |
1493 | ||
36cd6f6f PB |
1494 | if (soundhw_count) { |
1495 | printf("Valid sound card names (comma separated):\n"); | |
1496 | for (c = soundhw; c->name; ++c) { | |
1497 | printf ("%-11s %s\n", c->name, c->descr); | |
1498 | } | |
1499 | printf("\n-soundhw all will enable all of the above\n"); | |
1500 | } else { | |
1501 | printf("Machine has no user-selectable audio hardware " | |
1502 | "(it may or may not have always-present audio hardware).\n"); | |
ad96090a | 1503 | } |
c8057f95 | 1504 | exit(!is_help_option(optarg)); |
ad96090a BS |
1505 | } |
1506 | else { | |
1507 | size_t l; | |
1508 | const char *p; | |
1509 | char *e; | |
1510 | int bad_card = 0; | |
1511 | ||
1512 | if (!strcmp(optarg, "all")) { | |
1513 | for (c = soundhw; c->name; ++c) { | |
1514 | c->enabled = 1; | |
1515 | } | |
1516 | return; | |
1517 | } | |
1518 | ||
1519 | p = optarg; | |
1520 | while (*p) { | |
1521 | e = strchr(p, ','); | |
1522 | l = !e ? strlen(p) : (size_t) (e - p); | |
1523 | ||
1524 | for (c = soundhw; c->name; ++c) { | |
1525 | if (!strncmp(c->name, p, l) && !c->name[l]) { | |
1526 | c->enabled = 1; | |
1527 | break; | |
1528 | } | |
1529 | } | |
1530 | ||
1531 | if (!c->name) { | |
1532 | if (l > 80) { | |
0971f1be | 1533 | error_report("Unknown sound card name (too big to show)"); |
ad96090a BS |
1534 | } |
1535 | else { | |
0971f1be LT |
1536 | error_report("Unknown sound card name `%.*s'", |
1537 | (int) l, p); | |
ad96090a BS |
1538 | } |
1539 | bad_card = 1; | |
1540 | } | |
1541 | p += l + (e != NULL); | |
1542 | } | |
1543 | ||
1544 | if (bad_card) { | |
1545 | goto show_valid_cards; | |
1546 | } | |
1547 | } | |
1548 | } | |
0dfa5ef9 | 1549 | |
f81222bc | 1550 | void audio_init(void) |
0dfa5ef9 IY |
1551 | { |
1552 | struct soundhw *c; | |
f81222bc PB |
1553 | ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL); |
1554 | PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL); | |
0dfa5ef9 IY |
1555 | |
1556 | for (c = soundhw; c->name; ++c) { | |
1557 | if (c->enabled) { | |
1558 | if (c->isa) { | |
f81222bc | 1559 | if (!isa_bus) { |
0971f1be | 1560 | error_report("ISA bus not available for %s", c->name); |
f81222bc | 1561 | exit(1); |
0dfa5ef9 | 1562 | } |
f81222bc | 1563 | c->init.init_isa(isa_bus); |
0dfa5ef9 | 1564 | } else { |
f81222bc | 1565 | if (!pci_bus) { |
0971f1be | 1566 | error_report("PCI bus not available for %s", c->name); |
f81222bc | 1567 | exit(1); |
0dfa5ef9 | 1568 | } |
f81222bc | 1569 | c->init.init_pci(pci_bus); |
0dfa5ef9 IY |
1570 | } |
1571 | } | |
1572 | } | |
1573 | } | |
ad96090a BS |
1574 | |
1575 | int qemu_uuid_parse(const char *str, uint8_t *uuid) | |
1576 | { | |
1577 | int ret; | |
1578 | ||
1579 | if (strlen(str) != 36) { | |
1580 | return -1; | |
1581 | } | |
1582 | ||
1583 | ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3], | |
1584 | &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9], | |
1585 | &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14], | |
1586 | &uuid[15]); | |
1587 | ||
1588 | if (ret != 16) { | |
1589 | return -1; | |
1590 | } | |
ad96090a BS |
1591 | return 0; |
1592 | } | |
1593 | ||
0c764a9d | 1594 | void do_acpitable_option(const QemuOpts *opts) |
ad96090a BS |
1595 | { |
1596 | #ifdef TARGET_I386 | |
23084327 LE |
1597 | Error *err = NULL; |
1598 | ||
1599 | acpi_table_add(opts, &err); | |
1600 | if (err) { | |
4a44d85e SA |
1601 | error_report("Wrong acpi table provided: %s", |
1602 | error_get_pretty(err)); | |
23084327 | 1603 | error_free(err); |
ad96090a BS |
1604 | exit(1); |
1605 | } | |
1606 | #endif | |
1607 | } | |
1608 | ||
4f953d2f | 1609 | void do_smbios_option(QemuOpts *opts) |
ad96090a BS |
1610 | { |
1611 | #ifdef TARGET_I386 | |
4f953d2f | 1612 | smbios_entry_add(opts); |
ad96090a BS |
1613 | #endif |
1614 | } | |
1615 | ||
1616 | void cpudef_init(void) | |
1617 | { | |
1618 | #if defined(cpudef_setup) | |
1619 | cpudef_setup(); /* parse cpu definitions in target config file */ | |
1620 | #endif | |
1621 | } | |
1622 | ||
ad96090a BS |
1623 | int kvm_available(void) |
1624 | { | |
1625 | #ifdef CONFIG_KVM | |
1626 | return 1; | |
1627 | #else | |
1628 | return 0; | |
1629 | #endif | |
1630 | } | |
1631 | ||
1632 | int xen_available(void) | |
1633 | { | |
1634 | #ifdef CONFIG_XEN | |
1635 | return 1; | |
1636 | #else | |
1637 | return 0; | |
1638 | #endif | |
1639 | } | |
99afc91d DB |
1640 | |
1641 | ||
1642 | TargetInfo *qmp_query_target(Error **errp) | |
1643 | { | |
1644 | TargetInfo *info = g_malloc0(sizeof(*info)); | |
1645 | ||
c02a9552 | 1646 | info->arch = g_strdup(TARGET_NAME); |
99afc91d DB |
1647 | |
1648 | return info; | |
1649 | } | |
7ca1dfad CV |
1650 | |
1651 | /* Stub function that gets run on the vcpu when it's brought out of the
1652 | VM to run inside qemu via async_run_on_cpu() */
1653 | static void mig_sleep_cpu(void *opq) | |
1654 | { | |
1655 | qemu_mutex_unlock_iothread(); | |
1656 | g_usleep(30*1000); | |
1657 | qemu_mutex_lock_iothread(); | |
1658 | } | |
1659 | ||
1660 | /* To reduce the dirty rate, explicitly disallow the VCPUs from spending
1661 | much time in the VM. The migration thread will try to catch up.
1662 | The workload will experience a performance drop.
1663 | */ | |
7ca1dfad CV |
1664 | static void mig_throttle_guest_down(void) |
1665 | { | |
38fcbd3f AF |
1666 | CPUState *cpu; |
1667 | ||
7ca1dfad | 1668 | qemu_mutex_lock_iothread(); |
38fcbd3f AF |
1669 | CPU_FOREACH(cpu) { |
1670 | async_run_on_cpu(cpu, mig_sleep_cpu, NULL); | |
1671 | } | |
7ca1dfad CV |
1672 | qemu_mutex_unlock_iothread(); |
1673 | } | |
1674 | ||
1675 | static void check_guest_throttling(void) | |
1676 | { | |
1677 | static int64_t t0; | |
1678 | int64_t t1; | |
1679 | ||
1680 | if (!mig_throttle_on) { | |
1681 | return; | |
1682 | } | |
1683 | ||
1684 | if (!t0) { | |
bc72ad67 | 1685 | t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); |
7ca1dfad CV |
1686 | return; |
1687 | } | |
1688 | ||
bc72ad67 | 1689 | t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); |
7ca1dfad CV |
1690 | |
1691 | /* If it has been more than 40 ms since the last time the guest | |
1692 | * was throttled then do it again. | |
1693 | */ | |
1694 | if (40 < (t1-t0)/1000000) { | |
1695 | mig_throttle_guest_down(); | |
1696 | t0 = t1; | |
1697 | } | |
1698 | } |
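Taken together, the two functions above give a rough duty cycle: once mig_throttle_on is set, each time at least 40 ms have elapsed since the last throttle event every vCPU is put to sleep for 30 ms (with the iothread lock dropped so migration can make progress), so in steady state a vCPU spends about 30 ms of every ~70 ms asleep, surrendering on the order of 40% of its run time. This is back-of-the-envelope only; the actual slowdown depends on scheduling and on how quickly the guest's dirty rate responds.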