migration/dirtyrate.c (mirror_qemu.git, at commit "migration/calc-dirty-rate: millisecond-granularity period")
1 /*
2 * Dirty rate implementation code
3 *
4 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
5 *
6 * Authors:
7 * Chuan Zheng <zhengchuan@huawei.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
11 */
12
13 #include "qemu/osdep.h"
14 #include "qemu/error-report.h"
15 #include <zlib.h>
16 #include "hw/core/cpu.h"
17 #include "qapi/error.h"
18 #include "exec/ramblock.h"
19 #include "exec/target_page.h"
20 #include "qemu/rcu_queue.h"
21 #include "qemu/main-loop.h"
22 #include "qapi/qapi-commands-migration.h"
23 #include "ram.h"
24 #include "trace.h"
25 #include "dirtyrate.h"
26 #include "monitor/hmp.h"
27 #include "monitor/monitor.h"
28 #include "qapi/qmp/qdict.h"
29 #include "sysemu/kvm.h"
30 #include "sysemu/runstate.h"
31 #include "exec/memory.h"
32 #include "qemu/xxhash.h"
33
34 /*
35 * total_dirty_pages is protected by the BQL and is used
36 * to count dirty pages during the period between two
37 * memory_global_dirty_log_sync() calls
38 */
39 uint64_t total_dirty_pages;
40
41 typedef struct DirtyPageRecord {
42 uint64_t start_pages;
43 uint64_t end_pages;
44 } DirtyPageRecord;
45
46 static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
47 static struct DirtyRateStat DirtyStat;
48 static DirtyRateMeasureMode dirtyrate_mode =
49 DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
50
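/*
 * dirty_stat_wait() sleeps until roughly msec milliseconds have passed since
 * initial_time and returns the time that actually elapsed, which can exceed
 * msec if g_usleep() overshoots or the caller was delayed before the call.
 */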
51 static int64_t dirty_stat_wait(int64_t msec, int64_t initial_time)
52 {
53 int64_t current_time;
54
55 current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
56 if ((current_time - initial_time) >= msec) {
57 msec = current_time - initial_time;
58 } else {
59 g_usleep((msec + initial_time - current_time) * 1000);
60 /* g_usleep may overshoot */
61 msec = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - initial_time;
62 }
63
64 return msec;
65 }
66
67 static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
68 CPUState *cpu, bool start)
69 {
70 if (start) {
71 dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages;
72 } else {
73 dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages;
74 }
75 }
76
77 static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
78 int64_t calc_time_ms)
79 {
80 uint64_t increased_dirty_pages =
81 dirty_pages.end_pages - dirty_pages.start_pages;
82
83 /*
84 * multiply by 1000ms/s _before_ converting down to megabytes
85 * to avoid losing precision
86 */
87 return qemu_target_pages_to_MiB(increased_dirty_pages * 1000) /
88 calc_time_ms;
89 }
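/*
 * Worked example (illustrative, assuming 4 KiB target pages): 700 pages
 * dirtied over a 400 ms window give
 * qemu_target_pages_to_MiB(700 * 1000) / 400 = 2734 / 400 = 6 MiB/s,
 * whereas converting 700 pages to MiB first would truncate to 2 MiB and
 * report only 2 * 1000 / 400 = 5 MiB/s.
 */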
90
91 void global_dirty_log_change(unsigned int flag, bool start)
92 {
93 qemu_mutex_lock_iothread();
94 if (start) {
95 memory_global_dirty_log_start(flag);
96 } else {
97 memory_global_dirty_log_stop(flag);
98 }
99 qemu_mutex_unlock_iothread();
100 }
101
102 /*
103 * global_dirty_log_sync
104 * 1. sync dirty log from kvm
105 * 2. stop dirty tracking if needed.
106 */
107 static void global_dirty_log_sync(unsigned int flag, bool one_shot)
108 {
109 qemu_mutex_lock_iothread();
110 memory_global_dirty_log_sync(false);
111 if (one_shot) {
112 memory_global_dirty_log_stop(flag);
113 }
114 qemu_mutex_unlock_iothread();
115 }
116
117 static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat)
118 {
119 CPUState *cpu;
120 int nvcpu = 0;
121
122 CPU_FOREACH(cpu) {
123 nvcpu++;
124 }
125
126 stat->nvcpu = nvcpu;
127 stat->rates = g_new0(DirtyRateVcpu, nvcpu);
128
129 return g_new0(DirtyPageRecord, nvcpu);
130 }
131
132 static void vcpu_dirty_stat_collect(VcpuStat *stat,
133 DirtyPageRecord *records,
134 bool start)
135 {
136 CPUState *cpu;
137
138 CPU_FOREACH(cpu) {
139 record_dirtypages(records, cpu, start);
140 }
141 }
142
143 int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
144 VcpuStat *stat,
145 unsigned int flag,
146 bool one_shot)
147 {
148 DirtyPageRecord *records;
149 int64_t init_time_ms;
150 int64_t duration;
151 int64_t dirtyrate;
152 int i = 0;
153 unsigned int gen_id;
154
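/*
 * If a vCPU is hot-plugged or hot-unplugged while we wait, the CPU list
 * generation id changes and the per-vCPU records no longer match the CPU
 * list, so the whole measurement is restarted from the "retry" label below.
 */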
155 retry:
156 init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
157
158 WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
159 gen_id = cpu_list_generation_id_get();
160 records = vcpu_dirty_stat_alloc(stat);
161 vcpu_dirty_stat_collect(stat, records, true);
162 }
163
164 duration = dirty_stat_wait(calc_time_ms, init_time_ms);
165
166 global_dirty_log_sync(flag, one_shot);
167
168 WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
169 if (gen_id != cpu_list_generation_id_get()) {
170 g_free(records);
171 g_free(stat->rates);
172 cpu_list_unlock();
173 goto retry;
174 }
175 vcpu_dirty_stat_collect(stat, records, false);
176 }
177
178 for (i = 0; i < stat->nvcpu; i++) {
179 dirtyrate = do_calculate_dirtyrate(records[i], duration);
180
181 stat->rates[i].id = i;
182 stat->rates[i].dirty_rate = dirtyrate;
183
184 trace_dirtyrate_do_calculate_vcpu(i, dirtyrate);
185 }
186
187 g_free(records);
188
189 return duration;
190 }
191
192 static bool is_calc_time_valid(int64_t msec)
193 {
194 if ((msec < MIN_CALC_TIME_MS) || (msec > MAX_CALC_TIME_MS)) {
195 return false;
196 }
197
198 return true;
199 }
200
201 static bool is_sample_pages_valid(int64_t pages)
202 {
203 return pages >= MIN_SAMPLE_PAGE_COUNT &&
204 pages <= MAX_SAMPLE_PAGE_COUNT;
205 }
206
207 static int dirtyrate_set_state(int *state, int old_state, int new_state)
208 {
209 assert(new_state < DIRTY_RATE_STATUS__MAX);
210 trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
211 if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
212 return 0;
213 } else {
214 return -1;
215 }
216 }
217
218 /* Decimal power of given time unit relative to one second */
219 static int time_unit_to_power(TimeUnit time_unit)
220 {
221 switch (time_unit) {
222 case TIME_UNIT_SECOND:
223 return 0;
224 case TIME_UNIT_MILLISECOND:
225 return -3;
226 default:
227 assert(false); /* unreachable */
228 return 0;
229 }
230 }
231
232 static int64_t convert_time_unit(int64_t value, TimeUnit unit_from,
233 TimeUnit unit_to)
234 {
235 int power = time_unit_to_power(unit_from) -
236 time_unit_to_power(unit_to);
237 while (power < 0) {
238 value /= 10;
239 power += 1;
240 }
241 while (power > 0) {
242 value *= 10;
243 power -= 1;
244 }
245 return value;
246 }
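/*
 * For example, convert_time_unit(2, TIME_UNIT_SECOND, TIME_UNIT_MILLISECOND)
 * multiplies by 10^3 and returns 2000, while converting 1500 back from
 * milliseconds to seconds divides by 10^3 and truncates to 1.
 */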
247
248
249 static struct DirtyRateInfo *
250 query_dirty_rate_info(TimeUnit calc_time_unit)
251 {
252 int i;
253 int64_t dirty_rate = DirtyStat.dirty_rate;
254 struct DirtyRateInfo *info = g_new0(DirtyRateInfo, 1);
255 DirtyRateVcpuList *head = NULL, **tail = &head;
256
257 info->status = CalculatingState;
258 info->start_time = DirtyStat.start_time;
259 info->calc_time = convert_time_unit(DirtyStat.calc_time_ms,
260 TIME_UNIT_MILLISECOND,
261 calc_time_unit);
262 info->calc_time_unit = calc_time_unit;
263 info->sample_pages = DirtyStat.sample_pages;
264 info->mode = dirtyrate_mode;
265
266 if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
267 info->has_dirty_rate = true;
268 info->dirty_rate = dirty_rate;
269
270 if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
271 /*
272 * Set sample_pages to 0 to indicate that page sampling
273 * isn't enabled
274 */
275 info->sample_pages = 0;
276 info->has_vcpu_dirty_rate = true;
277 for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
278 DirtyRateVcpu *rate = g_new0(DirtyRateVcpu, 1);
279 rate->id = DirtyStat.dirty_ring.rates[i].id;
280 rate->dirty_rate = DirtyStat.dirty_ring.rates[i].dirty_rate;
281 QAPI_LIST_APPEND(tail, rate);
282 }
283 info->vcpu_dirty_rate = head;
284 }
285
286 if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
287 info->sample_pages = 0;
288 }
289 }
290
291 trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));
292
293 return info;
294 }
295
296 static void init_dirtyrate_stat(int64_t start_time,
297 struct DirtyRateConfig config)
298 {
299 DirtyStat.dirty_rate = -1;
300 DirtyStat.start_time = start_time;
301 DirtyStat.calc_time_ms = config.calc_time_ms;
302 DirtyStat.sample_pages = config.sample_pages_per_gigabytes;
303
304 switch (config.mode) {
305 case DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING:
306 DirtyStat.page_sampling.total_dirty_samples = 0;
307 DirtyStat.page_sampling.total_sample_count = 0;
308 DirtyStat.page_sampling.total_block_mem_MB = 0;
309 break;
310 case DIRTY_RATE_MEASURE_MODE_DIRTY_RING:
311 DirtyStat.dirty_ring.nvcpu = -1;
312 DirtyStat.dirty_ring.rates = NULL;
313 break;
314 default:
315 break;
316 }
317 }
318
319 static void cleanup_dirtyrate_stat(struct DirtyRateConfig config)
320 {
321 /* the last calc-dirty-rate QMP command used dirty ring mode */
322 if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
323 g_free(DirtyStat.dirty_ring.rates);
324 DirtyStat.dirty_ring.rates = NULL;
325 }
326 }
327
328 static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
329 {
330 DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
331 DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
332 /* total size of the ramblock's pages, in MiB */
333 DirtyStat.page_sampling.total_block_mem_MB +=
334 qemu_target_pages_to_MiB(info->ramblock_pages);
335 }
336
337 static void update_dirtyrate(uint64_t msec)
338 {
339 uint64_t dirtyrate;
340 uint64_t total_dirty_samples = DirtyStat.page_sampling.total_dirty_samples;
341 uint64_t total_sample_count = DirtyStat.page_sampling.total_sample_count;
342 uint64_t total_block_mem_MB = DirtyStat.page_sampling.total_block_mem_MB;
343
344 dirtyrate = total_dirty_samples * total_block_mem_MB *
345 1000 / (total_sample_count * msec);
346
347 DirtyStat.dirty_rate = dirtyrate;
348 }
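/*
 * Worked example with illustrative numbers: if 50 of 4096 sampled pages
 * changed their hash, the sampled ramblocks total 16384 MiB and the period
 * was 1000 ms, then
 * dirtyrate = 50 * 16384 * 1000 / (4096 * 1000) = 200 MiB/s,
 * i.e. the dirty fraction of the samples scaled to the whole sampled memory
 * and normalised to one second.
 */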
349
350 /*
351 * Compute hash of a single page of size TARGET_PAGE_SIZE.
352 */
353 static uint32_t compute_page_hash(void *ptr)
354 {
355 size_t page_size = qemu_target_page_size();
356 uint32_t i;
357 uint64_t v1, v2, v3, v4;
358 uint64_t res;
359 const uint64_t *p = ptr;
360
361 v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
362 v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
363 v3 = QEMU_XXHASH_SEED + 0;
364 v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;
365 for (i = 0; i < page_size / 8; i += 4) {
366 v1 = XXH64_round(v1, p[i + 0]);
367 v2 = XXH64_round(v2, p[i + 1]);
368 v3 = XXH64_round(v3, p[i + 2]);
369 v4 = XXH64_round(v4, p[i + 3]);
370 }
371 res = XXH64_mergerounds(v1, v2, v3, v4);
372 res += page_size;
373 res = XXH64_avalanche(res);
374 return (uint32_t)(res & UINT32_MAX);
375 }
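/*
 * The loop above is an unrolled xxHash64 over the whole page, folded down to
 * 32 bits at the end. It consumes 32 bytes (four uint64_t lanes) per
 * iteration and has no tail handling, so it assumes the target page size is
 * a multiple of 32 bytes, which holds for any power-of-two page size of at
 * least 32 bytes.
 */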
376
377
378 /*
379 * Get the hash of one sampled page of length TARGET_PAGE_SIZE in the
380 * ramblock; vfn is the page's offset, in pages, from the ramblock base address.
381 */
382 static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
383 uint64_t vfn)
384 {
385 uint32_t hash;
386
387 hash = compute_page_hash(info->ramblock_addr +
388 vfn * qemu_target_page_size());
389
390 trace_get_ramblock_vfn_hash(info->idstr, vfn, hash);
391 return hash;
392 }
393
394 static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
395 {
396 unsigned int sample_pages_count;
397 int i;
398 GRand *rand;
399
400 sample_pages_count = info->sample_pages_count;
401
402 /* if the ramblock holds less than one page or needs no samples, return success to skip it */
403 if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
404 return true;
405 }
406
407 info->hash_result = g_try_malloc0_n(sample_pages_count,
408 sizeof(uint32_t));
409 if (!info->hash_result) {
410 return false;
411 }
412
413 info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
414 sizeof(uint64_t));
415 if (!info->sample_page_vfn) {
416 g_free(info->hash_result);
417 return false;
418 }
419
420 rand = g_rand_new();
421 for (i = 0; i < sample_pages_count; i++) {
422 info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
423 info->ramblock_pages - 1);
424 info->hash_result[i] = get_ramblock_vfn_hash(info,
425 info->sample_page_vfn[i]);
426 }
427 g_rand_free(rand);
428
429 return true;
430 }
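/*
 * The sampled virtual frame numbers are drawn uniformly at random and not
 * de-duplicated, so a page may be picked more than once; note also that
 * g_rand_int_range() excludes its upper bound, so the very last page of a
 * ramblock is never sampled.
 */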
431
432 static void get_ramblock_dirty_info(RAMBlock *block,
433 struct RamblockDirtyInfo *info,
434 struct DirtyRateConfig *config)
435 {
436 uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
437
438 /* Right shift 30 bits to calc ramblock size in GB */
439 info->sample_pages_count = (qemu_ram_get_used_length(block) *
440 sample_pages_per_gigabytes) >> 30;
441 /* Right shift TARGET_PAGE_BITS to calc page count */
442 info->ramblock_pages = qemu_ram_get_used_length(block) >>
443 qemu_target_page_bits();
444 info->ramblock_addr = qemu_ram_get_host_addr(block);
445 strcpy(info->idstr, qemu_ram_get_idstr(block));
446 }
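/*
 * Example: with 512 sample pages per GiB, a 4 GiB ramblock yields
 * (4 GiB * 512) >> 30 = 2048 sampled pages out of 4 GiB >> 12 = 1048576
 * total pages (assuming 4 KiB target pages).
 */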
447
448 static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
449 {
450 int i;
451
452 if (!infos) {
453 return;
454 }
455
456 for (i = 0; i < count; i++) {
457 g_free(infos[i].sample_page_vfn);
458 g_free(infos[i].hash_result);
459 }
460 g_free(infos);
461 }
462
463 static bool skip_sample_ramblock(RAMBlock *block)
464 {
465 /*
466 * Sample only blocks larger than MIN_RAMBLOCK_SIZE (which is in KiB, hence the << 10 below).
467 */
468 if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
469 trace_skip_sample_ramblock(block->idstr,
470 qemu_ram_get_used_length(block));
471 return true;
472 }
473
474 return false;
475 }
476
477 static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
478 struct DirtyRateConfig config,
479 int *block_count)
480 {
481 struct RamblockDirtyInfo *info = NULL;
482 struct RamblockDirtyInfo *dinfo = NULL;
483 RAMBlock *block = NULL;
484 int total_count = 0;
485 int index = 0;
486 bool ret = false;
487
488 RAMBLOCK_FOREACH_MIGRATABLE(block) {
489 if (skip_sample_ramblock(block)) {
490 continue;
491 }
492 total_count++;
493 }
494
495 dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
496 if (dinfo == NULL) {
497 goto out;
498 }
499
500 RAMBLOCK_FOREACH_MIGRATABLE(block) {
501 if (skip_sample_ramblock(block)) {
502 continue;
503 }
504 if (index >= total_count) {
505 break;
506 }
507 info = &dinfo[index];
508 get_ramblock_dirty_info(block, info, &config);
509 if (!save_ramblock_hash(info)) {
510 goto out;
511 }
512 index++;
513 }
514 ret = true;
515
516 out:
517 *block_count = index;
518 *block_dinfo = dinfo;
519 return ret;
520 }
521
522 static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
523 {
524 uint32_t hash;
525 int i;
526
527 for (i = 0; i < info->sample_pages_count; i++) {
528 hash = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
529 if (hash != info->hash_result[i]) {
530 trace_calc_page_dirty_rate(info->idstr, hash, info->hash_result[i]);
531 info->sample_dirty_count++;
532 }
533 }
534 }
535
536 static struct RamblockDirtyInfo *
537 find_block_matched(RAMBlock *block, int count,
538 struct RamblockDirtyInfo *infos)
539 {
540 int i;
541
542 for (i = 0; i < count; i++) {
543 if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
544 break;
545 }
546 }
547
548 if (i == count) {
549 return NULL;
550 }
551
552 if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
553 infos[i].ramblock_pages !=
554 (qemu_ram_get_used_length(block) >> qemu_target_page_bits())) {
555 trace_find_page_matched(block->idstr);
556 return NULL;
557 }
558
559 return &infos[i];
560 }
561
562 static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
563 int block_count)
564 {
565 struct RamblockDirtyInfo *block_dinfo = NULL;
566 RAMBlock *block = NULL;
567
568 RAMBLOCK_FOREACH_MIGRATABLE(block) {
569 if (skip_sample_ramblock(block)) {
570 continue;
571 }
572 block_dinfo = find_block_matched(block, block_count, info);
573 if (block_dinfo == NULL) {
574 continue;
575 }
576 calc_page_dirty_rate(block_dinfo);
577 update_dirtyrate_stat(block_dinfo);
578 }
579
580 if (DirtyStat.page_sampling.total_sample_count == 0) {
581 return false;
582 }
583
584 return true;
585 }
586
587 static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages,
588 bool start)
589 {
590 if (start) {
591 dirty_pages->start_pages = total_dirty_pages;
592 } else {
593 dirty_pages->end_pages = total_dirty_pages;
594 }
595 }
596
597 static inline void dirtyrate_manual_reset_protect(void)
598 {
599 RAMBlock *block = NULL;
600
601 WITH_RCU_READ_LOCK_GUARD() {
602 RAMBLOCK_FOREACH_MIGRATABLE(block) {
603 memory_region_clear_dirty_bitmap(block->mr, 0,
604 block->used_length);
605 }
606 }
607 }
608
609 static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
610 {
611 int64_t start_time;
612 DirtyPageRecord dirty_pages;
613
614 qemu_mutex_lock_iothread();
615 memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);
616
617 /*
618 * The first round of log sync may return all-1 bits when
619 * KVM_DIRTY_LOG_INITIALLY_SET is enabled, so
620 * skip it unconditionally and start dirty tracking
621 * from the second round of log sync
622 */
623 memory_global_dirty_log_sync(false);
624
625 /*
626 * Reset page protection manually and unconditionally.
627 * This makes sure the KVM dirty log is cleared if the
628 * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled.
629 */
630 dirtyrate_manual_reset_protect();
631 qemu_mutex_unlock_iothread();
632
633 record_dirtypages_bitmap(&dirty_pages, true);
634
635 start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
636 DirtyStat.start_time = start_time / 1000;
637
638 DirtyStat.calc_time_ms = dirty_stat_wait(config.calc_time_ms, start_time);
639
640 /*
641 * Do two things:
642 * 1. fetch the dirty bitmap from KVM
643 * 2. stop dirty tracking
644 */
645 global_dirty_log_sync(GLOBAL_DIRTY_DIRTY_RATE, true);
646
647 record_dirtypages_bitmap(&dirty_pages, false);
648
649 DirtyStat.dirty_rate = do_calculate_dirtyrate(dirty_pages,
650 DirtyStat.calc_time_ms);
651 }
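/*
 * In dirty bitmap mode the rate is derived from total_dirty_pages, which is
 * accumulated VM-wide while the dirty log is synced, so only a single
 * whole-VM figure is produced and no per-vCPU breakdown is available.
 */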
652
653 static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config)
654 {
655 uint64_t dirtyrate = 0;
656 uint64_t dirtyrate_sum = 0;
657 int i = 0;
658
659 /* start log sync */
660 global_dirty_log_change(GLOBAL_DIRTY_DIRTY_RATE, true);
661
662 DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;
663
664 /* calculate vcpu dirtyrate */
665 DirtyStat.calc_time_ms = vcpu_calculate_dirtyrate(config.calc_time_ms,
666 &DirtyStat.dirty_ring,
667 GLOBAL_DIRTY_DIRTY_RATE,
668 true);
669
670 /* calculate vm dirtyrate */
671 for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
672 dirtyrate = DirtyStat.dirty_ring.rates[i].dirty_rate;
673 DirtyStat.dirty_ring.rates[i].dirty_rate = dirtyrate;
674 dirtyrate_sum += dirtyrate;
675 }
676
677 DirtyStat.dirty_rate = dirtyrate_sum;
678 }
679
680 static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config)
681 {
682 struct RamblockDirtyInfo *block_dinfo = NULL;
683 int block_count = 0;
684 int64_t initial_time;
685
686 rcu_read_lock();
687 initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
688 if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
689 goto out;
690 }
691 rcu_read_unlock();
692
693 DirtyStat.calc_time_ms = dirty_stat_wait(config.calc_time_ms,
694 initial_time);
695 DirtyStat.start_time = initial_time / 1000;
696
697 rcu_read_lock();
698 if (!compare_page_hash_info(block_dinfo, block_count)) {
699 goto out;
700 }
701
702 update_dirtyrate(DirtyStat.calc_time_ms);
703
704 out:
705 rcu_read_unlock();
706 free_ramblock_dirty_info(block_dinfo, block_count);
707 }
708
709 static void calculate_dirtyrate(struct DirtyRateConfig config)
710 {
711 if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
712 calculate_dirtyrate_dirty_bitmap(config);
713 } else if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
714 calculate_dirtyrate_dirty_ring(config);
715 } else {
716 calculate_dirtyrate_sample_vm(config);
717 }
718
719 trace_dirtyrate_calculate(DirtyStat.dirty_rate);
720 }
721
722 void *get_dirtyrate_thread(void *arg)
723 {
724 struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
725 int ret;
726 rcu_register_thread();
727
728 ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
729 DIRTY_RATE_STATUS_MEASURING);
730 if (ret == -1) {
731 error_report("change dirtyrate state failed.");
732 return NULL;
733 }
734
735 calculate_dirtyrate(config);
736
737 ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
738 DIRTY_RATE_STATUS_MEASURED);
739 if (ret == -1) {
740 error_report("change dirtyrate state failed.");
741 }
742
743 rcu_unregister_thread();
744 return NULL;
745 }
746
747 void qmp_calc_dirty_rate(int64_t calc_time,
748 bool has_calc_time_unit,
749 TimeUnit calc_time_unit,
750 bool has_sample_pages,
751 int64_t sample_pages,
752 bool has_mode,
753 DirtyRateMeasureMode mode,
754 Error **errp)
755 {
756 static struct DirtyRateConfig config;
757 QemuThread thread;
758 int ret;
759 int64_t start_time;
760
761 /*
762 * If the dirty rate is already being measured, don't attempt to start.
763 */
764 if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
765 error_setg(errp, "the dirty rate is already being measured.");
766 return;
767 }
768
769 int64_t calc_time_ms = convert_time_unit(
770 calc_time,
771 has_calc_time_unit ? calc_time_unit : TIME_UNIT_SECOND,
772 TIME_UNIT_MILLISECOND
773 );
774
775 if (!is_calc_time_valid(calc_time_ms)) {
776 error_setg(errp, "Calculation time is out of range [%dms, %dms].",
777 MIN_CALC_TIME_MS, MAX_CALC_TIME_MS);
778 return;
779 }
780
781 if (!has_mode) {
782 mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
783 }
784
785 if (has_sample_pages && mode != DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
786 error_setg(errp, "sample-pages is used only in page-sampling mode");
787 return;
788 }
789
790 if (has_sample_pages) {
791 if (!is_sample_pages_valid(sample_pages)) {
792 error_setg(errp, "sample-pages is out of range [%d, %d].",
793 MIN_SAMPLE_PAGE_COUNT,
794 MAX_SAMPLE_PAGE_COUNT);
795 return;
796 }
797 } else {
798 sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
799 }
800
801 /*
802 * Dirty ring mode only works when the KVM dirty ring is enabled;
803 * conversely, dirty bitmap mode only works when it is disabled.
804 */
805 if (((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) &&
806 !kvm_dirty_ring_enabled()) ||
807 ((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) &&
808 kvm_dirty_ring_enabled())) {
809 error_setg(errp, "mode %s is not enabled, use another method instead.",
810 DirtyRateMeasureMode_str(mode));
811 return;
812 }
813
814 /*
815 * Init calculation state as unstarted.
816 */
817 ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
818 DIRTY_RATE_STATUS_UNSTARTED);
819 if (ret == -1) {
820 error_setg(errp, "init dirty rate calculation state failed.");
821 return;
822 }
823
824 config.calc_time_ms = calc_time_ms;
825 config.sample_pages_per_gigabytes = sample_pages;
826 config.mode = mode;
827
828 cleanup_dirtyrate_stat(config);
829
830 /*
831 * Update the dirty rate mode so that we can tell which mode was
832 * used in the last calculation
833 */
834 dirtyrate_mode = mode;
835
836 start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;
837 init_dirtyrate_stat(start_time, config);
838
839 qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
840 (void *)&config, QEMU_THREAD_DETACHED);
841 }
842
843
844 struct DirtyRateInfo *qmp_query_dirty_rate(bool has_calc_time_unit,
845 TimeUnit calc_time_unit,
846 Error **errp)
847 {
848 return query_dirty_rate_info(
849 has_calc_time_unit ? calc_time_unit : TIME_UNIT_SECOND);
850 }
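/*
 * Illustrative QMP usage (member names follow the QAPI schema for
 * calc-dirty-rate and query-dirty-rate; the values are only examples):
 *
 * -> { "execute": "calc-dirty-rate",
 *      "arguments": { "calc-time": 500,
 *                     "calc-time-unit": "millisecond",
 *                     "mode": "dirty-ring" } }
 * <- { "return": {} }
 *
 * (wait for the measurement period to elapse)
 *
 * -> { "execute": "query-dirty-rate",
 *      "arguments": { "calc-time-unit": "millisecond" } }
 * <- { "return": { "status": "measured", "dirty-rate": 108, ... } }
 */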
851
852 void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict)
853 {
854 DirtyRateInfo *info = query_dirty_rate_info(TIME_UNIT_SECOND);
855
856 monitor_printf(mon, "Status: %s\n",
857 DirtyRateStatus_str(info->status));
858 monitor_printf(mon, "Start Time: %"PRIi64" (ms)\n",
859 info->start_time);
860 if (info->mode == DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
861 monitor_printf(mon, "Sample Pages: %"PRIu64" (per GB)\n",
862 info->sample_pages);
863 }
864 monitor_printf(mon, "Period: %"PRIi64" (sec)\n",
865 info->calc_time);
866 monitor_printf(mon, "Mode: %s\n",
867 DirtyRateMeasureMode_str(info->mode));
868 monitor_printf(mon, "Dirty rate: ");
869 if (info->has_dirty_rate) {
870 monitor_printf(mon, "%"PRIi64" (MB/s)\n", info->dirty_rate);
871 if (info->has_vcpu_dirty_rate) {
872 DirtyRateVcpuList *rate, *head = info->vcpu_dirty_rate;
873 for (rate = head; rate != NULL; rate = rate->next) {
874 monitor_printf(mon, "vcpu[%"PRIi64"], Dirty rate: %"PRIi64
875 " (MB/s)\n", rate->value->id,
876 rate->value->dirty_rate);
877 }
878 }
879 } else {
880 monitor_printf(mon, "(not ready)\n");
881 }
882
883 qapi_free_DirtyRateVcpuList(info->vcpu_dirty_rate);
884 g_free(info);
885 }
886
887 void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict)
888 {
889 int64_t sec = qdict_get_try_int(qdict, "second", 0);
890 int64_t sample_pages = qdict_get_try_int(qdict, "sample_pages_per_GB", -1);
891 bool has_sample_pages = (sample_pages != -1);
892 bool dirty_ring = qdict_get_try_bool(qdict, "dirty_ring", false);
893 bool dirty_bitmap = qdict_get_try_bool(qdict, "dirty_bitmap", false);
894 DirtyRateMeasureMode mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
895 Error *err = NULL;
896
897 if (!sec) {
898 monitor_printf(mon, "Incorrect period length specified!\n");
899 return;
900 }
901
902 if (dirty_ring && dirty_bitmap) {
903 monitor_printf(mon, "Either dirty ring or dirty bitmap "
904 "can be specified!\n");
905 return;
906 }
907
908 if (dirty_bitmap) {
909 mode = DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP;
910 } else if (dirty_ring) {
911 mode = DIRTY_RATE_MEASURE_MODE_DIRTY_RING;
912 }
913
914 qmp_calc_dirty_rate(sec, /* calc-time */
915 false, TIME_UNIT_SECOND, /* calc-time-unit */
916 has_sample_pages, sample_pages,
917 true, mode,
918 &err);
919 if (err) {
920 hmp_handle_error(mon, err);
921 return;
922 }
923
924 monitor_printf(mon, "Starting dirty rate measurement with period %"PRIi64
925 " seconds\n", sec);
926 monitor_printf(mon, "[Please use 'info dirty_rate' to check results]\n");
927 }
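/*
 * Illustrative HMP usage (the -r/-b flag letters come from hmp-commands.hx
 * and are an assumption here; this file only sees the "second",
 * "sample_pages_per_GB", "dirty_ring" and "dirty_bitmap" qdict keys):
 *
 * (qemu) calc_dirty_rate 1 512    # page-sampling mode, 1 s period
 * (qemu) calc_dirty_rate -r 1     # dirty ring mode
 * (qemu) calc_dirty_rate -b 1     # dirty bitmap mode
 * (qemu) info dirty_rate          # print the result once measured
 */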