/*
 * Dirtyrate implementation code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <zlib.h>
#include "qapi/error.h"
#include "cpu.h"
#include "exec/ramblock.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "qapi/qapi-commands-migration.h"
#include "ram.h"
#include "trace.h"
#include "dirtyrate.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/qmp/qdict.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "exec/memory.h"

/*
 * total_dirty_pages is protected by the BQL and is used
 * to track the number of dirty pages accumulated between
 * two calls to memory_global_dirty_log_sync().
 */
uint64_t total_dirty_pages;

typedef struct DirtyPageRecord {
    uint64_t start_pages;
    uint64_t end_pages;
} DirtyPageRecord;

static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;
static DirtyRateMeasureMode dirtyrate_mode =
                DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;

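/*
 * Wait out the remainder of a measurement period: sleep until at
 * least @msec milliseconds have elapsed since @initial_time, then
 * return the time that actually passed (which can exceed @msec if
 * the thread was delayed).
 */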
static int64_t dirty_stat_wait(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
    }

    return msec;
}

static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
                                     CPUState *cpu, bool start)
{
    if (start) {
        dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages;
    } else {
        dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages;
    }
}

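/*
 * Convert a dirty page delta into a rate in MB/s:
 *   rate = (pages * TARGET_PAGE_SIZE >> 20) * 1000 / calc_time_ms
 * For example, 25600 dirtied 4KiB pages over 1000ms works out to
 * 100MB dirtied in one second, i.e. 100 MB/s.
 */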
static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
                                      int64_t calc_time_ms)
{
    uint64_t memory_size_MB;
    uint64_t increased_dirty_pages =
        dirty_pages.end_pages - dirty_pages.start_pages;

    memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20;

    return memory_size_MB * 1000 / calc_time_ms;
}

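/* Start or stop global dirty logging for @flag under the BQL. */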
void global_dirty_log_change(unsigned int flag, bool start)
{
    qemu_mutex_lock_iothread();
    if (start) {
        memory_global_dirty_log_start(flag);
    } else {
        memory_global_dirty_log_stop(flag);
    }
    qemu_mutex_unlock_iothread();
}

/*
 * global_dirty_log_sync
 * 1. sync the dirty log from KVM
 * 2. stop dirty tracking if @one_shot is set
 */
static void global_dirty_log_sync(unsigned int flag, bool one_shot)
{
    qemu_mutex_lock_iothread();
    memory_global_dirty_log_sync();
    if (one_shot) {
        memory_global_dirty_log_stop(flag);
    }
    qemu_mutex_unlock_iothread();
}

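/*
 * Count the current vCPUs, size @stat->rates accordingly, and return
 * a zeroed DirtyPageRecord array of the same length; the caller owns
 * both allocations.
 */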
static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat)
{
    CPUState *cpu;
    int nvcpu = 0;

    CPU_FOREACH(cpu) {
        nvcpu++;
    }

    stat->nvcpu = nvcpu;
    stat->rates = g_new0(DirtyRateVcpu, nvcpu);

    return g_new0(DirtyPageRecord, nvcpu);
}

static void vcpu_dirty_stat_collect(VcpuStat *stat,
                                    DirtyPageRecord *records,
                                    bool start)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        record_dirtypages(records, cpu, start);
    }
}

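/*
 * Measure per-vCPU dirty rates: snapshot each vCPU's dirty page
 * counter, wait for @calc_time_ms, sync the dirty log, and snapshot
 * again. If the vCPU list changed in between (detected via the cpu
 * list generation id), the measurement restarts from scratch.
 * Returns the real measurement duration in milliseconds.
 */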
int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
                                 VcpuStat *stat,
                                 unsigned int flag,
                                 bool one_shot)
{
    DirtyPageRecord *records;
    int64_t init_time_ms;
    int64_t duration;
    int64_t dirtyrate;
    int i = 0;
    unsigned int gen_id;

retry:
    init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    cpu_list_lock();
    gen_id = cpu_list_generation_id_get();
    records = vcpu_dirty_stat_alloc(stat);
    vcpu_dirty_stat_collect(stat, records, true);
    cpu_list_unlock();

    duration = dirty_stat_wait(calc_time_ms, init_time_ms);

    global_dirty_log_sync(flag, one_shot);

    cpu_list_lock();
    if (gen_id != cpu_list_generation_id_get()) {
        g_free(records);
        g_free(stat->rates);
        cpu_list_unlock();
        goto retry;
    }
    vcpu_dirty_stat_collect(stat, records, false);
    cpu_list_unlock();

    for (i = 0; i < stat->nvcpu; i++) {
        dirtyrate = do_calculate_dirtyrate(records[i], duration);

        stat->rates[i].id = i;
        stat->rates[i].dirty_rate = dirtyrate;

        trace_dirtyrate_do_calculate_vcpu(i, dirtyrate);
    }

    g_free(records);

    return duration;
}

static bool is_sample_period_valid(int64_t sec)
{
    if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC ||
        sec > MAX_FETCH_DIRTYRATE_TIME_SEC) {
        return false;
    }

    return true;
}

static bool is_sample_pages_valid(int64_t pages)
{
    return pages >= MIN_SAMPLE_PAGE_COUNT &&
           pages <= MAX_SAMPLE_PAGE_COUNT;
}

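/*
 * Atomically advance the state machine from @old_state to @new_state;
 * returns 0 on success, or -1 if another thread won the race.
 */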
static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}

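/*
 * Build a DirtyRateInfo snapshot for QMP/HMP: always report status and
 * configuration, and add the measured rates once the calculation has
 * reached the "measured" state.
 */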
static struct DirtyRateInfo *query_dirty_rate_info(void)
{
    int i;
    int64_t dirty_rate = DirtyStat.dirty_rate;
    struct DirtyRateInfo *info = g_new0(DirtyRateInfo, 1);
    DirtyRateVcpuList *head = NULL, **tail = &head;

    info->status = CalculatingState;
    info->start_time = DirtyStat.start_time;
    info->calc_time = DirtyStat.calc_time;
    info->sample_pages = DirtyStat.sample_pages;
    info->mode = dirtyrate_mode;

    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
        info->has_dirty_rate = true;
        info->dirty_rate = dirty_rate;

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
            /*
             * Set sample_pages to 0 to indicate that page sampling
             * is not enabled.
             */
            info->sample_pages = 0;
            info->has_vcpu_dirty_rate = true;
            for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
                DirtyRateVcpu *rate = g_new0(DirtyRateVcpu, 1);
                rate->id = DirtyStat.dirty_ring.rates[i].id;
                rate->dirty_rate = DirtyStat.dirty_ring.rates[i].dirty_rate;
                QAPI_LIST_APPEND(tail, rate);
            }
            info->vcpu_dirty_rate = head;
        }

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
            info->sample_pages = 0;
        }
    }

    trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));

    return info;
}

static void init_dirtyrate_stat(int64_t start_time,
                                struct DirtyRateConfig config)
{
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = start_time;
    DirtyStat.calc_time = config.sample_period_seconds;
    DirtyStat.sample_pages = config.sample_pages_per_gigabytes;

    switch (config.mode) {
    case DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING:
        DirtyStat.page_sampling.total_dirty_samples = 0;
        DirtyStat.page_sampling.total_sample_count = 0;
        DirtyStat.page_sampling.total_block_mem_MB = 0;
        break;
    case DIRTY_RATE_MEASURE_MODE_DIRTY_RING:
        DirtyStat.dirty_ring.nvcpu = -1;
        DirtyStat.dirty_ring.rates = NULL;
        break;
    default:
        break;
    }
}

static void cleanup_dirtyrate_stat(struct DirtyRateConfig config)
{
    /* the last calc-dirty-rate QMP command used dirty ring mode */
    if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        g_free(DirtyStat.dirty_ring.rates);
        DirtyStat.dirty_ring.rates = NULL;
    }
}

static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
    /* size of total pages in MB */
    DirtyStat.page_sampling.total_block_mem_MB += (info->ramblock_pages *
                                                   TARGET_PAGE_SIZE) >> 20;
}

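/*
 * Extrapolate the overall rate from the sample statistics: the share
 * of samples found dirty is scaled to the tracked memory size and
 * normalized to one second. E.g. with 4096MB tracked and 100 of 400
 * samples dirty over msec = 1000, the estimate is
 * 100 * 4096 * 1000 / (400 * 1000) = 1024 MB/s.
 */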
static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.page_sampling.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.page_sampling.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.page_sampling.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}

/*
 * Hash one sampled page: compute the CRC32 of the TARGET_PAGE_SIZE
 * bytes at virtual frame number @vfn within the ramblock.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t crc;

    crc = crc32(0, (info->ramblock_addr +
                vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);

    trace_get_ramblock_vfn_hash(info->idstr, vfn, crc);
    return crc;
}

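/*
 * Pick sample_pages_count random page frames in the ramblock and
 * record a CRC32 hash for each; re-hashing the same frames after the
 * measurement period reveals which samples were dirtied.
 */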
static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    int i;
    GRand *rand;

    sample_pages_count = info->sample_pages_count;

    /* ramblock size less than one page, return success to skip this ramblock */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        return false;
    }

    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages - 1);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}

static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

    /* Right shift 30 bits to calc ramblock size in GB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
    /* Right shift TARGET_PAGE_BITS to calc page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           TARGET_PAGE_BITS;
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}

static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}

static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks larger than MIN_RAMBLOCK_SIZE.
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        trace_skip_sample_ramblock(block->idstr,
                                   qemu_ram_get_used_length(block));
        return true;
    }

    return false;
}

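/*
 * Allocate a RamblockDirtyInfo for every migratable ramblock worth
 * sampling and record the initial page hashes. @block_count is set to
 * the number of entries actually initialized, so the caller can free
 * a partially built array even on failure.
 */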
static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}

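/*
 * Re-hash the sampled pages of one ramblock and count every page
 * whose CRC32 no longer matches the recorded value as dirty.
 */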
static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t crc;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (crc != info->hash_result[i]) {
            trace_calc_page_dirty_rate(info->idstr, crc, info->hash_result[i]);
            info->sample_dirty_count++;
        }
    }
}

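/*
 * Look up the recorded info for @block by idstr. Returns NULL if the
 * block is unknown or was resized/remapped during the measurement, in
 * which case its samples are no longer comparable.
 */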
static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                   struct RamblockDirtyInfo *infos)
{
    int i;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) {
        trace_find_page_matched(block->idstr);
        return NULL;
    }

    return &infos[i];
}

static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                   int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.page_sampling.total_sample_count == 0) {
        return false;
    }

    return true;
}

static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages,
                                            bool start)
{
    if (start) {
        dirty_pages->start_pages = total_dirty_pages;
    } else {
        dirty_pages->end_pages = total_dirty_pages;
    }
}

static inline void dirtyrate_manual_reset_protect(void)
{
    RAMBlock *block = NULL;

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            memory_region_clear_dirty_bitmap(block->mr, 0,
                                             block->used_length);
        }
    }
}

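/*
 * Dirty bitmap mode: with dirty logging enabled, diff the global
 * total_dirty_pages counter before and after the measurement period
 * to obtain a VM-wide dirty rate.
 */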
static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
{
    int64_t msec = 0;
    int64_t start_time;
    DirtyPageRecord dirty_pages;

    qemu_mutex_lock_iothread();
    memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);

    /*
     * The first round of log sync may return all-ones bitmaps when
     * KVM_DIRTY_LOG_INITIALLY_SET is enabled, so skip it
     * unconditionally and start dirty tracking from the second
     * round of log sync.
     */
    memory_global_dirty_log_sync();

    /*
     * Reset the page protection manually and unconditionally.
     * This makes sure the KVM dirty log is cleared when the
     * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled.
     */
    dirtyrate_manual_reset_protect();
    qemu_mutex_unlock_iothread();

    record_dirtypages_bitmap(&dirty_pages, true);

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    DirtyStat.start_time = start_time / 1000;

    msec = config.sample_period_seconds * 1000;
    msec = dirty_stat_wait(msec, start_time);
    DirtyStat.calc_time = msec / 1000;

    /*
     * Do two things:
     * 1. fetch the dirty bitmap from KVM
     * 2. stop dirty tracking
     */
    global_dirty_log_sync(GLOBAL_DIRTY_DIRTY_RATE, true);

    record_dirtypages_bitmap(&dirty_pages, false);

    DirtyStat.dirty_rate = do_calculate_dirtyrate(dirty_pages, msec);
}

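/*
 * Dirty ring mode: measure per-vCPU dirty rates from the KVM dirty
 * ring and report their sum as the VM dirty rate.
 */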
static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config)
{
    int64_t duration;
    uint64_t dirtyrate = 0;
    uint64_t dirtyrate_sum = 0;
    int i = 0;

    /* start log sync */
    global_dirty_log_change(GLOBAL_DIRTY_DIRTY_RATE, true);

    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;

    /* calculate vcpu dirtyrate */
    duration = vcpu_calculate_dirtyrate(config.sample_period_seconds * 1000,
                                        &DirtyStat.dirty_ring,
                                        GLOBAL_DIRTY_DIRTY_RATE,
                                        true);

    DirtyStat.calc_time = duration / 1000;

    /* the vm dirtyrate is the sum of the per-vcpu rates */
    for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
        dirtyrate = DirtyStat.dirty_ring.rates[i].dirty_rate;
        dirtyrate_sum += dirtyrate;
    }

    DirtyStat.dirty_rate = dirtyrate_sum;
}

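/*
 * Page sampling mode: hash a random subset of pages in each
 * migratable ramblock, wait for the measurement period, re-hash the
 * same pages, and extrapolate the rate from the fraction of samples
 * that changed.
 */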
static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t msec = 0;
    int64_t initial_time;

    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    msec = config.sample_period_seconds * 1000;
    msec = dirty_stat_wait(msec, initial_time);
    DirtyStat.start_time = initial_time / 1000;
    DirtyStat.calc_time = msec / 1000;

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(msec);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
}

static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
        calculate_dirtyrate_dirty_bitmap(config);
    } else if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        calculate_dirtyrate_dirty_ring(config);
    } else {
        calculate_dirtyrate_sample_vm(config);
    }

    trace_dirtyrate_calculate(DirtyStat.dirty_rate);
}

void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;

    rcu_register_thread();

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
        return NULL;
    }

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
    }

    rcu_unregister_thread();
    return NULL;
}

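/*
 * QMP entry point. An illustrative exchange (field values are
 * examples only):
 *
 *   -> { "execute": "calc-dirty-rate",
 *        "arguments": { "calc-time": 1, "mode": "page-sampling" } }
 *   <- { "return": {} }
 *   ... wait at least calc-time seconds ...
 *   -> { "execute": "query-dirty-rate" }
 *   <- { "return": { "status": "measured", "dirty-rate": 108, ... } }
 */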
void qmp_calc_dirty_rate(int64_t calc_time,
                         bool has_sample_pages,
                         int64_t sample_pages,
                         bool has_mode,
                         DirtyRateMeasureMode mode,
                         Error **errp)
{
    static struct DirtyRateConfig config;
    QemuThread thread;
    int ret;
    int64_t start_time;

    /*
     * If the dirty rate is already being measured, don't attempt to start.
     */
    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
        error_setg(errp, "the dirty rate is already being measured.");
        return;
    }

    if (!is_sample_period_valid(calc_time)) {
        error_setg(errp, "calc-time is out of range[%d, %d].",
                   MIN_FETCH_DIRTYRATE_TIME_SEC,
                   MAX_FETCH_DIRTYRATE_TIME_SEC);
        return;
    }

    if (!has_mode) {
        mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    }

    if (has_sample_pages && mode != DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
        error_setg(errp, "sample-pages is used only in page-sampling mode");
        return;
    }

    if (has_sample_pages) {
        if (!is_sample_pages_valid(sample_pages)) {
            error_setg(errp, "sample-pages is out of range[%d, %d].",
                       MIN_SAMPLE_PAGE_COUNT,
                       MAX_SAMPLE_PAGE_COUNT);
            return;
        }
    } else {
        sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
    }

    /*
     * dirty ring mode only works when the KVM dirty ring is enabled;
     * conversely, dirty bitmap mode only works when it is disabled.
     */
    if (((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) &&
        !kvm_dirty_ring_enabled()) ||
        ((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) &&
         kvm_dirty_ring_enabled())) {
        error_setg(errp, "mode %s is not enabled, use other method instead.",
                   DirtyRateMeasureMode_str(mode));
        return;
    }

    /*
     * Init calculation state as unstarted.
     */
    ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
                              DIRTY_RATE_STATUS_UNSTARTED);
    if (ret == -1) {
        error_setg(errp, "init dirty rate calculation state failed.");
        return;
    }

    config.sample_period_seconds = calc_time;
    config.sample_pages_per_gigabytes = sample_pages;
    config.mode = mode;

    cleanup_dirtyrate_stat(config);

    /*
     * Update the dirty rate mode so that we can figure out what mode
     * was used in the last calculation.
     */
    dirtyrate_mode = mode;

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;
    init_dirtyrate_stat(start_time, config);

    qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
                       (void *)&config, QEMU_THREAD_DETACHED);
}

struct DirtyRateInfo *qmp_query_dirty_rate(Error **errp)
{
    return query_dirty_rate_info();
}

void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict)
{
    DirtyRateInfo *info = query_dirty_rate_info();

    monitor_printf(mon, "Status: %s\n",
                   DirtyRateStatus_str(info->status));
    monitor_printf(mon, "Start Time: %"PRIi64" (ms)\n",
                   info->start_time);
    if (info->mode == DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
        monitor_printf(mon, "Sample Pages: %"PRIu64" (per GB)\n",
                       info->sample_pages);
    }
    monitor_printf(mon, "Period: %"PRIi64" (sec)\n",
                   info->calc_time);
    monitor_printf(mon, "Mode: %s\n",
                   DirtyRateMeasureMode_str(info->mode));
    monitor_printf(mon, "Dirty rate: ");
    if (info->has_dirty_rate) {
        monitor_printf(mon, "%"PRIi64" (MB/s)\n", info->dirty_rate);
        if (info->has_vcpu_dirty_rate) {
            DirtyRateVcpuList *rate, *head = info->vcpu_dirty_rate;
            for (rate = head; rate != NULL; rate = rate->next) {
                monitor_printf(mon, "vcpu[%"PRIi64"], Dirty rate: %"PRIi64
                               " (MB/s)\n", rate->value->id,
                               rate->value->dirty_rate);
            }
        }
    } else {
        monitor_printf(mon, "(not ready)\n");
    }

    qapi_free_DirtyRateVcpuList(info->vcpu_dirty_rate);
    g_free(info);
}

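/*
 * HMP entry point, e.g.:
 *
 *   (qemu) calc_dirty_rate 10
 *   (qemu) info dirty_rate
 *
 * The "dirty_ring"/"dirty_bitmap" booleans are expected to be set by
 * the monitor's -r/-b flags (defined in hmp-commands.hx, not here).
 */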
void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict)
{
    int64_t sec = qdict_get_try_int(qdict, "second", 0);
    int64_t sample_pages = qdict_get_try_int(qdict, "sample_pages_per_GB", -1);
    bool has_sample_pages = (sample_pages != -1);
    bool dirty_ring = qdict_get_try_bool(qdict, "dirty_ring", false);
    bool dirty_bitmap = qdict_get_try_bool(qdict, "dirty_bitmap", false);
    DirtyRateMeasureMode mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    Error *err = NULL;

    if (!sec) {
        monitor_printf(mon, "Incorrect period length specified!\n");
        return;
    }

    if (dirty_ring && dirty_bitmap) {
        monitor_printf(mon, "Only one of dirty ring and dirty bitmap "
                       "can be specified!\n");
        return;
    }

    if (dirty_bitmap) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP;
    } else if (dirty_ring) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_RING;
    }

    qmp_calc_dirty_rate(sec, has_sample_pages, sample_pages, true,
                        mode, &err);
    if (err) {
        hmp_handle_error(mon, err);
        return;
    }

    monitor_printf(mon, "Starting dirty rate measurement with period %"PRIi64
                   " seconds\n", sec);
    monitor_printf(mon, "[Please use 'info dirty_rate' to check results]\n");
}