migration/dirtyrate.c
/*
 * Dirty page rate measurement code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <zlib.h>
#include "qapi/error.h"
#include "cpu.h"
#include "exec/ramblock.h"
#include "qemu/rcu_queue.h"
#include "qapi/qapi-commands-migration.h"
#include "ram.h"
#include "trace.h"
#include "dirtyrate.h"

static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;

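/*
 * Sleep until at least @msec milliseconds have elapsed since @initial_time,
 * then return the actual elapsed time in milliseconds (which may exceed
 * @msec if recording the page hashes already overran the sample period).
 */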
static int64_t set_sample_page_period(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
    }

    return msec;
}

static bool is_sample_period_valid(int64_t sec)
{
    if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC ||
        sec > MAX_FETCH_DIRTYRATE_TIME_SEC) {
        return false;
    }

    return true;
}

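/*
 * Atomically move the calculation state machine from @old_state to
 * @new_state.  Returns 0 on success, or -1 if another thread changed the
 * state first (the compare-and-swap saw a value other than @old_state).
 */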
static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}

static struct DirtyRateInfo *query_dirty_rate_info(void)
{
    int64_t dirty_rate = DirtyStat.dirty_rate;
    struct DirtyRateInfo *info = g_malloc0(sizeof(DirtyRateInfo));

    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
        info->has_dirty_rate = true;
        info->dirty_rate = dirty_rate;
    }

    info->status = CalculatingState;
    info->start_time = DirtyStat.start_time;
    info->calc_time = DirtyStat.calc_time;

    trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));

    return info;
}

static void init_dirtyrate_stat(int64_t start_time, int64_t calc_time)
{
    DirtyStat.total_dirty_samples = 0;
    DirtyStat.total_sample_count = 0;
    DirtyStat.total_block_mem_MB = 0;
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = start_time;
    DirtyStat.calc_time = calc_time;
}

static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.total_sample_count += info->sample_pages_count;
    /* size of this ramblock in MiB */
    DirtyStat.total_block_mem_MB += (info->ramblock_pages *
                                     TARGET_PAGE_SIZE) >> 20;
}

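/*
 * Derive the dirty rate in MiB/s from the sampling statistics:
 *
 *   dirty fraction = total_dirty_samples / total_sample_count
 *   dirty rate     = dirty fraction * total_block_mem_MB / (msec / 1000)
 *
 * folded into a single integer expression below, multiplying before
 * dividing to avoid losing precision.
 */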
static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}

/*
 * Hash one TARGET_PAGE_SIZE page of the ramblock, located @vfn pages
 * past the ramblock base address.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t crc;

    crc = crc32(0, (info->ramblock_addr +
                vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);

    trace_get_ramblock_vfn_hash(info->idstr, vfn, crc);
    return crc;
}

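/*
 * Pick sample_pages_count random page frame numbers within the ramblock
 * and record the CRC32 of each selected page.  The same hashes are
 * recomputed after the sample period to detect which pages were dirtied.
 */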
static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    int i;
    GRand *rand;

    sample_pages_count = info->sample_pages_count;

    /*
     * If the ramblock is smaller than one page, or nothing would be
     * sampled, report success so this ramblock is simply skipped.
     */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        info->hash_result = NULL;
        return false;
    }

    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        /*
         * g_rand_int_range() excludes the upper bound, so pass
         * ramblock_pages to keep the last page samplable.
         */
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}

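/*
 * Snapshot the per-ramblock metadata needed for sampling: the number of
 * pages to sample (the block's size in GiB scaled by
 * sample_pages_per_gigabytes), the total page count, the host base
 * address, and the idstr used to re-identify the block later.
 */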
static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

    /* Right shift 30 bits to compute the ramblock size in GiB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
    /* Right shift TARGET_PAGE_BITS to compute the page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           TARGET_PAGE_BITS;
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}

static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}

static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks of at least MIN_RAMBLOCK_SIZE, which is
     * expressed in KiB (hence the shift to bytes below).
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        trace_skip_sample_ramblock(block->idstr,
                                   qemu_ram_get_used_length(block));
        return true;
    }

    return false;
}

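/*
 * First pass of a measurement: allocate one RamblockDirtyInfo per
 * migratable ramblock worth sampling and record the initial page hashes.
 * On return, *block_count holds the number of fully recorded entries, so
 * the caller can free exactly what was allocated even on failure.
 */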
static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}

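/*
 * Re-hash every sampled page in the ramblock; a CRC mismatch means the
 * page was written to during the sample period, so count it as dirty.
 */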
static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t crc;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (crc != info->hash_result[i]) {
            trace_calc_page_dirty_rate(info->idstr, crc, info->hash_result[i]);
            info->sample_dirty_count++;
        }
    }
}

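/*
 * Find the RamblockDirtyInfo recorded for @block by idstr.  Reject the
 * match if the block's base address or page count changed during the
 * sample period, since the recorded hashes would no longer line up.
 */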
static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                   struct RamblockDirtyInfo *infos)
{
    int i;
    struct RamblockDirtyInfo *matched;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) {
        trace_find_page_matched(block->idstr);
        return NULL;
    }

    matched = &infos[i];

    return matched;
}

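/*
 * Second pass of a measurement: walk the migratable ramblocks again and
 * accumulate dirty-sample statistics for every block that still matches
 * its recorded info.  Fails only if no samples were collected at all.
 */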
static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                   int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.total_sample_count == 0) {
        return false;
    }

    return true;
}

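/*
 * One whole measurement, as run by the worker thread: record the sampled
 * page hashes under an RCU read lock, drop the lock and sleep for the
 * sample period, then re-take the lock, compare the hashes and derive
 * the dirty rate.
 */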
static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t msec = 0;
    int64_t initial_time;

    rcu_register_thread();
    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    msec = config.sample_period_seconds * 1000;
    msec = set_sample_page_period(msec, initial_time);
    DirtyStat.start_time = initial_time / 1000;
    DirtyStat.calc_time = msec / 1000;

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(msec);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
    rcu_unregister_thread();
}

void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;
    int64_t start_time;
    int64_t calc_time;

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("failed to change dirtyrate state");
        return NULL;
    }

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;
    calc_time = config.sample_period_seconds;
    init_dirtyrate_stat(start_time, calc_time);

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("failed to change dirtyrate state");
    }
    return NULL;
}

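/*
 * QMP handler for calc-dirty-rate.  Validates the requested sample period,
 * resets the state machine and spawns a detached worker thread; the result
 * is fetched later via query-dirty-rate.  @config is static so it remains
 * valid when the detached thread reads it after this function returns.
 * A typical session (response values illustrative):
 *
 *   -> { "execute": "calc-dirty-rate", "arguments": { "calc-time": 1 } }
 *   <- { "return": {} }
 *   ... wait at least calc-time seconds ...
 *   -> { "execute": "query-dirty-rate" }
 *   <- { "return": { "status": "measured", "dirty-rate": 108,
 *                    "start-time": 1602245000, "calc-time": 1 } }
 */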
void qmp_calc_dirty_rate(int64_t calc_time, Error **errp)
{
    static struct DirtyRateConfig config;
    QemuThread thread;
    int ret;

    /*
     * If the dirty rate is already being measured, don't attempt to start.
     */
    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
        error_setg(errp, "the dirty rate is already being measured");
        return;
    }

    if (!is_sample_period_valid(calc_time)) {
        error_setg(errp, "calc-time is out of range [%d, %d]",
                   MIN_FETCH_DIRTYRATE_TIME_SEC,
                   MAX_FETCH_DIRTYRATE_TIME_SEC);
        return;
    }

    /*
     * Init calculation state as unstarted.
     */
    ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
                              DIRTY_RATE_STATUS_UNSTARTED);
    if (ret == -1) {
        error_setg(errp, "failed to initialize dirty rate calculation state");
        return;
    }

    config.sample_period_seconds = calc_time;
    config.sample_pages_per_gigabytes = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
    qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
                       (void *)&config, QEMU_THREAD_DETACHED);
}

struct DirtyRateInfo *qmp_query_dirty_rate(Error **errp)
{
    return query_dirty_rate_info();
}