/*
 * Dirtyrate implement code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
ba0e519f | 13 | #include <zlib.h> |
4240dcee CZ |
14 | #include "qemu/osdep.h" |
15 | #include "qapi/error.h" | |
16 | #include "cpu.h" | |
17 | #include "qemu/config-file.h" | |
18 | #include "exec/memory.h" | |
19 | #include "exec/ramblock.h" | |
20 | #include "exec/target_page.h" | |
21 | #include "qemu/rcu_queue.h" | |
22 | #include "qapi/qapi-commands-migration.h" | |
23 | #include "migration.h" | |
3ded54b1 | 24 | #include "ram.h" |
4240dcee CZ |
25 | #include "dirtyrate.h" |
26 | ||
/* Current state of the measurement state machine (a DirtyRateStatus value). */
static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
/* Accumulated sampling statistics for the in-progress / last measurement. */
static struct DirtyRateStat DirtyStat;
7df3aa30 CZ |
29 | |
30 | static int dirtyrate_set_state(int *state, int old_state, int new_state) | |
31 | { | |
32 | assert(new_state < DIRTY_RATE_STATUS__MAX); | |
33 | if (qatomic_cmpxchg(state, old_state, new_state) == old_state) { | |
34 | return 0; | |
35 | } else { | |
36 | return -1; | |
37 | } | |
38 | } | |
39 | ||
c9a58d71 CZ |
40 | static void reset_dirtyrate_stat(void) |
41 | { | |
42 | DirtyStat.total_dirty_samples = 0; | |
43 | DirtyStat.total_sample_count = 0; | |
44 | DirtyStat.total_block_mem_MB = 0; | |
45 | DirtyStat.dirty_rate = -1; | |
46 | DirtyStat.start_time = 0; | |
47 | DirtyStat.calc_time = 0; | |
48 | } | |
49 | ||
50 | static void update_dirtyrate_stat(struct RamblockDirtyInfo *info) | |
51 | { | |
52 | DirtyStat.total_dirty_samples += info->sample_dirty_count; | |
53 | DirtyStat.total_sample_count += info->sample_pages_count; | |
54 | /* size of total pages in MB */ | |
55 | DirtyStat.total_block_mem_MB += (info->ramblock_pages * | |
56 | TARGET_PAGE_SIZE) >> 20; | |
57 | } | |
58 | ||
59 | static void update_dirtyrate(uint64_t msec) | |
60 | { | |
61 | uint64_t dirtyrate; | |
62 | uint64_t total_dirty_samples = DirtyStat.total_dirty_samples; | |
63 | uint64_t total_sample_count = DirtyStat.total_sample_count; | |
64 | uint64_t total_block_mem_MB = DirtyStat.total_block_mem_MB; | |
65 | ||
66 | dirtyrate = total_dirty_samples * total_block_mem_MB * | |
67 | 1000 / (total_sample_count * msec); | |
68 | ||
69 | DirtyStat.dirty_rate = dirtyrate; | |
70 | } | |
7df3aa30 | 71 | |
ba0e519f CZ |
/*
 * Hash one sampled page: CRC32 over TARGET_PAGE_SIZE bytes at virtual
 * frame number @vfn from the base address of the ramblock in @info.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    return crc32(0, info->ramblock_addr + vfn * TARGET_PAGE_SIZE,
                 TARGET_PAGE_SIZE);
}
86 | ||
87 | static bool save_ramblock_hash(struct RamblockDirtyInfo *info) | |
88 | { | |
89 | unsigned int sample_pages_count; | |
90 | int i; | |
91 | GRand *rand; | |
92 | ||
93 | sample_pages_count = info->sample_pages_count; | |
94 | ||
95 | /* ramblock size less than one page, return success to skip this ramblock */ | |
96 | if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) { | |
97 | return true; | |
98 | } | |
99 | ||
100 | info->hash_result = g_try_malloc0_n(sample_pages_count, | |
101 | sizeof(uint32_t)); | |
102 | if (!info->hash_result) { | |
103 | return false; | |
104 | } | |
105 | ||
106 | info->sample_page_vfn = g_try_malloc0_n(sample_pages_count, | |
107 | sizeof(uint64_t)); | |
108 | if (!info->sample_page_vfn) { | |
109 | g_free(info->hash_result); | |
110 | return false; | |
111 | } | |
112 | ||
113 | rand = g_rand_new(); | |
114 | for (i = 0; i < sample_pages_count; i++) { | |
115 | info->sample_page_vfn[i] = g_rand_int_range(rand, 0, | |
116 | info->ramblock_pages - 1); | |
117 | info->hash_result[i] = get_ramblock_vfn_hash(info, | |
118 | info->sample_page_vfn[i]); | |
119 | } | |
120 | g_rand_free(rand); | |
121 | ||
122 | return true; | |
123 | } | |
124 | ||
125 | static void get_ramblock_dirty_info(RAMBlock *block, | |
126 | struct RamblockDirtyInfo *info, | |
127 | struct DirtyRateConfig *config) | |
128 | { | |
129 | uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes; | |
130 | ||
131 | /* Right shift 30 bits to calc ramblock size in GB */ | |
132 | info->sample_pages_count = (qemu_ram_get_used_length(block) * | |
133 | sample_pages_per_gigabytes) >> 30; | |
134 | /* Right shift TARGET_PAGE_BITS to calc page count */ | |
135 | info->ramblock_pages = qemu_ram_get_used_length(block) >> | |
136 | TARGET_PAGE_BITS; | |
137 | info->ramblock_addr = qemu_ram_get_host_addr(block); | |
138 | strcpy(info->idstr, qemu_ram_get_idstr(block)); | |
139 | } | |
140 | ||
141 | static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo, | |
142 | struct DirtyRateConfig config, | |
143 | int *block_count) | |
144 | { | |
145 | struct RamblockDirtyInfo *info = NULL; | |
146 | struct RamblockDirtyInfo *dinfo = NULL; | |
147 | RAMBlock *block = NULL; | |
148 | int total_count = 0; | |
149 | int index = 0; | |
150 | bool ret = false; | |
151 | ||
152 | RAMBLOCK_FOREACH_MIGRATABLE(block) { | |
153 | total_count++; | |
154 | } | |
155 | ||
156 | dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo)); | |
157 | if (dinfo == NULL) { | |
158 | goto out; | |
159 | } | |
160 | ||
161 | RAMBLOCK_FOREACH_MIGRATABLE(block) { | |
162 | if (index >= total_count) { | |
163 | break; | |
164 | } | |
165 | info = &dinfo[index]; | |
166 | get_ramblock_dirty_info(block, info, &config); | |
167 | if (!save_ramblock_hash(info)) { | |
168 | goto out; | |
169 | } | |
170 | index++; | |
171 | } | |
172 | ret = true; | |
173 | ||
174 | out: | |
175 | *block_count = index; | |
176 | *block_dinfo = dinfo; | |
177 | return ret; | |
178 | } | |
179 | ||
9c04387b CZ |
180 | static void calc_page_dirty_rate(struct RamblockDirtyInfo *info) |
181 | { | |
182 | uint32_t crc; | |
183 | int i; | |
184 | ||
185 | for (i = 0; i < info->sample_pages_count; i++) { | |
186 | crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]); | |
187 | if (crc != info->hash_result[i]) { | |
188 | info->sample_dirty_count++; | |
189 | } | |
190 | } | |
191 | } | |
192 | ||
193 | static struct RamblockDirtyInfo * | |
194 | find_block_matched(RAMBlock *block, int count, | |
195 | struct RamblockDirtyInfo *infos) | |
196 | { | |
197 | int i; | |
198 | struct RamblockDirtyInfo *matched; | |
199 | ||
200 | for (i = 0; i < count; i++) { | |
201 | if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) { | |
202 | break; | |
203 | } | |
204 | } | |
205 | ||
206 | if (i == count) { | |
207 | return NULL; | |
208 | } | |
209 | ||
210 | if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) || | |
211 | infos[i].ramblock_pages != | |
212 | (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) { | |
213 | return NULL; | |
214 | } | |
215 | ||
216 | matched = &infos[i]; | |
217 | ||
218 | return matched; | |
219 | } | |
220 | ||
221 | static bool compare_page_hash_info(struct RamblockDirtyInfo *info, | |
222 | int block_count) | |
223 | { | |
224 | struct RamblockDirtyInfo *block_dinfo = NULL; | |
225 | RAMBlock *block = NULL; | |
226 | ||
227 | RAMBLOCK_FOREACH_MIGRATABLE(block) { | |
228 | block_dinfo = find_block_matched(block, block_count, info); | |
229 | if (block_dinfo == NULL) { | |
230 | continue; | |
231 | } | |
232 | calc_page_dirty_rate(block_dinfo); | |
233 | update_dirtyrate_stat(block_dinfo); | |
234 | } | |
235 | ||
236 | if (DirtyStat.total_sample_count == 0) { | |
237 | return false; | |
238 | } | |
239 | ||
240 | return true; | |
241 | } | |
242 | ||
4240dcee CZ |
243 | static void calculate_dirtyrate(struct DirtyRateConfig config) |
244 | { | |
245 | /* todo */ | |
246 | return; | |
247 | } | |
248 | ||
249 | void *get_dirtyrate_thread(void *arg) | |
250 | { | |
251 | struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg; | |
7df3aa30 CZ |
252 | int ret; |
253 | ||
254 | ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED, | |
255 | DIRTY_RATE_STATUS_MEASURING); | |
256 | if (ret == -1) { | |
257 | error_report("change dirtyrate state failed."); | |
258 | return NULL; | |
259 | } | |
4240dcee CZ |
260 | |
261 | calculate_dirtyrate(config); | |
262 | ||
7df3aa30 CZ |
263 | ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING, |
264 | DIRTY_RATE_STATUS_MEASURED); | |
265 | if (ret == -1) { | |
266 | error_report("change dirtyrate state failed."); | |
267 | } | |
4240dcee CZ |
268 | return NULL; |
269 | } |