/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it
 * anywhere else.  The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
#include "exec/ramlist.h"

struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
    int fd;
    size_t page_size;
    /* dirty bitmap used during migration */
    unsigned long *bmap;
    /* bitmap of pages that haven't been sent even once; currently only
     * maintained and used in postcopy, where it is used to send the
     * dirty map at the start of the postcopy phase
     */
    unsigned long *unsentmap;
    /* bitmap of already received pages in postcopy */
    unsigned long *receivedmap;
};

/* Return true if @b is mapped (has a host pointer) and @offset falls
 * within its used length.
 */
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

/* Translate an offset within @block into a host pointer. */
static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
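
/*
 * Usage sketch (illustrative only, not part of this header): mapping a
 * guest RAM offset to a host pointer.  The function and its parameters
 * are hypothetical; in real code the block would come e.g. from
 * qemu_ram_block_by_name() or the RAMBlock list.
 *
 *     void write_byte(RAMBlock *rb, ram_addr_t off, uint8_t val)
 *     {
 *         uint8_t *p;
 *
 *         if (!offset_in_ramblock(rb, off)) {
 *             return;                 // outside the used length
 *         }
 *         p = ramblock_ptr(rb, off);
 *         *p = val;                   // direct host-side access
 *     }
 */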

/* Page index of @host_addr within @rb, for use with rb->receivedmap. */
static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
                                                            RAMBlock *rb)
{
    uint64_t host_addr_offset =
            (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
    return host_addr_offset >> TARGET_PAGE_BITS;
}

long qemu_getrampagesize(void);

/* Allocation, resizing and freeing of RAM blocks. */
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 bool share, int fd,
                                 Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, MemoryRegion *mr,
                         Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

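/*
 * Usage sketch (illustrative only): allocating a resizeable block and
 * growing it later.  The MemoryRegion @mr and the sizes are hypothetical;
 * real callers live in memory.c (e.g. memory_region_init_resizeable_ram).
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_resizeable(16 * 1024 * 1024,
 *                                              64 * 1024 * 1024,
 *                                              NULL, mr, &err);
 *     if (!rb) {
 *         error_report_err(err);      // allocation failed
 *     } else if (qemu_ram_resize(rb, 32 * 1024 * 1024, &err) < 0) {
 *         error_report_err(err);      // growing past max_length fails
 *     }
 *
 * Blocks are freed with qemu_ram_free(), which defers the actual reclaim
 * until an RCU grace period has elapsed.
 */
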
#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

/* Return true if any page in [@start, @start + @length) is dirty for
 * @client.
 */
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

/* Return true only if every page in [@start, @start + @length) is dirty
 * for @client.
 */
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

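/*
 * Usage sketch (illustrative only): polling the dirty state of a frame
 * buffer range for the VGA client.  @fb_addr and @fb_size are
 * hypothetical; display code normally goes through the
 * DirtyBitmapSnapshot API declared further down rather than polling.
 *
 *     if (cpu_physical_memory_get_dirty(fb_addr, fb_size,
 *                                       DIRTY_MEMORY_VGA)) {
 *         // at least one page changed since the last clear: redraw
 *     }
 *     if (cpu_physical_memory_all_dirty(fb_addr, fb_size,
 *                                       DIRTY_MEMORY_VGA)) {
 *         // every page changed: a full redraw is unavoidable
 *     }
 */
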
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

/* A page is "clean" unless it is dirty for *all* clients at once. */
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

/* Return the subset of @mask for which [@start, @start + @length)
 * contains at least one clean page.
 */
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

/* Mark the single page containing @addr dirty for @client. */
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}

/* Mark [@start, @start + @length) dirty for every client set in @mask. */
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_hvm_modified_memory(start, length);
}

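/*
 * Usage sketch (illustrative only): a DMA-capable device model that has
 * just written @len bytes at @addr would mark the range dirty for all
 * clients except the TCG code client.  Real device code normally gets
 * this done indirectly via the memory API (e.g. dma_memory_write()):
 *
 *     cpu_physical_memory_set_dirty_range(addr, len, DIRTY_CLIENTS_NOCODE);
 */
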
#if !defined(_WIN32)
/* Mark pages dirty according to a little-endian bitmap in which each set
 * bit marks one host page, starting at @start.
 */
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* Is the start address aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * Walking the bitmap is faster than walking memory page by page,
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

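/*
 * Usage sketch (illustrative only): feeding a hypervisor-provided
 * little-endian dirty log into the helper above.  @log, @log_start and
 * @npages are hypothetical names; the real caller is the KVM dirty-log
 * sync path.
 *
 *     unsigned long *log = ...;  // one bit per host page, LE word layout
 *     cpu_physical_memory_set_dirty_lebitmap(log, log_start, npages);
 */
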
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

/* Atomically snapshot and clear the dirty state of a range; the snapshot
 * can then be queried without racing against new writes.
 */
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (ram_addr_t start, ram_addr_t length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);

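/*
 * Usage sketch (illustrative only): a display device grabbing a consistent
 * snapshot of the VGA dirty state and testing it scanline by scanline.
 * @fb_addr, @stride and @rows are hypothetical; the snapshot is freed
 * with g_free().
 *
 *     int y;
 *     DirtyBitmapSnapshot *snap =
 *         cpu_physical_memory_snapshot_and_clear_dirty(fb_addr,
 *                                                      stride * rows,
 *                                                      DIRTY_MEMORY_VGA);
 *     for (y = 0; y < rows; y++) {
 *         if (cpu_physical_memory_snapshot_get_dirty(snap,
 *                                                    fb_addr + y * stride,
 *                                                    stride)) {
 *             // redraw scanline y
 *         }
 *     }
 *     g_free(snap);
 */
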
static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}

/* Move the dirty bits for [@start, @start + @length) of @rb from the
 * global DIRTY_MEMORY_MIGRATION bitmap into rb->bmap.  Return the number
 * of pages newly dirtied in rb->bmap, and accumulate the raw count of
 * dirty pages found in @real_dirty_pages.
 */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               uint64_t *real_dirty_pages)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* Are the start address and length aligned at the start of a word? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset) &&
        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                *real_dirty_pages += ctpopl(bits);
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                *real_dirty_pages += 1;
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
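
/*
 * Usage sketch (illustrative only): migration-style code harvesting dirty
 * pages for one block.  @rb is hypothetical; the real caller is the RAM
 * migration code, which runs this under the appropriate bitmap mutex.
 *
 *     uint64_t real_dirty = 0;
 *     uint64_t newly_dirty =
 *         cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length,
 *                                               &real_dirty);
 *     // newly_dirty pages are now set in rb->bmap and cleared from the
 *     // global DIRTY_MEMORY_MIGRATION bitmap
 */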
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */