/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
#include "exec/ramlist.h"

struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;          /* host virtual address of the mapping */
    ram_addr_t offset;      /* offset of the block in ram_addr_t space */
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
    int fd;                 /* backing file descriptor, or -1 */
    size_t page_size;       /* page size of the backing storage */
};

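/*
 * An offset is only valid if it falls within the block's used_length;
 * the tail of a resizeable block (between used_length and max_length)
 * is allocated but not currently guest-visible.
 */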
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return (b && b->host && offset < b->used_length);
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}

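/*
 * Illustrative sketch (not part of the original header): code that writes
 * guest RAM through the host pointer is expected to mark the range dirty
 * afterwards, along the lines of:
 *
 *     void *p = ramblock_ptr(block, off);
 *     memcpy(p, data, len);
 *     cpu_physical_memory_set_dirty_range(block->offset + off, len,
 *                                         DIRTY_CLIENTS_ALL);
 *
 * where "block", "off", "data" and "len" are hypothetical names.
 */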
long qemu_getrampagesize(void);
ram_addr_t last_ram_offset(void);
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

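/*
 * Dirty memory is tracked per client (DIRTY_MEMORY_VGA, DIRTY_MEMORY_CODE,
 * DIRTY_MEMORY_MIGRATION); the masks below select all clients, or all but
 * the code client.
 */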
#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

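/*
 * Return true if any page in [start, start + length) is dirty for the
 * given client.  The per-client bitmap is sharded into blocks of
 * DIRTY_MEMORY_BLOCK_SIZE bits so it can be grown under RCU, hence the
 * chunked walk below.
 */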
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

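/*
 * The converse of the above: true only if every page in the range is
 * dirty for the client (the walk looks for a zero bit instead of a set
 * bit).
 */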
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

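/* Single-page convenience wrapper around cpu_physical_memory_get_dirty(). */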
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

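/*
 * A page only counts as fully dirty when all three clients have it
 * marked; if any client still sees it clean, writes to the page must
 * still be tracked.
 */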
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

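/*
 * Return, as a bitmask restricted to "mask", the clients for which the
 * range is not entirely dirty, i.e. the clients that still need dirty
 * tracking for some page in the range.
 */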
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

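/* Atomically mark a single page dirty for one client. */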
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}

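/*
 * Mark [start, start + length) dirty for every client selected in "mask",
 * again walking the bitmap one DIRTY_MEMORY_BLOCK_SIZE chunk at a time.
 * Xen is notified even for an empty mask so it can track the modification.
 */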
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_modified_memory(start, length);
}

#if !defined(_WIN32)
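/*
 * Import a little-endian dirty bitmap (e.g. one filled in by KVM's dirty
 * log) into the per-client bitmaps.  The word-aligned case ORs whole longs
 * in; the slow path visits each set bit individually.
 */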
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

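/*
 * Clear the dirty state of [start, start + length) for one client and
 * return whether any page in the range was dirty (defined in exec.c).
 */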
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}

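/*
 * Transfer migration dirty state into "dest", clearing it in the global
 * bitmap: on the word-aligned fast path each bitmap word is fetched and
 * cleared atomically (atomic_xchg).  The return value counts only bits
 * newly set in "dest", while *real_dirty_pages accumulates every dirty
 * page seen.
 */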
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               int64_t *real_dirty_pages)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((page * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                *real_dirty_pages += ctpopl(bits);
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                *real_dirty_pages += 1;
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}

void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */