/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */
#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    int fd;
};

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset < block->used_length);
    return (char *)block->host + offset;
}
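
/*
 * Illustrative sketch (not part of the original header): how a caller such as
 * exec.c typically turns a block-relative offset into a host pointer.  The
 * lookup helper shown here is hypothetical; only ramblock_ptr() is real.
 *
 *     RAMBlock *block = find_ram_block(addr);       // hypothetical lookup
 *     ram_addr_t off  = addr - block->offset;       // offset inside the block
 *     void *hva = ramblock_ptr(block, off);         // host virtual address
 *     memcpy(hva, buf, len);                        // direct guest-RAM access
 */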

typedef struct RAMList {
    QemuMutex mutex;
    /* Protected by the iothread lock.  */
    unsigned long *dirty_memory[DIRTY_MEMORY_NUM];
    RAMBlock *mru_block;
    /* RCU-enabled, writes protected by the ramlist lock. */
    QLIST_HEAD(, RAMBlock) blocks;
    uint32_t version;
} RAMList;
extern RAMList ram_list;

ram_addr_t last_ram_offset(void);
void qemu_mutex_lock_ramlist(void);
void qemu_mutex_unlock_ramlist(void);

ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp);
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp);
int qemu_get_ram_fd(ram_addr_t addr);
void qemu_set_ram_fd(ram_addr_t addr, int fd);
void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
void qemu_ram_free(ram_addr_t addr);

int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);
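
/*
 * Illustrative sketch (not part of the original header): a typical consumer of
 * these allocators is memory.c when it backs a MemoryRegion with host memory;
 * the flow looks roughly like this (names abbreviated, error handling
 * shortened):
 *
 *     Error *err = NULL;
 *     ram_addr_t ram = qemu_ram_alloc(size, mr, &err);
 *     if (err) {
 *         error_propagate(errp, err);
 *         return;
 *     }
 *     // "ram" is the offset of the new block in the global RAM address space
 */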

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
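
/*
 * Worked example, assuming the usual client indices from exec/cpu-common.h
 * (DIRTY_MEMORY_VGA = 0, DIRTY_MEMORY_CODE = 1, DIRTY_MEMORY_MIGRATION = 2,
 * DIRTY_MEMORY_NUM = 3):
 *
 *     DIRTY_CLIENTS_ALL    = (1 << 3) - 1      = 0b111
 *     DIRTY_CLIENTS_NOCODE = 0b111 & ~(1 << 1) = 0b101
 *
 * i.e. NOCODE keeps the VGA and MIGRATION bits and drops the self-modifying
 * code (TCG) client.
 */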

static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_bit(ram_list.dirty_memory[client], end, page);

    return next < end;
}

static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_zero_bit(ram_list.dirty_memory[client], end, page);

    return next >= end;
}
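
/*
 * Note on the two scans above: find_next_bit() returns the index of the first
 * set bit (or "end" if none), so "some page is dirty" is next < end;
 * find_next_zero_bit() returns the first clear bit, so "every page is dirty"
 * is next >= end.  Illustrative use (hypothetical caller):
 *
 *     if (cpu_physical_memory_get_dirty(start, len, DIRTY_MEMORY_VGA)) {
 *         // at least one page in [start, start + len) needs repainting
 *     }
 */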

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}
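
/*
 * Illustrative sketch (not part of the original header): a writer about to
 * touch guest RAM can use the returned mask to re-dirty only the clients
 * whose bitmaps are not already fully set for the range:
 *
 *     uint8_t clean = cpu_physical_memory_range_includes_clean(start, len,
 *                                                              DIRTY_CLIENTS_ALL);
 *     if (clean) {
 *         cpu_physical_memory_set_dirty_range(start, len, clean);
 *     }
 */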

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    assert(client < DIRTY_MEMORY_NUM);
    set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                        ram_addr_t length,
                                                        uint8_t mask)
{
    unsigned long end, page;
    unsigned long **d = ram_list.dirty_memory;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
    }
    if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
    }
    if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
    }
    xen_modified_memory(start, length);
}

#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        long k;
        long nr = BITS_TO_LONGS(pages);

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);
                unsigned long **d = ram_list.dirty_memory;

                atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
                atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
                if (tcg_enabled()) {
                    atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
                }
            }
        }
        xen_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
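
/*
 * Worked example for the hpratio scaling above (hypothetical page sizes): with
 * 64 KiB host pages and 4 KiB target pages, hpratio = 16, so each set bit in
 * the host-page-granular dirty log is expanded into a run of 16 target pages
 * via cpu_physical_memory_set_dirty_range(ram_addr, TARGET_PAGE_SIZE * hpratio, ...).
 */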
#endif /* not _WIN32 */

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                          ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}

static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long bits = atomic_xchg(&src[k], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
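
/*
 * Note on the fast path above: when start lines up with a bitmap word
 * boundary, the migration bitmap and the destination bitmap are word-aligned
 * with each other, so each atomic_xchg() fetches and clears BITS_PER_LONG
 * pages' worth of dirty bits at once; ctpopl() then counts only the bits that
 * were not already set in dest, which is what the function reports.
 */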

void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */