/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
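
/* Illustrative note (not part of the original commit history): the dispatch
 * structure above is a radix tree over physical page numbers.  Each
 * PhysPageEntry either points at another table of L2_SIZE entries (!is_leaf)
 * or at an entry in phys_sections (is_leaf), so a lookup walks P_L2_LEVELS
 * tables, consuming L2_BITS of the page index per level.  For example,
 * assuming L2_BITS == 10, each node covers 10 bits of the physical page
 * number and a two-level walk resolves up to 2^20 pages.
 */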

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
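
/* Minimal usage sketch (illustrative only): registering section number
 * `leaf` for a run of pages drives the recursion above once per level:
 *
 *     uint16_t leaf = phys_section_add(&section);
 *     phys_page_set(d, section.offset_within_address_space >> TARGET_PAGE_BITS,
 *                   section.size >> TARGET_PAGE_BITS, leaf);
 *
 * This is exactly how register_multipage() below uses the tree;
 * intermediate nodes are allocated lazily by phys_page_set_level().
 */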

static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}
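
/* Note (added for clarity): the lookup mirrors phys_page_set_level() but
 * iteratively.  A miss at any level falls back to the
 * phys_section_unassigned section rather than failing, so callers always
 * get a valid MemoryRegionSection back.
 */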

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &phys_sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(as, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    return address_space_translate_internal(as, addr, xlat, plen, true)->mr;
}
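
/* Usage sketch (illustrative): callers translate a physical address to a
 * MemoryRegion plus offset before accessing it, e.g.
 *
 *     hwaddr xlat, plen = 4;
 *     MemoryRegion *mr = address_space_translate(&address_space_memory,
 *                                                addr, &xlat, &plen, false);
 *
 * On return, plen has been clamped to the bytes remaining in the section,
 * so a caller must loop if its access crosses a section boundary.
 */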

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    return address_space_translate_internal(as, addr, xlat, plen, false);
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}
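
/* Usage sketch (illustrative, with a hypothetical callback):
 *
 *     static void count_cpu(CPUState *cpu, void *data)
 *     {
 *         (*(int *)data)++;
 *     }
 *     ...
 *     int n = 0;
 *     qemu_for_each_cpu(count_cpu, &n);
 */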

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
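
/* Note on len_mask (added for clarity): for a power-of-2 length,
 * ~(len - 1) masks off the low bits, so the hit test in
 * check_watchpoint() reduces to (addr & len_mask) == wp->vaddr.
 * E.g. len == 4 gives len_mask == ~3, matching the 4-byte aligned
 * word at wp->vaddr.
 */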

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
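
/* Usage sketch (illustrative): this is the pattern a debugger front end
 * such as the gdbstub relies on, inserting with BP_GDB so its
 * breakpoints sort ahead of guest-visible ones:
 *
 *     CPUBreakpoint *bp;
 *     if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
 *         ...
 *         cpu_breakpoint_remove(env, pc, BP_GDB);
 *     }
 */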

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
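
/* Note (added for clarity): the returned iotlb value is an encoding trick.
 * For RAM it is a ram_addr with low bits borrowed for the special
 * notdirty/rom section numbers; for MMIO it is a plain index into
 * phys_sections plus the in-section offset.  phys_section_add() below
 * asserts the section count stays under TARGET_PAGE_SIZE precisely so the
 * ORed-in section numbers cannot collide with the page-aligned address
 * bits.
 */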

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = section->size >> TARGET_PAGE_BITS;

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = MIN(left, now.size);
        register_subpage(d, &now);
    } else {
        now.size = 0;
    }
    while (remain.size != now.size) {
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
        now = remain;
        if (remain.size < TARGET_PAGE_SIZE) {
            register_subpage(d, &now);
        } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= -TARGET_PAGE_SIZE;
            register_multipage(d, &now);
        }
    }
}
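
/* Worked example (illustrative, assuming 4 KiB pages): adding a section at
 * offset 0x1800 of size 0x2800.  The unaligned head [0x1800, 0x2000) is
 * registered via register_subpage(); the page-aligned remainder
 * [0x2000, 0x4000) goes through register_multipage().  A section whose
 * tail ended mid-page would have that tail routed back through
 * register_subpage() as well.
 */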

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
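
/* Note (added for context): file_ram_alloc() backs guest RAM with
 * hugetlbfs when the user runs QEMU with something like
 *
 *     qemu-system-x86_64 -mem-path /dev/hugepages -m 1024 ...
 *
 * mem_path is set from that option; on failure the caller falls back to
 * anonymous memory, as qemu_ram_alloc_from_ptr() below shows.
 */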

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
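
/* Worked example (illustrative): with blocks at [0, 0x1000) and
 * [0x3000, 0x4000), a request for size 0x1000 considers the gap after
 * each block; the gap [0x1000, 0x3000) after the first block fits and is
 * the smallest candidate, so 0x1000 is returned (best-fit on gap size,
 * not first-fit).
 */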

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
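
/* Usage sketch (illustrative): board code normally reaches this through
 * the memory API rather than directly, e.g. memory_region_init_ram(),
 * which calls qemu_ram_alloc() and keeps the returned ram_addr_t in the
 * MemoryRegion.  Direct callers pass host == NULL to let QEMU pick the
 * backing store (file_ram_alloc(), Xen, KVM or anonymous memory).
 */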

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC | PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;

    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
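
/* Note (added for clarity): this is the write path behind the
 * phys_section_notdirty iotlb encoding above.  Clean RAM pages are mapped
 * so that stores trap here first; the handler invalidates any TBs
 * translated from the page, performs the store, sets the dirty bits, and
 * re-enables direct writes once all flags (including CODE_DIRTY_FLAG) are
 * set, so subsequent stores go straight to RAM.
 */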
b018ddf6 PB |
1463 | static bool notdirty_mem_accepts(void *opaque, hwaddr addr, |
1464 | unsigned size, bool is_write) | |
1465 | { | |
1466 | return is_write; | |
1467 | } | |
1468 | ||
0e0df1e2 | 1469 | static const MemoryRegionOps notdirty_mem_ops = { |
0e0df1e2 | 1470 | .write = notdirty_mem_write, |
b018ddf6 | 1471 | .valid.accepts = notdirty_mem_accepts, |
0e0df1e2 | 1472 | .endianness = DEVICE_NATIVE_ENDIAN, |
1ccde1cb FB |
1473 | }; |
1474 | ||
0f459d16 | 1475 | /* Generate a debug exception if a watchpoint has been hit. */ |
b4051334 | 1476 | static void check_watchpoint(int offset, int len_mask, int flags) |
0f459d16 | 1477 | { |
9349b4f9 | 1478 | CPUArchState *env = cpu_single_env; |
06d55cc1 | 1479 | target_ulong pc, cs_base; |
0f459d16 | 1480 | target_ulong vaddr; |
a1d1bb31 | 1481 | CPUWatchpoint *wp; |
06d55cc1 | 1482 | int cpu_flags; |
0f459d16 | 1483 | |
06d55cc1 AL |
1484 | if (env->watchpoint_hit) { |
1485 | /* We re-entered the check after replacing the TB. Now raise | |
1486 | * the debug interrupt so that it will trigger after the |
1487 | * current instruction. */ | |
c3affe56 | 1488 | cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG); |
06d55cc1 AL |
1489 | return; |
1490 | } | |
2e70f6ef | 1491 | vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset; |
72cf2d4f | 1492 | QTAILQ_FOREACH(wp, &env->watchpoints, entry) { |
b4051334 AL |
1493 | if ((vaddr == (wp->vaddr & len_mask) || |
1494 | (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) { | |
6e140f28 AL |
1495 | wp->flags |= BP_WATCHPOINT_HIT; |
1496 | if (!env->watchpoint_hit) { | |
1497 | env->watchpoint_hit = wp; | |
5a316526 | 1498 | tb_check_watchpoint(env); |
6e140f28 AL |
1499 | if (wp->flags & BP_STOP_BEFORE_ACCESS) { |
1500 | env->exception_index = EXCP_DEBUG; | |
488d6577 | 1501 | cpu_loop_exit(env); |
6e140f28 AL |
1502 | } else { |
1503 | cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags); | |
1504 | tb_gen_code(env, pc, cs_base, cpu_flags, 1); | |
488d6577 | 1505 | cpu_resume_from_signal(env, NULL); |
6e140f28 | 1506 | } |
06d55cc1 | 1507 | } |
6e140f28 AL |
1508 | } else { |
1509 | wp->flags &= ~BP_WATCHPOINT_HIT; | |
0f459d16 PB |
1510 | } |
1511 | } | |
1512 | } | |
1513 | ||
6658ffb8 PB |
1514 | /* Watchpoint access routines. Watchpoints are inserted using TLB tricks, |
1515 | so these check for a hit then pass through to the normal out-of-line | |
1516 | phys routines. */ | |
a8170e5e | 1517 | static uint64_t watch_mem_read(void *opaque, hwaddr addr, |
1ec9b909 | 1518 | unsigned size) |
6658ffb8 | 1519 | { |
1ec9b909 AK |
1520 | check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ); |
1521 | switch (size) { | |
1522 | case 1: return ldub_phys(addr); | |
1523 | case 2: return lduw_phys(addr); | |
1524 | case 4: return ldl_phys(addr); | |
1525 | default: abort(); | |
1526 | } | |
6658ffb8 PB |
1527 | } |
1528 | ||
a8170e5e | 1529 | static void watch_mem_write(void *opaque, hwaddr addr, |
1ec9b909 | 1530 | uint64_t val, unsigned size) |
6658ffb8 | 1531 | { |
1ec9b909 AK |
1532 | check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE); |
1533 | switch (size) { | |
67364150 MF |
1534 | case 1: |
1535 | stb_phys(addr, val); | |
1536 | break; | |
1537 | case 2: | |
1538 | stw_phys(addr, val); | |
1539 | break; | |
1540 | case 4: | |
1541 | stl_phys(addr, val); | |
1542 | break; | |
1ec9b909 AK |
1543 | default: abort(); |
1544 | } | |
6658ffb8 PB |
1545 | } |
1546 | ||
1ec9b909 AK |
1547 | static const MemoryRegionOps watch_mem_ops = { |
1548 | .read = watch_mem_read, | |
1549 | .write = watch_mem_write, | |
1550 | .endianness = DEVICE_NATIVE_ENDIAN, | |
6658ffb8 | 1551 | }; |
6658ffb8 | 1552 | |
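/*
 * Illustrative sketch (not part of the build): the watch_mem_* routines
 * above fire for pages that hold at least one watchpoint.  A debugger
 * front end would arm one roughly like this; the address, length, and
 * flag combination are assumptions for illustration only.
 */
#if 0
void watch_guest_variable_example(CPUArchState *env)
{
    CPUWatchpoint *wp;

    /* trap any write to the 4 bytes at guest virtual address 0x10000 */
    cpu_watchpoint_insert(env, 0x10000, 4,
                          BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS, &wp);
}
#endif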
a8170e5e | 1553 | static uint64_t subpage_read(void *opaque, hwaddr addr, |
70c68e44 | 1554 | unsigned len) |
db7b5426 | 1555 | { |
acc9d80b JK |
1556 | subpage_t *subpage = opaque; |
1557 | uint8_t buf[4]; | |
791af8c8 | 1558 | |
db7b5426 | 1559 | #if defined(DEBUG_SUBPAGE) |
acc9d80b JK |
1560 | printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__, |
1561 | subpage, len, addr); | |
db7b5426 | 1562 | #endif |
acc9d80b JK |
1563 | address_space_read(subpage->as, addr + subpage->base, buf, len); |
1564 | switch (len) { | |
1565 | case 1: | |
1566 | return ldub_p(buf); | |
1567 | case 2: | |
1568 | return lduw_p(buf); | |
1569 | case 4: | |
1570 | return ldl_p(buf); | |
1571 | default: | |
1572 | abort(); | |
1573 | } | |
db7b5426 BS |
1574 | } |
1575 | ||
a8170e5e | 1576 | static void subpage_write(void *opaque, hwaddr addr, |
70c68e44 | 1577 | uint64_t value, unsigned len) |
db7b5426 | 1578 | { |
acc9d80b JK |
1579 | subpage_t *subpage = opaque; |
1580 | uint8_t buf[4]; | |
1581 | ||
db7b5426 | 1582 | #if defined(DEBUG_SUBPAGE) |
70c68e44 | 1583 | printf("%s: subpage %p len %d addr " TARGET_FMT_plx |
acc9d80b JK |
1584 | " value %"PRIx64"\n", |
1585 | __func__, subpage, len, addr, value); | |
db7b5426 | 1586 | #endif |
acc9d80b JK |
1587 | switch (len) { |
1588 | case 1: | |
1589 | stb_p(buf, value); | |
1590 | break; | |
1591 | case 2: | |
1592 | stw_p(buf, value); | |
1593 | break; | |
1594 | case 4: | |
1595 | stl_p(buf, value); | |
1596 | break; | |
1597 | default: | |
1598 | abort(); | |
1599 | } | |
1600 | address_space_write(subpage->as, addr + subpage->base, buf, len); | |
db7b5426 BS |
1601 | } |
1602 | ||
c353e4cc PB |
1603 | static bool subpage_accepts(void *opaque, hwaddr addr, |
1604 | unsigned size, bool is_write) | |
1605 | { | |
acc9d80b | 1606 | subpage_t *subpage = opaque; |
c353e4cc | 1607 | #if defined(DEBUG_SUBPAGE) |
acc9d80b JK |
1608 | printf("%s: subpage %p %c size %d addr " TARGET_FMT_plx "\n", |
1609 | __func__, subpage, is_write ? 'w' : 'r', size, addr); |
c353e4cc PB |
1610 | #endif |
1611 | ||
acc9d80b JK |
1612 | return address_space_access_valid(subpage->as, addr + subpage->base, |
1613 | size, is_write); | |
c353e4cc PB |
1614 | } |
1615 | ||
70c68e44 AK |
1616 | static const MemoryRegionOps subpage_ops = { |
1617 | .read = subpage_read, | |
1618 | .write = subpage_write, | |
c353e4cc | 1619 | .valid.accepts = subpage_accepts, |
70c68e44 | 1620 | .endianness = DEVICE_NATIVE_ENDIAN, |
db7b5426 BS |
1621 | }; |
1622 | ||
c227f099 | 1623 | static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, |
5312bd8b | 1624 | uint16_t section) |
db7b5426 BS |
1625 | { |
1626 | int idx, eidx; | |
1627 | ||
1628 | if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) | |
1629 | return -1; | |
1630 | idx = SUBPAGE_IDX(start); | |
1631 | eidx = SUBPAGE_IDX(end); | |
1632 | #if defined(DEBUG_SUBPAGE) | |
0bf9e31a | 1633 | printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426 BS |
1634 | mmio, start, end, idx, eidx, section); |
1635 | #endif | |
db7b5426 | 1636 | for (; idx <= eidx; idx++) { |
5312bd8b | 1637 | mmio->sub_section[idx] = section; |
db7b5426 BS |
1638 | } |
1639 | ||
1640 | return 0; | |
1641 | } | |
1642 | ||
acc9d80b | 1643 | static subpage_t *subpage_init(AddressSpace *as, hwaddr base) |
db7b5426 | 1644 | { |
c227f099 | 1645 | subpage_t *mmio; |
db7b5426 | 1646 | |
7267c094 | 1647 | mmio = g_malloc0(sizeof(subpage_t)); |
1eec614b | 1648 | |
acc9d80b | 1649 | mmio->as = as; |
1eec614b | 1650 | mmio->base = base; |
70c68e44 AK |
1651 | memory_region_init_io(&mmio->iomem, &subpage_ops, mmio, |
1652 | "subpage", TARGET_PAGE_SIZE); | |
b3b00c78 | 1653 | mmio->iomem.subpage = true; |
db7b5426 | 1654 | #if defined(DEBUG_SUBPAGE) |
1eec614b AL |
1655 | printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__, |
1656 | mmio, base, TARGET_PAGE_SIZE); |
db7b5426 | 1657 | #endif |
0f0cb164 | 1658 | subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned); |
db7b5426 BS |
1659 | |
1660 | return mmio; | |
1661 | } | |
1662 | ||
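/*
 * Illustrative sketch (not part of the build): subpages let two memory
 * regions share one target page.  If an MMIO region covers only bytes
 * 0x800..0x9ff of a 4KiB page, the dispatch code allocates a subpage_t
 * for the page and routes just that byte range to the region's section;
 * the offsets and section index below are assumptions for illustration.
 */
#if 0
subpage_t *sp = subpage_init(&address_space_memory, page_base);
subpage_register(sp, 0x800, 0x9ff, mmio_section_index);
/* accesses to the rest of the page still hit phys_section_unassigned */
#endif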
5312bd8b AK |
1663 | static uint16_t dummy_section(MemoryRegion *mr) |
1664 | { | |
1665 | MemoryRegionSection section = { | |
1666 | .mr = mr, | |
1667 | .offset_within_address_space = 0, | |
1668 | .offset_within_region = 0, | |
1669 | .size = UINT64_MAX, | |
1670 | }; | |
1671 | ||
1672 | return phys_section_add(&section); |
1673 | } | |
1674 | ||
a8170e5e | 1675 | MemoryRegion *iotlb_to_region(hwaddr index) |
aa102231 | 1676 | { |
37ec01d4 | 1677 | return phys_sections[index & ~TARGET_PAGE_MASK].mr; |
aa102231 AK |
1678 | } |
1679 | ||
e9179ce1 AK |
1680 | static void io_mem_init(void) |
1681 | { | |
bf8d5166 | 1682 | memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX); |
0e0df1e2 AK |
1683 | memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL, |
1684 | "unassigned", UINT64_MAX); | |
1685 | memory_region_init_io(&io_mem_notdirty, ¬dirty_mem_ops, NULL, | |
1686 | "notdirty", UINT64_MAX); | |
1ec9b909 AK |
1687 | memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL, |
1688 | "watch", UINT64_MAX); | |
e9179ce1 AK |
1689 | } |
1690 | ||
ac1970fb AK |
1691 | static void mem_begin(MemoryListener *listener) |
1692 | { | |
1693 | AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener); | |
1694 | ||
1695 | destroy_all_mappings(d); | |
1696 | d->phys_map.ptr = PHYS_MAP_NODE_NIL; | |
1697 | } | |
1698 | ||
50c1e149 AK |
1699 | static void core_begin(MemoryListener *listener) |
1700 | { | |
5312bd8b AK |
1701 | phys_sections_clear(); |
1702 | phys_section_unassigned = dummy_section(&io_mem_unassigned); | |
aa102231 AK |
1703 | phys_section_notdirty = dummy_section(&io_mem_notdirty); |
1704 | phys_section_rom = dummy_section(&io_mem_rom); | |
1705 | phys_section_watch = dummy_section(&io_mem_watch); | |
50c1e149 AK |
1706 | } |
1707 | ||
1d71148e | 1708 | static void tcg_commit(MemoryListener *listener) |
50c1e149 | 1709 | { |
9349b4f9 | 1710 | CPUArchState *env; |
117712c3 AK |
1711 | |
1712 | /* since each CPU stores ram addresses in its TLB cache, we must | |
1713 | reset the modified entries */ | |
1714 | /* XXX: slow! */ |
1715 | for (env = first_cpu; env != NULL; env = env->next_cpu) { |
1716 | tlb_flush(env, 1); | |
1717 | } | |
50c1e149 AK |
1718 | } |
1719 | ||
93632747 AK |
1720 | static void core_log_global_start(MemoryListener *listener) |
1721 | { | |
1722 | cpu_physical_memory_set_dirty_tracking(1); | |
1723 | } | |
1724 | ||
1725 | static void core_log_global_stop(MemoryListener *listener) | |
1726 | { | |
1727 | cpu_physical_memory_set_dirty_tracking(0); | |
1728 | } | |
1729 | ||
4855d41a AK |
1730 | static void io_region_add(MemoryListener *listener, |
1731 | MemoryRegionSection *section) | |
1732 | { | |
a2d33521 AK |
1733 | MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1); |
1734 | ||
1735 | mrio->mr = section->mr; | |
1736 | mrio->offset = section->offset_within_region; | |
1737 | iorange_init(&mrio->iorange, &memory_region_iorange_ops, | |
4855d41a | 1738 | section->offset_within_address_space, section->size); |
a2d33521 | 1739 | ioport_register(&mrio->iorange); |
4855d41a AK |
1740 | } |
1741 | ||
1742 | static void io_region_del(MemoryListener *listener, | |
1743 | MemoryRegionSection *section) | |
1744 | { | |
1745 | isa_unassign_ioport(section->offset_within_address_space, section->size); | |
1746 | } | |
1747 | ||
93632747 | 1748 | static MemoryListener core_memory_listener = { |
50c1e149 | 1749 | .begin = core_begin, |
93632747 AK |
1750 | .log_global_start = core_log_global_start, |
1751 | .log_global_stop = core_log_global_stop, | |
ac1970fb | 1752 | .priority = 1, |
93632747 AK |
1753 | }; |
1754 | ||
4855d41a AK |
1755 | static MemoryListener io_memory_listener = { |
1756 | .region_add = io_region_add, | |
1757 | .region_del = io_region_del, | |
4855d41a AK |
1758 | .priority = 0, |
1759 | }; | |
1760 | ||
1d71148e AK |
1761 | static MemoryListener tcg_memory_listener = { |
1762 | .commit = tcg_commit, | |
1763 | }; | |
1764 | ||
ac1970fb AK |
1765 | void address_space_init_dispatch(AddressSpace *as) |
1766 | { | |
1767 | AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1); | |
1768 | ||
1769 | d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 }; | |
1770 | d->listener = (MemoryListener) { | |
1771 | .begin = mem_begin, | |
1772 | .region_add = mem_add, | |
1773 | .region_nop = mem_add, | |
1774 | .priority = 0, | |
1775 | }; | |
acc9d80b | 1776 | d->as = as; |
ac1970fb AK |
1777 | as->dispatch = d; |
1778 | memory_listener_register(&d->listener, as); | |
1779 | } | |
1780 | ||
83f3c251 AK |
1781 | void address_space_destroy_dispatch(AddressSpace *as) |
1782 | { | |
1783 | AddressSpaceDispatch *d = as->dispatch; | |
1784 | ||
1785 | memory_listener_unregister(&d->listener); | |
1786 | destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1); | |
1787 | g_free(d); | |
1788 | as->dispatch = NULL; | |
1789 | } | |
1790 | ||
62152b8a AK |
1791 | static void memory_map_init(void) |
1792 | { | |
7267c094 | 1793 | system_memory = g_malloc(sizeof(*system_memory)); |
8417cebf | 1794 | memory_region_init(system_memory, "system", INT64_MAX); |
2673a5da AK |
1795 | address_space_init(&address_space_memory, system_memory); |
1796 | address_space_memory.name = "memory"; | |
309cb471 | 1797 | |
7267c094 | 1798 | system_io = g_malloc(sizeof(*system_io)); |
309cb471 | 1799 | memory_region_init(system_io, "io", 65536); |
2673a5da AK |
1800 | address_space_init(&address_space_io, system_io); |
1801 | address_space_io.name = "I/O"; | |
93632747 | 1802 | |
f6790af6 AK |
1803 | memory_listener_register(&core_memory_listener, &address_space_memory); |
1804 | memory_listener_register(&io_memory_listener, &address_space_io); | |
1805 | memory_listener_register(&tcg_memory_listener, &address_space_memory); | |
9e11908f PM |
1806 | |
1807 | dma_context_init(&dma_context_memory, &address_space_memory, | |
1808 | NULL, NULL, NULL); | |
62152b8a AK |
1809 | } |
1810 | ||
1811 | MemoryRegion *get_system_memory(void) | |
1812 | { | |
1813 | return system_memory; | |
1814 | } | |
1815 | ||
309cb471 AK |
1816 | MemoryRegion *get_system_io(void) |
1817 | { | |
1818 | return system_io; | |
1819 | } | |
1820 | ||
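/*
 * Illustrative sketch (not part of the build): boards hang their RAM and
 * device regions off the roots created in memory_map_init().  A minimal
 * machine init would look roughly like this; the region name and size
 * are assumptions for illustration only.
 */
#if 0
void example_board_ram_init(void)
{
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", 128 * 1024 * 1024);
    vmstate_register_ram_global(ram);
    /* map the RAM at guest physical address 0 */
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif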
e2eef170 PB |
1821 | #endif /* !defined(CONFIG_USER_ONLY) */ |
1822 | ||
13eb76e0 FB |
1823 | /* physical memory access (slow version, mainly for debug) */ |
1824 | #if defined(CONFIG_USER_ONLY) | |
9349b4f9 | 1825 | int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr, |
a68fe89c | 1826 | uint8_t *buf, int len, int is_write) |
13eb76e0 FB |
1827 | { |
1828 | int l, flags; | |
1829 | target_ulong page; | |
53a5960a | 1830 | void * p; |
13eb76e0 FB |
1831 | |
1832 | while (len > 0) { | |
1833 | page = addr & TARGET_PAGE_MASK; | |
1834 | l = (page + TARGET_PAGE_SIZE) - addr; | |
1835 | if (l > len) | |
1836 | l = len; | |
1837 | flags = page_get_flags(page); | |
1838 | if (!(flags & PAGE_VALID)) | |
a68fe89c | 1839 | return -1; |
13eb76e0 FB |
1840 | if (is_write) { |
1841 | if (!(flags & PAGE_WRITE)) | |
a68fe89c | 1842 | return -1; |
579a97f7 | 1843 | /* XXX: this code should not depend on lock_user */ |
72fb7daa | 1844 | if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) |
a68fe89c | 1845 | return -1; |
72fb7daa AJ |
1846 | memcpy(p, buf, l); |
1847 | unlock_user(p, addr, l); | |
13eb76e0 FB |
1848 | } else { |
1849 | if (!(flags & PAGE_READ)) | |
a68fe89c | 1850 | return -1; |
579a97f7 | 1851 | /* XXX: this code should not depend on lock_user */ |
72fb7daa | 1852 | if (!(p = lock_user(VERIFY_READ, addr, l, 1))) |
a68fe89c | 1853 | return -1; |
72fb7daa | 1854 | memcpy(buf, p, l); |
5b257578 | 1855 | unlock_user(p, addr, 0); |
13eb76e0 FB |
1856 | } |
1857 | len -= l; | |
1858 | buf += l; | |
1859 | addr += l; | |
1860 | } | |
a68fe89c | 1861 | return 0; |
13eb76e0 | 1862 | } |
8df1cd07 | 1863 | |
13eb76e0 | 1864 | #else |
51d7a9eb | 1865 | |
a8170e5e AK |
1866 | static void invalidate_and_set_dirty(hwaddr addr, |
1867 | hwaddr length) | |
51d7a9eb AP |
1868 | { |
1869 | if (!cpu_physical_memory_is_dirty(addr)) { | |
1870 | /* invalidate code */ | |
1871 | tb_invalidate_phys_page_range(addr, addr + length, 0); | |
1872 | /* set dirty bit */ | |
1873 | cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG)); | |
1874 | } | |
e226939d | 1875 | xen_modified_memory(addr, length); |
51d7a9eb AP |
1876 | } |
1877 | ||
2bbfa05d PB |
1878 | static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) |
1879 | { | |
1880 | if (memory_region_is_ram(mr)) { | |
1881 | return !(is_write && mr->readonly); | |
1882 | } | |
1883 | if (memory_region_is_romd(mr)) { | |
1884 | return !is_write; | |
1885 | } | |
1886 | ||
1887 | return false; | |
1888 | } | |
1889 | ||
f52cc467 | 1890 | static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr) |
82f2563f | 1891 | { |
f52cc467 | 1892 | if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) { |
82f2563f PB |
1893 | return 4; |
1894 | } | |
f52cc467 | 1895 | if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) { |
82f2563f PB |
1896 | return 2; |
1897 | } | |
1898 | return 1; | |
1899 | } | |
1900 | ||
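/*
 * Worked example: an 8-byte access at an address with (addr & 3) == 2,
 * on a region without .impl.unaligned, is split by the access loop below
 * into a 2-byte, then a 4-byte, then a 2-byte access:
 *
 *   l = 8, addr % 4 == 2  ->  memory_access_size() returns 2
 *   l = 6, addr % 4 == 0  ->  returns 4
 *   l = 2, addr % 2 == 0  ->  returns 2
 */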
fd8aaa76 | 1901 | bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, |
ac1970fb | 1902 | int len, bool is_write) |
13eb76e0 | 1903 | { |
149f54b5 | 1904 | hwaddr l; |
13eb76e0 | 1905 | uint8_t *ptr; |
791af8c8 | 1906 | uint64_t val; |
149f54b5 | 1907 | hwaddr addr1; |
5c8a00ce | 1908 | MemoryRegion *mr; |
fd8aaa76 | 1909 | bool error = false; |
3b46e624 | 1910 | |
13eb76e0 | 1911 | while (len > 0) { |
149f54b5 | 1912 | l = len; |
5c8a00ce | 1913 | mr = address_space_translate(as, addr, &addr1, &l, is_write); |
3b46e624 | 1914 | |
13eb76e0 | 1915 | if (is_write) { |
5c8a00ce PB |
1916 | if (!memory_access_is_direct(mr, is_write)) { |
1917 | l = memory_access_size(mr, l, addr1); | |
6a00d601 FB |
1918 | /* XXX: could force cpu_single_env to NULL to avoid |
1919 | potential bugs */ | |
82f2563f | 1920 | if (l == 4) { |
1c213d19 | 1921 | /* 32 bit write access */ |
c27004ec | 1922 | val = ldl_p(buf); |
5c8a00ce | 1923 | error |= io_mem_write(mr, addr1, val, 4); |
82f2563f | 1924 | } else if (l == 2) { |
1c213d19 | 1925 | /* 16 bit write access */ |
c27004ec | 1926 | val = lduw_p(buf); |
5c8a00ce | 1927 | error |= io_mem_write(mr, addr1, val, 2); |
13eb76e0 | 1928 | } else { |
1c213d19 | 1929 | /* 8 bit write access */ |
c27004ec | 1930 | val = ldub_p(buf); |
5c8a00ce | 1931 | error |= io_mem_write(mr, addr1, val, 1); |
13eb76e0 | 1932 | } |
2bbfa05d | 1933 | } else { |
5c8a00ce | 1934 | addr1 += memory_region_get_ram_addr(mr); |
13eb76e0 | 1935 | /* RAM case */ |
5579c7f3 | 1936 | ptr = qemu_get_ram_ptr(addr1); |
13eb76e0 | 1937 | memcpy(ptr, buf, l); |
51d7a9eb | 1938 | invalidate_and_set_dirty(addr1, l); |
13eb76e0 FB |
1939 | } |
1940 | } else { | |
5c8a00ce | 1941 | if (!memory_access_is_direct(mr, is_write)) { |
13eb76e0 | 1942 | /* I/O case */ |
5c8a00ce | 1943 | l = memory_access_size(mr, l, addr1); |
82f2563f | 1944 | if (l == 4) { |
13eb76e0 | 1945 | /* 32 bit read access */ |
5c8a00ce | 1946 | error |= io_mem_read(mr, addr1, &val, 4); |
c27004ec | 1947 | stl_p(buf, val); |
82f2563f | 1948 | } else if (l == 2) { |
13eb76e0 | 1949 | /* 16 bit read access */ |
5c8a00ce | 1950 | error |= io_mem_read(mr, addr1, &val, 2); |
c27004ec | 1951 | stw_p(buf, val); |
13eb76e0 | 1952 | } else { |
1c213d19 | 1953 | /* 8 bit read access */ |
5c8a00ce | 1954 | error |= io_mem_read(mr, addr1, &val, 1); |
c27004ec | 1955 | stb_p(buf, val); |
13eb76e0 FB |
1956 | } |
1957 | } else { | |
1958 | /* RAM case */ | |
5c8a00ce | 1959 | ptr = qemu_get_ram_ptr(mr->ram_addr + addr1); |
f3705d53 | 1960 | memcpy(buf, ptr, l); |
13eb76e0 FB |
1961 | } |
1962 | } | |
1963 | len -= l; | |
1964 | buf += l; | |
1965 | addr += l; | |
1966 | } | |
fd8aaa76 PB |
1967 | |
1968 | return error; | |
13eb76e0 | 1969 | } |
8df1cd07 | 1970 | |
fd8aaa76 | 1971 | bool address_space_write(AddressSpace *as, hwaddr addr, |
ac1970fb AK |
1972 | const uint8_t *buf, int len) |
1973 | { | |
fd8aaa76 | 1974 | return address_space_rw(as, addr, (uint8_t *)buf, len, true); |
ac1970fb AK |
1975 | } |
1976 | ||
fd8aaa76 | 1977 | bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len) |
ac1970fb | 1978 | { |
fd8aaa76 | 1979 | return address_space_rw(as, addr, buf, len, false); |
ac1970fb AK |
1980 | } |
1981 | ||
1982 | ||
a8170e5e | 1983 | void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf, |
ac1970fb AK |
1984 | int len, int is_write) |
1985 | { | |
fd8aaa76 | 1986 | address_space_rw(&address_space_memory, addr, buf, len, is_write); |
ac1970fb AK |
1987 | } |
1988 | ||
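/*
 * Illustrative sketch (not part of the build): device emulation usually
 * goes through these wrappers to copy guest-physical memory.  The
 * descriptor layout and address below are assumptions for illustration.
 */
#if 0
struct example_desc {
    uint32_t next;
    uint32_t flags;
};

void read_example_descriptor(hwaddr desc_pa, struct example_desc *d)
{
    /* copies through RAM directly, or via io_mem_read for MMIO */
    cpu_physical_memory_read(desc_pa, (uint8_t *)d, sizeof(*d));
}
#endif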
d0ecd2aa | 1989 | /* used for ROM loading : can write in RAM and ROM */ |
a8170e5e | 1990 | void cpu_physical_memory_write_rom(hwaddr addr, |
d0ecd2aa FB |
1991 | const uint8_t *buf, int len) |
1992 | { | |
149f54b5 | 1993 | hwaddr l; |
d0ecd2aa | 1994 | uint8_t *ptr; |
149f54b5 | 1995 | hwaddr addr1; |
5c8a00ce | 1996 | MemoryRegion *mr; |
3b46e624 | 1997 | |
d0ecd2aa | 1998 | while (len > 0) { |
149f54b5 | 1999 | l = len; |
5c8a00ce PB |
2000 | mr = address_space_translate(&address_space_memory, |
2001 | addr, &addr1, &l, true); | |
3b46e624 | 2002 | |
5c8a00ce PB |
2003 | if (!(memory_region_is_ram(mr) || |
2004 | memory_region_is_romd(mr))) { | |
d0ecd2aa FB |
2005 | /* do nothing */ |
2006 | } else { | |
5c8a00ce | 2007 | addr1 += memory_region_get_ram_addr(mr); |
d0ecd2aa | 2008 | /* ROM/RAM case */ |
5579c7f3 | 2009 | ptr = qemu_get_ram_ptr(addr1); |
d0ecd2aa | 2010 | memcpy(ptr, buf, l); |
51d7a9eb | 2011 | invalidate_and_set_dirty(addr1, l); |
d0ecd2aa FB |
2012 | } |
2013 | len -= l; | |
2014 | buf += l; | |
2015 | addr += l; | |
2016 | } | |
2017 | } | |
2018 | ||
6d16c2f8 AL |
2019 | typedef struct { |
2020 | void *buffer; | |
a8170e5e AK |
2021 | hwaddr addr; |
2022 | hwaddr len; | |
6d16c2f8 AL |
2023 | } BounceBuffer; |
2024 | ||
2025 | static BounceBuffer bounce; | |
2026 | ||
ba223c29 AL |
2027 | typedef struct MapClient { |
2028 | void *opaque; | |
2029 | void (*callback)(void *opaque); | |
72cf2d4f | 2030 | QLIST_ENTRY(MapClient) link; |
ba223c29 AL |
2031 | } MapClient; |
2032 | ||
72cf2d4f BS |
2033 | static QLIST_HEAD(map_client_list, MapClient) map_client_list |
2034 | = QLIST_HEAD_INITIALIZER(map_client_list); | |
ba223c29 AL |
2035 | |
2036 | void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)) | |
2037 | { | |
7267c094 | 2038 | MapClient *client = g_malloc(sizeof(*client)); |
ba223c29 AL |
2039 | |
2040 | client->opaque = opaque; | |
2041 | client->callback = callback; | |
72cf2d4f | 2042 | QLIST_INSERT_HEAD(&map_client_list, client, link); |
ba223c29 AL |
2043 | return client; |
2044 | } | |
2045 | ||
8b9c99d9 | 2046 | static void cpu_unregister_map_client(void *_client) |
ba223c29 AL |
2047 | { |
2048 | MapClient *client = (MapClient *)_client; | |
2049 | ||
72cf2d4f | 2050 | QLIST_REMOVE(client, link); |
7267c094 | 2051 | g_free(client); |
ba223c29 AL |
2052 | } |
2053 | ||
2054 | static void cpu_notify_map_clients(void) | |
2055 | { | |
2056 | MapClient *client; | |
2057 | ||
72cf2d4f BS |
2058 | while (!QLIST_EMPTY(&map_client_list)) { |
2059 | client = QLIST_FIRST(&map_client_list); | |
ba223c29 | 2060 | client->callback(client->opaque); |
34d5e948 | 2061 | cpu_unregister_map_client(client); |
ba223c29 AL |
2062 | } |
2063 | } | |
2064 | ||
51644ab7 PB |
2065 | bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write) |
2066 | { | |
5c8a00ce | 2067 | MemoryRegion *mr; |
51644ab7 PB |
2068 | hwaddr l, xlat; |
2069 | ||
2070 | while (len > 0) { | |
2071 | l = len; | |
5c8a00ce PB |
2072 | mr = address_space_translate(as, addr, &xlat, &l, is_write); |
2073 | if (!memory_access_is_direct(mr, is_write)) { | |
2074 | l = memory_access_size(mr, l, addr); | |
2075 | if (!memory_region_access_valid(mr, xlat, l, is_write)) { | |
51644ab7 PB |
2076 | return false; |
2077 | } | |
2078 | } | |
2079 | ||
2080 | len -= l; | |
2081 | addr += l; | |
2082 | } | |
2083 | return true; | |
2084 | } | |
2085 | ||
6d16c2f8 AL |
2086 | /* Map a physical memory region into a host virtual address. |
2087 | * May map a subset of the requested range, given by and returned in *plen. | |
2088 | * May return NULL if resources needed to perform the mapping are exhausted. | |
2089 | * Use only for reads OR writes - not for read-modify-write operations. | |
ba223c29 AL |
2090 | * Use cpu_register_map_client() to know when retrying the map operation is |
2091 | * likely to succeed. | |
6d16c2f8 | 2092 | */ |
ac1970fb | 2093 | void *address_space_map(AddressSpace *as, |
a8170e5e AK |
2094 | hwaddr addr, |
2095 | hwaddr *plen, | |
ac1970fb | 2096 | bool is_write) |
6d16c2f8 | 2097 | { |
a8170e5e AK |
2098 | hwaddr len = *plen; |
2099 | hwaddr todo = 0; | |
149f54b5 | 2100 | hwaddr l, xlat; |
5c8a00ce | 2101 | MemoryRegion *mr; |
f15fbc4b | 2102 | ram_addr_t raddr = RAM_ADDR_MAX; |
8ab934f9 SS |
2103 | ram_addr_t rlen; |
2104 | void *ret; | |
6d16c2f8 AL |
2105 | |
2106 | while (len > 0) { | |
149f54b5 | 2107 | l = len; |
5c8a00ce | 2108 | mr = address_space_translate(as, addr, &xlat, &l, is_write); |
6d16c2f8 | 2109 | |
5c8a00ce | 2110 | if (!memory_access_is_direct(mr, is_write)) { |
38bee5dc | 2111 | if (todo || bounce.buffer) { |
6d16c2f8 AL |
2112 | break; |
2113 | } | |
2114 | bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE); | |
2115 | bounce.addr = addr; | |
2116 | bounce.len = l; | |
2117 | if (!is_write) { | |
ac1970fb | 2118 | address_space_read(as, addr, bounce.buffer, l); |
6d16c2f8 | 2119 | } |
38bee5dc SS |
2120 | |
2121 | *plen = l; | |
2122 | return bounce.buffer; | |
6d16c2f8 | 2123 | } |
8ab934f9 | 2124 | if (!todo) { |
5c8a00ce | 2125 | raddr = memory_region_get_ram_addr(mr) + xlat; |
149f54b5 | 2126 | } else { |
5c8a00ce | 2127 | if (memory_region_get_ram_addr(mr) + xlat != raddr + todo) { |
149f54b5 PB |
2128 | break; |
2129 | } | |
8ab934f9 | 2130 | } |
6d16c2f8 AL |
2131 | |
2132 | len -= l; | |
2133 | addr += l; | |
38bee5dc | 2134 | todo += l; |
6d16c2f8 | 2135 | } |
8ab934f9 SS |
2136 | rlen = todo; |
2137 | ret = qemu_ram_ptr_length(raddr, &rlen); | |
2138 | *plen = rlen; | |
2139 | return ret; | |
6d16c2f8 AL |
2140 | } |
2141 | ||
ac1970fb | 2142 | /* Unmaps a memory region previously mapped by address_space_map(). |
6d16c2f8 AL |
2143 | * Will also mark the memory as dirty if is_write == 1. access_len gives |
2144 | * the amount of memory that was actually read or written by the caller. | |
2145 | */ | |
a8170e5e AK |
2146 | void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, |
2147 | int is_write, hwaddr access_len) | |
6d16c2f8 AL |
2148 | { |
2149 | if (buffer != bounce.buffer) { | |
2150 | if (is_write) { | |
e890261f | 2151 | ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer); |
6d16c2f8 AL |
2152 | while (access_len) { |
2153 | unsigned l; | |
2154 | l = TARGET_PAGE_SIZE; | |
2155 | if (l > access_len) | |
2156 | l = access_len; | |
51d7a9eb | 2157 | invalidate_and_set_dirty(addr1, l); |
6d16c2f8 AL |
2158 | addr1 += l; |
2159 | access_len -= l; | |
2160 | } | |
2161 | } | |
868bb33f | 2162 | if (xen_enabled()) { |
e41d7c69 | 2163 | xen_invalidate_map_cache_entry(buffer); |
050a0ddf | 2164 | } |
6d16c2f8 AL |
2165 | return; |
2166 | } | |
2167 | if (is_write) { | |
ac1970fb | 2168 | address_space_write(as, bounce.addr, bounce.buffer, access_len); |
6d16c2f8 | 2169 | } |
f8a83245 | 2170 | qemu_vfree(bounce.buffer); |
6d16c2f8 | 2171 | bounce.buffer = NULL; |
ba223c29 | 2172 | cpu_notify_map_clients(); |
6d16c2f8 | 2173 | } |
d0ecd2aa | 2174 | |
a8170e5e AK |
2175 | void *cpu_physical_memory_map(hwaddr addr, |
2176 | hwaddr *plen, | |
ac1970fb AK |
2177 | int is_write) |
2178 | { | |
2179 | return address_space_map(&address_space_memory, addr, plen, is_write); | |
2180 | } | |
2181 | ||
a8170e5e AK |
2182 | void cpu_physical_memory_unmap(void *buffer, hwaddr len, |
2183 | int is_write, hwaddr access_len) | |
ac1970fb AK |
2184 | { |
2185 | return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len); | |
2186 | } | |
2187 | ||
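/*
 * Illustrative sketch (not part of the build): the canonical map/unmap
 * pattern for zero-copy DMA.  Note that *plen may come back smaller than
 * requested (bounce buffer or non-contiguous RAM), so real callers loop
 * or fall back to cpu_physical_memory_rw(); values below are assumptions.
 */
#if 0
void dma_write_example(hwaddr guest_pa, const uint8_t *data, hwaddr size)
{
    hwaddr plen = size;
    void *host = cpu_physical_memory_map(guest_pa, &plen, 1 /* is_write */);

    if (host) {
        memcpy(host, data, plen);             /* plen may be < size */
        cpu_physical_memory_unmap(host, plen, 1, plen);
    }
}
#endif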
8df1cd07 | 2188 | /* warning: addr must be aligned */ |
a8170e5e | 2189 | static inline uint32_t ldl_phys_internal(hwaddr addr, |
1e78bcc1 | 2190 | enum device_endian endian) |
8df1cd07 | 2191 | { |
8df1cd07 | 2192 | uint8_t *ptr; |
791af8c8 | 2193 | uint64_t val; |
5c8a00ce | 2194 | MemoryRegion *mr; |
149f54b5 PB |
2195 | hwaddr l = 4; |
2196 | hwaddr addr1; | |
8df1cd07 | 2197 | |
5c8a00ce PB |
2198 | mr = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2199 | false); | |
2200 | if (l < 4 || !memory_access_is_direct(mr, false)) { | |
8df1cd07 | 2201 | /* I/O case */ |
5c8a00ce | 2202 | io_mem_read(mr, addr1, &val, 4); |
1e78bcc1 AG |
2203 | #if defined(TARGET_WORDS_BIGENDIAN) |
2204 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2205 | val = bswap32(val); | |
2206 | } | |
2207 | #else | |
2208 | if (endian == DEVICE_BIG_ENDIAN) { | |
2209 | val = bswap32(val); | |
2210 | } | |
2211 | #endif | |
8df1cd07 FB |
2212 | } else { |
2213 | /* RAM case */ | |
5c8a00ce | 2214 | ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr) |
06ef3525 | 2215 | & TARGET_PAGE_MASK) |
149f54b5 | 2216 | + addr1); |
1e78bcc1 AG |
2217 | switch (endian) { |
2218 | case DEVICE_LITTLE_ENDIAN: | |
2219 | val = ldl_le_p(ptr); | |
2220 | break; | |
2221 | case DEVICE_BIG_ENDIAN: | |
2222 | val = ldl_be_p(ptr); | |
2223 | break; | |
2224 | default: | |
2225 | val = ldl_p(ptr); | |
2226 | break; | |
2227 | } | |
8df1cd07 FB |
2228 | } |
2229 | return val; | |
2230 | } | |
2231 | ||
a8170e5e | 2232 | uint32_t ldl_phys(hwaddr addr) |
1e78bcc1 AG |
2233 | { |
2234 | return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN); | |
2235 | } | |
2236 | ||
a8170e5e | 2237 | uint32_t ldl_le_phys(hwaddr addr) |
1e78bcc1 AG |
2238 | { |
2239 | return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN); | |
2240 | } | |
2241 | ||
a8170e5e | 2242 | uint32_t ldl_be_phys(hwaddr addr) |
1e78bcc1 AG |
2243 | { |
2244 | return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN); | |
2245 | } | |
2246 | ||
84b7b8e7 | 2247 | /* warning: addr must be aligned */ |
a8170e5e | 2248 | static inline uint64_t ldq_phys_internal(hwaddr addr, |
1e78bcc1 | 2249 | enum device_endian endian) |
84b7b8e7 | 2250 | { |
84b7b8e7 FB |
2251 | uint8_t *ptr; |
2252 | uint64_t val; | |
5c8a00ce | 2253 | MemoryRegion *mr; |
149f54b5 PB |
2254 | hwaddr l = 8; |
2255 | hwaddr addr1; | |
84b7b8e7 | 2256 | |
5c8a00ce PB |
2257 | mr = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2258 | false); | |
2259 | if (l < 8 || !memory_access_is_direct(mr, false)) { | |
84b7b8e7 | 2260 | /* I/O case */ |
5c8a00ce | 2261 | io_mem_read(mr, addr1, &val, 8); |
968a5627 PB |
2262 | #if defined(TARGET_WORDS_BIGENDIAN) |
2263 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2264 | val = bswap64(val); | |
2265 | } | |
2266 | #else | |
2267 | if (endian == DEVICE_BIG_ENDIAN) { | |
2268 | val = bswap64(val); | |
2269 | } | |
84b7b8e7 FB |
2270 | #endif |
2271 | } else { | |
2272 | /* RAM case */ | |
5c8a00ce | 2273 | ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr) |
06ef3525 | 2274 | & TARGET_PAGE_MASK) |
149f54b5 | 2275 | + addr1); |
1e78bcc1 AG |
2276 | switch (endian) { |
2277 | case DEVICE_LITTLE_ENDIAN: | |
2278 | val = ldq_le_p(ptr); | |
2279 | break; | |
2280 | case DEVICE_BIG_ENDIAN: | |
2281 | val = ldq_be_p(ptr); | |
2282 | break; | |
2283 | default: | |
2284 | val = ldq_p(ptr); | |
2285 | break; | |
2286 | } | |
84b7b8e7 FB |
2287 | } |
2288 | return val; | |
2289 | } | |
2290 | ||
a8170e5e | 2291 | uint64_t ldq_phys(hwaddr addr) |
1e78bcc1 AG |
2292 | { |
2293 | return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN); | |
2294 | } | |
2295 | ||
a8170e5e | 2296 | uint64_t ldq_le_phys(hwaddr addr) |
1e78bcc1 AG |
2297 | { |
2298 | return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN); | |
2299 | } | |
2300 | ||
a8170e5e | 2301 | uint64_t ldq_be_phys(hwaddr addr) |
1e78bcc1 AG |
2302 | { |
2303 | return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN); | |
2304 | } | |
2305 | ||
aab33094 | 2306 | /* XXX: optimize */ |
a8170e5e | 2307 | uint32_t ldub_phys(hwaddr addr) |
aab33094 FB |
2308 | { |
2309 | uint8_t val; | |
2310 | cpu_physical_memory_read(addr, &val, 1); | |
2311 | return val; | |
2312 | } | |
2313 | ||
733f0b02 | 2314 | /* warning: addr must be aligned */ |
a8170e5e | 2315 | static inline uint32_t lduw_phys_internal(hwaddr addr, |
1e78bcc1 | 2316 | enum device_endian endian) |
aab33094 | 2317 | { |
733f0b02 MT |
2318 | uint8_t *ptr; |
2319 | uint64_t val; | |
5c8a00ce | 2320 | MemoryRegion *mr; |
149f54b5 PB |
2321 | hwaddr l = 2; |
2322 | hwaddr addr1; | |
733f0b02 | 2323 | |
5c8a00ce PB |
2324 | mr = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2325 | false); | |
2326 | if (l < 2 || !memory_access_is_direct(mr, false)) { | |
733f0b02 | 2327 | /* I/O case */ |
5c8a00ce | 2328 | io_mem_read(mr, addr1, &val, 2); |
1e78bcc1 AG |
2329 | #if defined(TARGET_WORDS_BIGENDIAN) |
2330 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2331 | val = bswap16(val); | |
2332 | } | |
2333 | #else | |
2334 | if (endian == DEVICE_BIG_ENDIAN) { | |
2335 | val = bswap16(val); | |
2336 | } | |
2337 | #endif | |
733f0b02 MT |
2338 | } else { |
2339 | /* RAM case */ | |
5c8a00ce | 2340 | ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr) |
06ef3525 | 2341 | & TARGET_PAGE_MASK) |
149f54b5 | 2342 | + addr1); |
1e78bcc1 AG |
2343 | switch (endian) { |
2344 | case DEVICE_LITTLE_ENDIAN: | |
2345 | val = lduw_le_p(ptr); | |
2346 | break; | |
2347 | case DEVICE_BIG_ENDIAN: | |
2348 | val = lduw_be_p(ptr); | |
2349 | break; | |
2350 | default: | |
2351 | val = lduw_p(ptr); | |
2352 | break; | |
2353 | } | |
733f0b02 MT |
2354 | } |
2355 | return val; | |
aab33094 FB |
2356 | } |
2357 | ||
a8170e5e | 2358 | uint32_t lduw_phys(hwaddr addr) |
1e78bcc1 AG |
2359 | { |
2360 | return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN); | |
2361 | } | |
2362 | ||
a8170e5e | 2363 | uint32_t lduw_le_phys(hwaddr addr) |
1e78bcc1 AG |
2364 | { |
2365 | return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN); | |
2366 | } | |
2367 | ||
a8170e5e | 2368 | uint32_t lduw_be_phys(hwaddr addr) |
1e78bcc1 AG |
2369 | { |
2370 | return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN); | |
2371 | } | |
2372 | ||
8df1cd07 FB |
2373 | /* warning: addr must be aligned. The ram page is not marked as dirty |
2374 | and the code inside is not invalidated. It is useful if the dirty | |
2375 | bits are used to track modified PTEs */ | |
a8170e5e | 2376 | void stl_phys_notdirty(hwaddr addr, uint32_t val) |
8df1cd07 | 2377 | { |
8df1cd07 | 2378 | uint8_t *ptr; |
5c8a00ce | 2379 | MemoryRegion *mr; |
149f54b5 PB |
2380 | hwaddr l = 4; |
2381 | hwaddr addr1; | |
8df1cd07 | 2382 | |
5c8a00ce PB |
2383 | mr = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2384 | true); | |
2385 | if (l < 4 || !memory_access_is_direct(mr, true)) { | |
2386 | io_mem_write(mr, addr1, val, 4); | |
8df1cd07 | 2387 | } else { |
5c8a00ce | 2388 | addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; |
5579c7f3 | 2389 | ptr = qemu_get_ram_ptr(addr1); |
8df1cd07 | 2390 | stl_p(ptr, val); |
74576198 AL |
2391 | |
2392 | if (unlikely(in_migration)) { | |
2393 | if (!cpu_physical_memory_is_dirty(addr1)) { | |
2394 | /* invalidate code */ | |
2395 | tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); | |
2396 | /* set dirty bit */ | |
f7c11b53 YT |
2397 | cpu_physical_memory_set_dirty_flags( |
2398 | addr1, (0xff & ~CODE_DIRTY_FLAG)); | |
74576198 AL |
2399 | } |
2400 | } | |
8df1cd07 FB |
2401 | } |
2402 | } | |
2403 | ||
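/*
 * Illustrative sketch (not part of the build): a target MMU helper that
 * sets the dirty bit in a guest page-table entry would use
 * stl_phys_notdirty() so the update does not itself dirty the page or
 * invalidate translated code.  The PTE bit layout below is hypothetical.
 */
#if 0
void set_pte_dirty_example(hwaddr pte_pa)
{
    uint32_t pte = ldl_phys(pte_pa);

    stl_phys_notdirty(pte_pa, pte | 0x40 /* hypothetical dirty bit */);
}
#endif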
2404 | /* warning: addr must be aligned */ | |
a8170e5e | 2405 | static inline void stl_phys_internal(hwaddr addr, uint32_t val, |
1e78bcc1 | 2406 | enum device_endian endian) |
8df1cd07 | 2407 | { |
8df1cd07 | 2408 | uint8_t *ptr; |
5c8a00ce | 2409 | MemoryRegion *mr; |
149f54b5 PB |
2410 | hwaddr l = 4; |
2411 | hwaddr addr1; | |
8df1cd07 | 2412 | |
5c8a00ce PB |
2413 | mr = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2414 | true); | |
2415 | if (l < 4 || !memory_access_is_direct(mr, true)) { | |
1e78bcc1 AG |
2416 | #if defined(TARGET_WORDS_BIGENDIAN) |
2417 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2418 | val = bswap32(val); | |
2419 | } | |
2420 | #else | |
2421 | if (endian == DEVICE_BIG_ENDIAN) { | |
2422 | val = bswap32(val); | |
2423 | } | |
2424 | #endif | |
5c8a00ce | 2425 | io_mem_write(mr, addr1, val, 4); |
8df1cd07 | 2426 | } else { |
8df1cd07 | 2427 | /* RAM case */ |
5c8a00ce | 2428 | addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; |
5579c7f3 | 2429 | ptr = qemu_get_ram_ptr(addr1); |
1e78bcc1 AG |
2430 | switch (endian) { |
2431 | case DEVICE_LITTLE_ENDIAN: | |
2432 | stl_le_p(ptr, val); | |
2433 | break; | |
2434 | case DEVICE_BIG_ENDIAN: | |
2435 | stl_be_p(ptr, val); | |
2436 | break; | |
2437 | default: | |
2438 | stl_p(ptr, val); | |
2439 | break; | |
2440 | } | |
51d7a9eb | 2441 | invalidate_and_set_dirty(addr1, 4); |
8df1cd07 FB |
2442 | } |
2443 | } | |
2444 | ||
a8170e5e | 2445 | void stl_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2446 | { |
2447 | stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN); | |
2448 | } | |
2449 | ||
a8170e5e | 2450 | void stl_le_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2451 | { |
2452 | stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN); | |
2453 | } | |
2454 | ||
a8170e5e | 2455 | void stl_be_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2456 | { |
2457 | stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN); | |
2458 | } | |
2459 | ||
aab33094 | 2460 | /* XXX: optimize */ |
a8170e5e | 2461 | void stb_phys(hwaddr addr, uint32_t val) |
aab33094 FB |
2462 | { |
2463 | uint8_t v = val; | |
2464 | cpu_physical_memory_write(addr, &v, 1); | |
2465 | } | |
2466 | ||
733f0b02 | 2467 | /* warning: addr must be aligned */ |
a8170e5e | 2468 | static inline void stw_phys_internal(hwaddr addr, uint32_t val, |
1e78bcc1 | 2469 | enum device_endian endian) |
aab33094 | 2470 | { |
733f0b02 | 2471 | uint8_t *ptr; |
5c8a00ce | 2472 | MemoryRegion *mr; |
149f54b5 PB |
2473 | hwaddr l = 2; |
2474 | hwaddr addr1; | |
733f0b02 | 2475 | |
5c8a00ce PB |
2476 | mr = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2477 | true); | |
2478 | if (l < 2 || !memory_access_is_direct(mr, true)) { | |
1e78bcc1 AG |
2479 | #if defined(TARGET_WORDS_BIGENDIAN) |
2480 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2481 | val = bswap16(val); | |
2482 | } | |
2483 | #else | |
2484 | if (endian == DEVICE_BIG_ENDIAN) { | |
2485 | val = bswap16(val); | |
2486 | } | |
2487 | #endif | |
5c8a00ce | 2488 | io_mem_write(mr, addr1, val, 2); |
733f0b02 | 2489 | } else { |
733f0b02 | 2490 | /* RAM case */ |
5c8a00ce | 2491 | addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; |
733f0b02 | 2492 | ptr = qemu_get_ram_ptr(addr1); |
1e78bcc1 AG |
2493 | switch (endian) { |
2494 | case DEVICE_LITTLE_ENDIAN: | |
2495 | stw_le_p(ptr, val); | |
2496 | break; | |
2497 | case DEVICE_BIG_ENDIAN: | |
2498 | stw_be_p(ptr, val); | |
2499 | break; | |
2500 | default: | |
2501 | stw_p(ptr, val); | |
2502 | break; | |
2503 | } | |
51d7a9eb | 2504 | invalidate_and_set_dirty(addr1, 2); |
733f0b02 | 2505 | } |
aab33094 FB |
2506 | } |
2507 | ||
a8170e5e | 2508 | void stw_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2509 | { |
2510 | stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN); | |
2511 | } | |
2512 | ||
a8170e5e | 2513 | void stw_le_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2514 | { |
2515 | stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN); | |
2516 | } | |
2517 | ||
a8170e5e | 2518 | void stw_be_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2519 | { |
2520 | stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN); | |
2521 | } | |
2522 | ||
aab33094 | 2523 | /* XXX: optimize */ |
a8170e5e | 2524 | void stq_phys(hwaddr addr, uint64_t val) |
aab33094 FB |
2525 | { |
2526 | val = tswap64(val); | |
71d2b725 | 2527 | cpu_physical_memory_write(addr, &val, 8); |
aab33094 FB |
2528 | } |
2529 | ||
a8170e5e | 2530 | void stq_le_phys(hwaddr addr, uint64_t val) |
1e78bcc1 AG |
2531 | { |
2532 | val = cpu_to_le64(val); | |
2533 | cpu_physical_memory_write(addr, &val, 8); | |
2534 | } | |
2535 | ||
a8170e5e | 2536 | void stq_be_phys(hwaddr addr, uint64_t val) |
1e78bcc1 AG |
2537 | { |
2538 | val = cpu_to_be64(val); | |
2539 | cpu_physical_memory_write(addr, &val, 8); | |
2540 | } | |
2541 | ||
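/*
 * Illustrative sketch (not part of the build): the ld*_phys/st*_phys
 * families pair up by size and endianness, so a little-endian store is
 * read back verbatim by the matching little-endian load regardless of
 * host byte order.  The address is an assumption for illustration only.
 */
#if 0
void endian_helper_example(hwaddr pa)
{
    stl_le_phys(pa, 0x12345678);
    assert(ldl_le_phys(pa) == 0x12345678);
}
#endif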
5e2972fd | 2542 | /* virtual memory access for debug (includes writing to ROM) */ |
9349b4f9 | 2543 | int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr, |
b448f2f3 | 2544 | uint8_t *buf, int len, int is_write) |
13eb76e0 FB |
2545 | { |
2546 | int l; | |
a8170e5e | 2547 | hwaddr phys_addr; |
9b3c35e0 | 2548 | target_ulong page; |
13eb76e0 FB |
2549 | |
2550 | while (len > 0) { | |
2551 | page = addr & TARGET_PAGE_MASK; | |
2552 | phys_addr = cpu_get_phys_page_debug(env, page); | |
2553 | /* if no physical page mapped, return an error */ | |
2554 | if (phys_addr == -1) | |
2555 | return -1; | |
2556 | l = (page + TARGET_PAGE_SIZE) - addr; | |
2557 | if (l > len) | |
2558 | l = len; | |
5e2972fd | 2559 | phys_addr += (addr & ~TARGET_PAGE_MASK); |
5e2972fd AL |
2560 | if (is_write) |
2561 | cpu_physical_memory_write_rom(phys_addr, buf, l); | |
2562 | else | |
5e2972fd | 2563 | cpu_physical_memory_rw(phys_addr, buf, l, is_write); |
13eb76e0 FB |
2564 | len -= l; |
2565 | buf += l; | |
2566 | addr += l; | |
2567 | } | |
2568 | return 0; | |
2569 | } | |
a68fe89c | 2570 | #endif |
13eb76e0 | 2571 | |
8e4a424b BS |
2572 | #if !defined(CONFIG_USER_ONLY) |
2573 | ||
2574 | /* | |
2575 | * A helper function for the _utterly broken_ virtio device model to find out if | |
2576 | * it's running on a big endian machine. Don't do this at home kids! | |
2577 | */ | |
2578 | bool virtio_is_big_endian(void); | |
2579 | bool virtio_is_big_endian(void) | |
2580 | { | |
2581 | #if defined(TARGET_WORDS_BIGENDIAN) | |
2582 | return true; | |
2583 | #else | |
2584 | return false; | |
2585 | #endif | |
2586 | } | |
2587 | ||
2588 | #endif | |
2589 | ||
76f35538 | 2590 | #ifndef CONFIG_USER_ONLY |
a8170e5e | 2591 | bool cpu_physical_memory_is_io(hwaddr phys_addr) |
76f35538 | 2592 | { |
5c8a00ce | 2593 | MemoryRegion *mr; |
149f54b5 | 2594 | hwaddr l = 1; |
76f35538 | 2595 | |
5c8a00ce PB |
2596 | mr = address_space_translate(&address_space_memory, |
2597 | phys_addr, &phys_addr, &l, false); | |
76f35538 | 2598 | |
5c8a00ce PB |
2599 | return !(memory_region_is_ram(mr) || |
2600 | memory_region_is_romd(mr)); | |
76f35538 WC |
2601 | } |
2602 | #endif |