/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

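/* Make sure the node pool can hold at least 'nodes' more entries
   without reallocating; the pool grows geometrically, like a typical
   dynamic array. */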
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

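/* Recursive step of phys_page_set(): descend 'level' levels of the
   radix tree, allocating interior nodes on demand, and point every page
   in [*index, *index + *nb) at section number 'leaf'.  Aligned runs of
   'step' pages are recorded as leaves at this level rather than being
   expanded further.  For example, with 4 KiB target pages, mapping
   2 MiB of RAM at physical address 0x100000 records 512 leaves starting
   at page index 0x100. */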
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

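/* Look up the section covering physical page 'index': consume L2_BITS
   of the index per level until a leaf is reached.  Unpopulated slots
   resolve to the unassigned section. */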
static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &phys_sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(as, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

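/* Translate 'addr' in 'as' into a target MemoryRegion and offset.  Each
   IOMMU on the path may redirect the access into another address space
   (iotlb.target_as); the walk stops at the first non-IOMMU region, or
   at io_mem_unassigned if an IOMMU denies the access. */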
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

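/* Register a newly created CPU: append it to the global first_cpu list,
   give it the next free cpu_index, and hook up its migration state. */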
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

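/* Compute the value stored in a TLB entry's iotlb field.  For RAM this
   is the ram_addr_t of the page ORed with the notdirty/rom section
   index; for MMIO it is the section number plus the offset within the
   page.  Pages with watchpoints are redirected to the watch section and
   flagged TLB_MMIO so that every access takes the slow path. */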
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

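/* Register a section that does not cover a whole target page: install
   (or reuse) a subpage_t container for that page and record the section
   in the relevant slice of its sub_section table. */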
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

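/* Listener callback: split an arbitrary section into an unaligned head,
   a run of whole target pages, and an unaligned tail.  Head and tail go
   through the subpage machinery; the aligned middle is mapped
   directly. */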
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

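/* Best-fit allocator for ram_addr_t space: scan the gaps between
   existing blocks and return the start of the smallest gap that can
   hold 'size' bytes. */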
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

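/* Create a new RAMBlock of 'size' bytes.  The host memory either comes
   from the caller ('host' non-NULL), from a hugetlbfs file (-mem-path),
   from Xen or KVM, or from an anonymous mmap.  The block is inserted
   into ram_list (sorted biggest first) and the dirty bitmap is grown to
   cover it, with the new range marked dirty. */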
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

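/* Throw away the host mapping of a RAM range and recreate it at the
   same virtual address, reproducing the mmap flags used at allocation
   time.  Used e.g. to get rid of a poisoned page after a hardware
   memory error. */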
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

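/* Write handler for pages whose dirty bits are clear: invalidate any
   TBs derived from the page, perform the write, then set the dirty
   flags.  Once the page is fully dirty, the TLB entry is updated so
   later writes skip this slow path. */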
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

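/* Subpage accessors: bounce the access back through the owning address
   space at base + addr, where dispatch resolves the individual
   sub-section that covers the offset. */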
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', size, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      size, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

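/* Register a section spanning the whole 64-bit range for 'mr', so that
   the region can be referred to by section number in phys_map leaves
   and iotlb values. */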
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

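/* Create the fixed regions ("rom", "unassigned", "notdirty", "watch")
   that back the special section numbers set up in core_begin(). */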
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space,
                 int128_get64(section->size));
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space,
                        int128_get64(section->size));
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    d->as = as;
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

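/*
 * Sketch of how board code can hang a region off the system memory
 * returned above (the size, offset and "demo-ram" name are
 * illustrative, and the three-argument memory_region_init_ram() of
 * this tree is assumed):
 *
 *     MemoryRegion *demo = g_malloc(sizeof(*demo));
 *     memory_region_init_ram(demo, "demo-ram", 0x100000);
 *     memory_region_add_subregion(get_system_memory(), 0x10000000, demo);
 */
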
#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
{
    if (l >= 4 && (((addr & 3) == 0) || mr->ops->impl.unaligned)) {
        return 4;
    }
    if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
        return 2;
    }
    return 1;
}

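/*
 * Worked example: for a region with strict alignment requirements
 * (impl.unaligned is false), a 4-byte request at addr 0x1002 yields
 * memory_access_size() == 2, so the address_space_rw() loop below
 * splits it into two 16-bit accesses, at 0x1002 and then 0x1004.
 */
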
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l == 4) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                } else if (l == 2) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                if (l == 4) {
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                } else if (l == 2) {
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                } else {
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

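/*
 * Usage sketch (the buffer and guest address are illustrative, not
 * from a real caller): round-tripping bytes through guest-physical
 * memory via the slow path above:
 *
 *     uint8_t buf[16] = { 0 };
 *     cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);
 *     cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);
 *
 * The first call writes buf to guest address 0x1000; the second reads
 * it back.
 */
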
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(&address_space_memory,
                                     addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

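/*
 * Usage sketch for ROM loading (the blob and address are
 * illustrative): unlike the ordinary write path, this succeeds even
 * when the target region is marked read-only:
 *
 *     static const uint8_t boot_blob[] = { 0xde, 0xad, 0xbe, 0xef };
 *     cpu_physical_memory_write_rom(0xfffc0000, boot_blob,
 *                                   sizeof(boot_blob));
 */
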
typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

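/*
 * Sketch of the retry protocol (DemoState, demo_start_dma and
 * retry_dma are hypothetical): a caller whose address_space_map()
 * returned NULL because the bounce buffer was busy registers a
 * callback and retries the transfer when notified:
 *
 *     static void retry_dma(void *opaque)
 *     {
 *         DemoState *s = opaque;
 *         demo_start_dma(s);
 *     }
 *
 *     cpu_register_map_client(s, retry_dma);
 */
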
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr todo = 0;
    hwaddr l, xlat;
    MemoryRegion *mr;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);

        if (!memory_access_is_direct(mr, is_write)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(mr) + xlat;
        } else {
            if (memory_region_get_ram_addr(mr) + xlat != raddr + todo) {
                break;
            }
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

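/*
 * Typical zero-copy pattern (the address and length are illustrative).
 * Note that *plen may come back smaller than requested, and that a
 * NULL return means "retry later via cpu_register_map_client()":
 *
 *     hwaddr len = 4096;
 *     void *p = cpu_physical_memory_map(0x2000, &len, 1);
 *     if (p) {
 *         memset(p, 0, len);
 *         cpu_physical_memory_unmap(p, len, 1, len);
 *     }
 */
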
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

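/*
 * Example of the endian-specific variants: if guest RAM at 0x1000
 * holds the bytes 01 02 03 04 (illustrative contents), then
 *
 *     ldl_le_phys(0x1000) == 0x04030201
 *     ldl_be_phys(0x1000) == 0x01020304
 *
 * and ldl_phys() matches whichever of the two the target's native
 * endianness selects.
 */
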
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

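/*
 * Sketch of the intended use, as a hypothetical target MMU helper:
 * setting an accessed bit in a guest PTE without flagging the page as
 * modified code, so translated blocks on that page survive:
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
 *
 * PG_ACCESSED_MASK stands in here for the target-specific flag.
 */
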
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

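/*
 * Usage sketch (illustrative): this is the entry point the gdbstub
 * and monitor use to peek at guest virtual memory, for example to
 * fetch the instruction at the guest PC (env_pc is a placeholder for
 * the address of interest):
 *
 *     uint32_t insn;
 *     if (cpu_memory_rw_debug(env, env_pc, (uint8_t *)&insn,
 *                             sizeof(insn), 0) == 0) {
 *         disassemble insn here
 *     }
 */
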
#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
#endif
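
/*
 * Usage sketch for the iterator above (count_ram and its accumulator
 * are illustrative; the callback signature mirrors the call site in
 * qemu_ram_foreach_block()):
 *
 *     static void count_ram(void *host, ram_addr_t offset,
 *                           ram_addr_t length, void *opaque)
 *     {
 *         *(ram_addr_t *)opaque += length;
 *     }
 *
 *     ram_addr_t total = 0;
 *     qemu_ram_foreach_block(count_ram, &total);
 */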