git.proxmox.com Git - qemu.git/blame - exec.c
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004 4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c 20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c 24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
0d09e41a 34#include "hw/xen/xen.h"
1de7afc9 35#include "qemu/timer.h"
36#include "qemu/config-file.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a 40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
67d95c15 52
67d3b957 53//#define DEBUG_UNASSIGNED
db7b5426 54//#define DEBUG_SUBPAGE
1196be37 55
e2eef170 56#if !defined(CONFIG_USER_ONLY)
9fa3e853 57int phys_ram_fd;
74576198 58static int in_migration;
94a6b54f 59
a3161038 60RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a 61
62static MemoryRegion *system_memory;
309cb471 63static MemoryRegion *system_io;
62152b8a 64
f6790af6 65AddressSpace address_space_io;
66AddressSpace address_space_memory;
9e11908f 67DMAContext dma_context_memory;
2673a5da 68
2a8e7499 69MemoryRegion io_mem_rom, io_mem_unassigned, io_mem_notdirty;
de712f94 70static MemoryRegion io_mem_subpage_ram;
0e0df1e2 71
e2eef170 72#endif
9fa3e853 73
9349b4f9 74CPUArchState *first_cpu;
6a00d601 75/* current CPU in the current thread. It is only valid inside
76 cpu_exec() */
9349b4f9 77DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 78/* 0 = Do not count executed instructions.
bf20dc07 79 1 = Precise instruction counting.
2e70f6ef 80 2 = Adaptive rate instruction counting. */
5708fc66 81int use_icount;
6a00d601 82
e2eef170 83#if !defined(CONFIG_USER_ONLY)
4346ae3e 84
5312bd8b 85static MemoryRegionSection *phys_sections;
86static unsigned phys_sections_nb, phys_sections_nb_alloc;
87static uint16_t phys_section_unassigned;
aa102231 88static uint16_t phys_section_notdirty;
89static uint16_t phys_section_rom;
90static uint16_t phys_section_watch;
5312bd8b 91
d6f2ea22 92/* Simple allocator for PhysPageEntry nodes */
93static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
94static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
95
07f07b31 96#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 97
e2eef170 98static void io_mem_init(void);
62152b8a 99static void memory_map_init(void);
8b9c99d9 100static void *qemu_safe_ram_ptr(ram_addr_t addr);
e2eef170 101
1ec9b909 102static MemoryRegion io_mem_watch;
6658ffb8 103#endif
fd6ce8f6 104
6d9a1304 105#if !defined(CONFIG_USER_ONLY)
d6f2ea22 106
f7bf5461 107static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 108{
f7bf5461 109 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
d6f2ea22 110 typedef PhysPageEntry Node[L2_SIZE];
111 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
f7bf5461 112 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
113 phys_map_nodes_nb + nodes);
d6f2ea22 114 phys_map_nodes = g_renew(Node, phys_map_nodes,
115 phys_map_nodes_nb_alloc);
116 }
f7bf5461 117}
118
119static uint16_t phys_map_node_alloc(void)
120{
121 unsigned i;
122 uint16_t ret;
123
124 ret = phys_map_nodes_nb++;
125 assert(ret != PHYS_MAP_NODE_NIL);
126 assert(ret != phys_map_nodes_nb_alloc);
d6f2ea22 127 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 128 phys_map_nodes[ret][i].is_leaf = 0;
c19e8800 129 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 130 }
f7bf5461 131 return ret;
d6f2ea22 132}
133
134static void phys_map_nodes_reset(void)
135{
136 phys_map_nodes_nb = 0;
137}
138
92e873b9 139
a8170e5e 140static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
141 hwaddr *nb, uint16_t leaf,
2999097b 142 int level)
f7bf5461 143{
144 PhysPageEntry *p;
145 int i;
a8170e5e 146 hwaddr step = (hwaddr)1 << (level * L2_BITS);
108c49b8 147
07f07b31 148 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800 149 lp->ptr = phys_map_node_alloc();
150 p = phys_map_nodes[lp->ptr];
f7bf5461 151 if (level == 0) {
152 for (i = 0; i < L2_SIZE; i++) {
07f07b31 153 p[i].is_leaf = 1;
c19e8800 154 p[i].ptr = phys_section_unassigned;
4346ae3e 155 }
67c4d23c 156 }
f7bf5461 157 } else {
c19e8800 158 p = phys_map_nodes[lp->ptr];
92e873b9 159 }
2999097b 160 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 161
2999097b 162 while (*nb && lp < &p[L2_SIZE]) {
07f07b31 163 if ((*index & (step - 1)) == 0 && *nb >= step) {
164 lp->is_leaf = true;
c19e8800 165 lp->ptr = leaf;
07f07b31 166 *index += step;
167 *nb -= step;
2999097b 168 } else {
169 phys_page_set_level(lp, index, nb, leaf, level - 1);
170 }
171 ++lp;
f7bf5461 172 }
173}
174
ac1970fb 175static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 176 hwaddr index, hwaddr nb,
2999097b 177 uint16_t leaf)
f7bf5461 178{
2999097b 179 /* Wildly overreserve - it doesn't matter much. */
07f07b31 180 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 181
ac1970fb 182 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9 183}
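/* The physical memory map built here is a P_L2_LEVELS-deep radix tree indexed
 * by physical page number, L2_BITS bits per level.  Inner nodes live in the
 * phys_map_nodes array managed by the allocator above; leaves store uint16_t
 * indices into phys_sections.  Unpopulated subtrees keep ptr ==
 * PHYS_MAP_NODE_NIL and are resolved to phys_section_unassigned by
 * phys_page_find() below. */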
184
a8170e5e 185MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
92e873b9 186{
ac1970fb 187 PhysPageEntry lp = d->phys_map;
31ab2b4a 188 PhysPageEntry *p;
189 int i;
f1f6e3b8 190
07f07b31 191 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 192 if (lp.ptr == PHYS_MAP_NODE_NIL) {
fd298934 193 return &phys_sections[phys_section_unassigned];
31ab2b4a 194 }
c19e8800 195 p = phys_map_nodes[lp.ptr];
31ab2b4a 196 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 197 }
fd298934 198 return &phys_sections[lp.ptr];
f3705d53 199}
200
e5548617 201bool memory_region_is_unassigned(MemoryRegion *mr)
202{
2a8e7499 203 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 204 && mr != &io_mem_watch;
fd6ce8f6 205}
5b6dd868 206#endif
fd6ce8f6 207
5b6dd868 208void cpu_exec_init_all(void)
fdbb84d1 209{
5b6dd868 210#if !defined(CONFIG_USER_ONLY)
b2a8658e 211 qemu_mutex_init(&ram_list.mutex);
5b6dd868 212 memory_map_init();
213 io_mem_init();
fdbb84d1 214#endif
5b6dd868 215}
fdbb84d1 216
b170fce3 217#if !defined(CONFIG_USER_ONLY)
5b6dd868 218
219static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 220{
259186a7 221 CPUState *cpu = opaque;
a513fe19 222
5b6dd868 223 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
224 version_id is increased. */
259186a7 225 cpu->interrupt_request &= ~0x01;
226 tlb_flush(cpu->env_ptr, 1);
5b6dd868 227
228 return 0;
a513fe19 229}
7501267e 230
5b6dd868 231static const VMStateDescription vmstate_cpu_common = {
232 .name = "cpu_common",
233 .version_id = 1,
234 .minimum_version_id = 1,
235 .minimum_version_id_old = 1,
236 .post_load = cpu_common_post_load,
237 .fields = (VMStateField []) {
259186a7 238 VMSTATE_UINT32(halted, CPUState),
239 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 240 VMSTATE_END_OF_LIST()
241 }
242};
b170fce3 243#else
244#define vmstate_cpu_common vmstate_dummy
5b6dd868 245#endif
ea041c0e 246
38d8f5c8 247CPUState *qemu_get_cpu(int index)
ea041c0e 248{
5b6dd868 249 CPUArchState *env = first_cpu;
38d8f5c8 250 CPUState *cpu = NULL;
ea041c0e 251
5b6dd868 252 while (env) {
55e5c285 253 cpu = ENV_GET_CPU(env);
254 if (cpu->cpu_index == index) {
5b6dd868 255 break;
55e5c285 256 }
5b6dd868 257 env = env->next_cpu;
ea041c0e 258 }
5b6dd868 259
d76fddae 260 return env ? cpu : NULL;
ea041c0e 261}
262
d6b9e0d6 263void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
264{
265 CPUArchState *env = first_cpu;
266
267 while (env) {
268 func(ENV_GET_CPU(env), data);
269 env = env->next_cpu;
270 }
271}
272
5b6dd868 273void cpu_exec_init(CPUArchState *env)
ea041c0e 274{
5b6dd868 275 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 276 CPUClass *cc = CPU_GET_CLASS(cpu);
5b6dd868 277 CPUArchState **penv;
278 int cpu_index;
279
280#if defined(CONFIG_USER_ONLY)
281 cpu_list_lock();
282#endif
283 env->next_cpu = NULL;
284 penv = &first_cpu;
285 cpu_index = 0;
286 while (*penv != NULL) {
287 penv = &(*penv)->next_cpu;
288 cpu_index++;
289 }
55e5c285 290 cpu->cpu_index = cpu_index;
1b1ed8dc 291 cpu->numa_node = 0;
5b6dd868 292 QTAILQ_INIT(&env->breakpoints);
293 QTAILQ_INIT(&env->watchpoints);
294#ifndef CONFIG_USER_ONLY
295 cpu->thread_id = qemu_get_thread_id();
296#endif
297 *penv = env;
298#if defined(CONFIG_USER_ONLY)
299 cpu_list_unlock();
300#endif
259186a7 301 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
5b6dd868 302#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868 303 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
304 cpu_save, cpu_load, env);
b170fce3 305 assert(cc->vmsd == NULL);
5b6dd868 306#endif
b170fce3 307 if (cc->vmsd != NULL) {
308 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
309 }
ea041c0e 310}
311
1fddef4b 312#if defined(TARGET_HAS_ICE)
94df27fd 313#if defined(CONFIG_USER_ONLY)
9349b4f9 314static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
94df27fd 315{
316 tb_invalidate_phys_page_range(pc, pc + 1, 0);
317}
318#else
1e7855a5 319static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
320{
9d70c4b7 321 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
322 (pc & ~TARGET_PAGE_MASK));
1e7855a5 323}
c27004ec 324#endif
94df27fd 325#endif /* TARGET_HAS_ICE */
d720b93d 326
c527ee8f 327#if defined(CONFIG_USER_ONLY)
9349b4f9 328void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f 329
330{
331}
332
9349b4f9 333int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f 334 int flags, CPUWatchpoint **watchpoint)
335{
336 return -ENOSYS;
337}
338#else
6658ffb8 339/* Add a watchpoint. */
9349b4f9 340int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 341 int flags, CPUWatchpoint **watchpoint)
6658ffb8 342{
b4051334 343 target_ulong len_mask = ~(len - 1);
c0ce998e 344 CPUWatchpoint *wp;
6658ffb8 345
b4051334 346 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828 347 if ((len & (len - 1)) || (addr & ~len_mask) ||
348 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334 349 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
350 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
351 return -EINVAL;
352 }
7267c094 353 wp = g_malloc(sizeof(*wp));
a1d1bb31 354
355 wp->vaddr = addr;
b4051334 356 wp->len_mask = len_mask;
a1d1bb31 357 wp->flags = flags;
358
2dc9f411 359 /* keep all GDB-injected watchpoints in front */
c0ce998e 360 if (flags & BP_GDB)
72cf2d4f 361 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 362 else
72cf2d4f 363 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 364
6658ffb8 365 tlb_flush_page(env, addr);
a1d1bb31 366
367 if (watchpoint)
368 *watchpoint = wp;
369 return 0;
6658ffb8 370}
371
a1d1bb31 372/* Remove a specific watchpoint. */
9349b4f9 373int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 374 int flags)
6658ffb8 375{
b4051334 376 target_ulong len_mask = ~(len - 1);
a1d1bb31 377 CPUWatchpoint *wp;
6658ffb8 378
72cf2d4f 379 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 380 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 381 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 382 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
383 return 0;
384 }
385 }
a1d1bb31 386 return -ENOENT;
6658ffb8
PB
387}
388
a1d1bb31 389/* Remove a specific watchpoint by reference. */
9349b4f9 390void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 391{
72cf2d4f 392 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 393
a1d1bb31
AL
394 tlb_flush_page(env, watchpoint->vaddr);
395
7267c094 396 g_free(watchpoint);
a1d1bb31
AL
397}
398
399/* Remove all matching watchpoints. */
9349b4f9 400void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 401{
c0ce998e 402 CPUWatchpoint *wp, *next;
a1d1bb31 403
72cf2d4f 404 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
405 if (wp->flags & mask)
406 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 407 }
7d03f82f 408}
c527ee8f 409#endif
7d03f82f 410
a1d1bb31 411/* Add a breakpoint. */
9349b4f9 412int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 413 CPUBreakpoint **breakpoint)
4c3a88a2 414{
1fddef4b 415#if defined(TARGET_HAS_ICE)
c0ce998e 416 CPUBreakpoint *bp;
3b46e624 417
7267c094 418 bp = g_malloc(sizeof(*bp));
4c3a88a2 419
a1d1bb31
AL
420 bp->pc = pc;
421 bp->flags = flags;
422
2dc9f411 423 /* keep all GDB-injected breakpoints in front */
c0ce998e 424 if (flags & BP_GDB)
72cf2d4f 425 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 426 else
72cf2d4f 427 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 428
d720b93d 429 breakpoint_invalidate(env, pc);
a1d1bb31
AL
430
431 if (breakpoint)
432 *breakpoint = bp;
4c3a88a2
FB
433 return 0;
434#else
a1d1bb31 435 return -ENOSYS;
4c3a88a2
FB
436#endif
437}
438
a1d1bb31 439/* Remove a specific breakpoint. */
9349b4f9 440int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 441{
7d03f82f 442#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
443 CPUBreakpoint *bp;
444
72cf2d4f 445 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
446 if (bp->pc == pc && bp->flags == flags) {
447 cpu_breakpoint_remove_by_ref(env, bp);
448 return 0;
449 }
7d03f82f 450 }
a1d1bb31
AL
451 return -ENOENT;
452#else
453 return -ENOSYS;
7d03f82f
EI
454#endif
455}
456
a1d1bb31 457/* Remove a specific breakpoint by reference. */
9349b4f9 458void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 459{
1fddef4b 460#if defined(TARGET_HAS_ICE)
72cf2d4f 461 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 462
a1d1bb31
AL
463 breakpoint_invalidate(env, breakpoint->pc);
464
7267c094 465 g_free(breakpoint);
a1d1bb31
AL
466#endif
467}
468
469/* Remove all matching breakpoints. */
9349b4f9 470void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
471{
472#if defined(TARGET_HAS_ICE)
c0ce998e 473 CPUBreakpoint *bp, *next;
a1d1bb31 474
72cf2d4f 475 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
476 if (bp->flags & mask)
477 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 478 }
4c3a88a2
FB
479#endif
480}
481
c33a346e
FB
482/* enable or disable single step mode. EXCP_DEBUG is returned by the
483 CPU loop after each instruction */
9349b4f9 484void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 485{
1fddef4b 486#if defined(TARGET_HAS_ICE)
c33a346e
FB
487 if (env->singlestep_enabled != enabled) {
488 env->singlestep_enabled = enabled;
e22a25c9
AL
489 if (kvm_enabled())
490 kvm_update_guest_debug(env, 0);
491 else {
ccbb4d44 492 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
493 /* XXX: only flush what is necessary */
494 tb_flush(env);
495 }
c33a346e
FB
496 }
497#endif
498}
499
9349b4f9 500void cpu_exit(CPUArchState *env)
3098dba0 501{
fcd7d003
AF
502 CPUState *cpu = ENV_GET_CPU(env);
503
504 cpu->exit_request = 1;
378df4b2 505 cpu->tcg_exit_req = 1;
3098dba0
AJ
506}
507
9349b4f9 508void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
509{
510 va_list ap;
493ae1f0 511 va_list ap2;
7501267e
FB
512
513 va_start(ap, fmt);
493ae1f0 514 va_copy(ap2, ap);
7501267e
FB
515 fprintf(stderr, "qemu: fatal: ");
516 vfprintf(stderr, fmt, ap);
517 fprintf(stderr, "\n");
6fd2a026 518 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
519 if (qemu_log_enabled()) {
520 qemu_log("qemu: fatal: ");
521 qemu_log_vprintf(fmt, ap2);
522 qemu_log("\n");
6fd2a026 523 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 524 qemu_log_flush();
93fcfe39 525 qemu_log_close();
924edcae 526 }
493ae1f0 527 va_end(ap2);
f9373291 528 va_end(ap);
fd052bf6
RV
529#if defined(CONFIG_USER_ONLY)
530 {
531 struct sigaction act;
532 sigfillset(&act.sa_mask);
533 act.sa_handler = SIG_DFL;
534 sigaction(SIGABRT, &act, NULL);
535 }
536#endif
7501267e
FB
537 abort();
538}
539
9349b4f9 540CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 541{
9349b4f9
AF
542 CPUArchState *new_env = cpu_init(env->cpu_model_str);
543 CPUArchState *next_cpu = new_env->next_cpu;
5a38f081
AL
544#if defined(TARGET_HAS_ICE)
545 CPUBreakpoint *bp;
546 CPUWatchpoint *wp;
547#endif
548
9349b4f9 549 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081 550
55e5c285 551 /* Preserve chaining. */
c5be9f08 552 new_env->next_cpu = next_cpu;
5a38f081
AL
553
554 /* Clone all break/watchpoints.
555 Note: Once we support ptrace with hw-debug register access, make sure
556 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
557 QTAILQ_INIT(&env->breakpoints);
558 QTAILQ_INIT(&env->watchpoints);
5a38f081 559#if defined(TARGET_HAS_ICE)
72cf2d4f 560 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
561 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
562 }
72cf2d4f 563 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
564 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
565 wp->flags, NULL);
566 }
567#endif
568
c5be9f08
TS
569 return new_env;
570}
571
0124311e 572#if !defined(CONFIG_USER_ONLY)
d24981d3
JQ
573static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
574 uintptr_t length)
575{
576 uintptr_t start1;
577
578 /* we modify the TLB cache so that the dirty bit will be set again
579 when accessing the range */
580 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
581 /* Check that we don't span multiple blocks - this breaks the
582 address comparisons below. */
583 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
584 != (end - 1) - start) {
585 abort();
586 }
587 cpu_tlb_reset_dirty_all(start1, length);
588
589}
590
5579c7f3 591/* Note: start and end must be within the same ram block. */
c227f099 592void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 593 int dirty_flags)
1ccde1cb 594{
d24981d3 595 uintptr_t length;
1ccde1cb
FB
596
597 start &= TARGET_PAGE_MASK;
598 end = TARGET_PAGE_ALIGN(end);
599
600 length = end - start;
601 if (length == 0)
602 return;
f7c11b53 603 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 604
d24981d3
JQ
605 if (tcg_enabled()) {
606 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 607 }
1ccde1cb
FB
608}
609
8b9c99d9 610static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 611{
f6f3fbca 612 int ret = 0;
74576198 613 in_migration = enable;
f6f3fbca 614 return ret;
74576198
AL
615}
616
a8170e5e 617hwaddr memory_region_section_get_iotlb(CPUArchState *env,
e5548617
BS
618 MemoryRegionSection *section,
619 target_ulong vaddr,
a8170e5e 620 hwaddr paddr,
e5548617
BS
621 int prot,
622 target_ulong *address)
623{
a8170e5e 624 hwaddr iotlb;
e5548617
BS
625 CPUWatchpoint *wp;
626
cc5bea60 627 if (memory_region_is_ram(section->mr)) {
e5548617
BS
628 /* Normal RAM. */
629 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 630 + memory_region_section_addr(section, paddr);
e5548617
BS
631 if (!section->readonly) {
632 iotlb |= phys_section_notdirty;
633 } else {
634 iotlb |= phys_section_rom;
635 }
636 } else {
e5548617 637 iotlb = section - phys_sections;
cc5bea60 638 iotlb += memory_region_section_addr(section, paddr);
e5548617
BS
639 }
640
641 /* Make accesses to pages with watchpoints go via the
642 watchpoint trap routines. */
643 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
644 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
645 /* Avoid trapping reads of pages with a write breakpoint. */
646 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
647 iotlb = phys_section_watch + paddr;
648 *address |= TLB_MMIO;
649 break;
650 }
651 }
652 }
653
654 return iotlb;
655}
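/* For RAM-backed sections the iotlb value above combines the page's ram_addr
 * with the notdirty (writable) or rom (read-only) section index; for MMIO it
 * is the section's index into phys_sections plus the offset within the
 * section.  Pages covered by a watchpoint are redirected to the watch section
 * and get TLB_MMIO set so every access takes the slow path. */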
9fa3e853
FB
656#endif /* defined(CONFIG_USER_ONLY) */
657
e2eef170 658#if !defined(CONFIG_USER_ONLY)
8da3ff18 659
c04b2b78
PB
660#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
661typedef struct subpage_t {
70c68e44 662 MemoryRegion iomem;
a8170e5e 663 hwaddr base;
5312bd8b 664 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
665} subpage_t;
666
c227f099 667static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 668 uint16_t section);
a8170e5e 669static subpage_t *subpage_init(hwaddr base);
5312bd8b 670static void destroy_page_desc(uint16_t section_index)
54688b1e 671{
5312bd8b
AK
672 MemoryRegionSection *section = &phys_sections[section_index];
673 MemoryRegion *mr = section->mr;
54688b1e
AK
674
675 if (mr->subpage) {
676 subpage_t *subpage = container_of(mr, subpage_t, iomem);
677 memory_region_destroy(&subpage->iomem);
678 g_free(subpage);
679 }
680}
681
4346ae3e 682static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
683{
684 unsigned i;
d6f2ea22 685 PhysPageEntry *p;
54688b1e 686
c19e8800 687 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
688 return;
689 }
690
c19e8800 691 p = phys_map_nodes[lp->ptr];
4346ae3e 692 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 693 if (!p[i].is_leaf) {
54688b1e 694 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 695 } else {
c19e8800 696 destroy_page_desc(p[i].ptr);
54688b1e 697 }
54688b1e 698 }
07f07b31 699 lp->is_leaf = 0;
c19e8800 700 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
701}
702
ac1970fb 703static void destroy_all_mappings(AddressSpaceDispatch *d)
54688b1e 704{
ac1970fb 705 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
d6f2ea22 706 phys_map_nodes_reset();
54688b1e
AK
707}
708
5312bd8b
AK
709static uint16_t phys_section_add(MemoryRegionSection *section)
710{
68f3f65b
PB
711 /* The physical section number is ORed with a page-aligned
712 * pointer to produce the iotlb entries. Thus it should
713 * never overflow into the page-aligned value.
714 */
715 assert(phys_sections_nb < TARGET_PAGE_SIZE);
716
5312bd8b
AK
717 if (phys_sections_nb == phys_sections_nb_alloc) {
718 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
719 phys_sections = g_renew(MemoryRegionSection, phys_sections,
720 phys_sections_nb_alloc);
721 }
722 phys_sections[phys_sections_nb] = *section;
723 return phys_sections_nb++;
724}
725
726static void phys_sections_clear(void)
727{
728 phys_sections_nb = 0;
729}
730
ac1970fb 731static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
732{
733 subpage_t *subpage;
a8170e5e 734 hwaddr base = section->offset_within_address_space
0f0cb164 735 & TARGET_PAGE_MASK;
ac1970fb 736 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
0f0cb164
AK
737 MemoryRegionSection subsection = {
738 .offset_within_address_space = base,
739 .size = TARGET_PAGE_SIZE,
740 };
a8170e5e 741 hwaddr start, end;
0f0cb164 742
f3705d53 743 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 744
f3705d53 745 if (!(existing->mr->subpage)) {
0f0cb164
AK
746 subpage = subpage_init(base);
747 subsection.mr = &subpage->iomem;
ac1970fb 748 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 749 phys_section_add(&subsection));
0f0cb164 750 } else {
f3705d53 751 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
752 }
753 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
adb2a9b5 754 end = start + section->size - 1;
0f0cb164
AK
755 subpage_register(subpage, start, end, phys_section_add(section));
756}
757
758
ac1970fb 759static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
33417e70 760{
a8170e5e 761 hwaddr start_addr = section->offset_within_address_space;
dd81124b 762 ram_addr_t size = section->size;
a8170e5e 763 hwaddr addr;
5312bd8b 764 uint16_t section_index = phys_section_add(section);
dd81124b 765
3b8e6a2d 766 assert(size);
f6f3fbca 767
3b8e6a2d 768 addr = start_addr;
ac1970fb 769 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2999097b 770 section_index);
33417e70
FB
771}
772
86a86236
AK
773QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > MAX_PHYS_ADDR_SPACE_BITS)
774
775static MemoryRegionSection limit(MemoryRegionSection section)
776{
777 section.size = MIN(section.offset_within_address_space + section.size,
778 MAX_PHYS_ADDR + 1)
779 - section.offset_within_address_space;
780
781 return section;
782}
783
ac1970fb 784static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 785{
ac1970fb 786 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
86a86236 787 MemoryRegionSection now = limit(*section), remain = limit(*section);
0f0cb164
AK
788
789 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
790 || (now.size < TARGET_PAGE_SIZE)) {
791 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
792 - now.offset_within_address_space,
793 now.size);
ac1970fb 794 register_subpage(d, &now);
0f0cb164
AK
795 remain.size -= now.size;
796 remain.offset_within_address_space += now.size;
797 remain.offset_within_region += now.size;
798 }
69b67646
TH
799 while (remain.size >= TARGET_PAGE_SIZE) {
800 now = remain;
801 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
802 now.size = TARGET_PAGE_SIZE;
ac1970fb 803 register_subpage(d, &now);
69b67646
TH
804 } else {
805 now.size &= TARGET_PAGE_MASK;
ac1970fb 806 register_multipage(d, &now);
69b67646 807 }
0f0cb164
AK
808 remain.size -= now.size;
809 remain.offset_within_address_space += now.size;
810 remain.offset_within_region += now.size;
811 }
812 now = remain;
813 if (now.size) {
ac1970fb 814 register_subpage(d, &now);
0f0cb164
AK
815 }
816}
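/* mem_add() carves an incoming MemoryRegionSection into TARGET_PAGE_SIZE
 * units: unaligned head and tail fragments (and regions whose
 * offset_within_region is not page aligned) are routed through
 * register_subpage(), while the page-aligned remainder is mapped in one call
 * to register_multipage(). */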
817
62a2744c
SY
818void qemu_flush_coalesced_mmio_buffer(void)
819{
820 if (kvm_enabled())
821 kvm_flush_coalesced_mmio_buffer();
822}
823
b2a8658e
UD
824void qemu_mutex_lock_ramlist(void)
825{
826 qemu_mutex_lock(&ram_list.mutex);
827}
828
829void qemu_mutex_unlock_ramlist(void)
830{
831 qemu_mutex_unlock(&ram_list.mutex);
832}
833
c902760f
MT
834#if defined(__linux__) && !defined(TARGET_S390X)
835
836#include <sys/vfs.h>
837
838#define HUGETLBFS_MAGIC 0x958458f6
839
840static long gethugepagesize(const char *path)
841{
842 struct statfs fs;
843 int ret;
844
845 do {
9742bf26 846 ret = statfs(path, &fs);
c902760f
MT
847 } while (ret != 0 && errno == EINTR);
848
849 if (ret != 0) {
9742bf26
YT
850 perror(path);
851 return 0;
c902760f
MT
852 }
853
854 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 855 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
856
857 return fs.f_bsize;
858}
859
04b16653
AW
860static void *file_ram_alloc(RAMBlock *block,
861 ram_addr_t memory,
862 const char *path)
c902760f
MT
863{
864 char *filename;
8ca761f6
PF
865 char *sanitized_name;
866 char *c;
c902760f
MT
867 void *area;
868 int fd;
869#ifdef MAP_POPULATE
870 int flags;
871#endif
872 unsigned long hpagesize;
873
874 hpagesize = gethugepagesize(path);
875 if (!hpagesize) {
9742bf26 876 return NULL;
c902760f
MT
877 }
878
879 if (memory < hpagesize) {
880 return NULL;
881 }
882
883 if (kvm_enabled() && !kvm_has_sync_mmu()) {
884 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
885 return NULL;
886 }
887
8ca761f6
PF
888 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
889 sanitized_name = g_strdup(block->mr->name);
890 for (c = sanitized_name; *c != '\0'; c++) {
891 if (*c == '/')
892 *c = '_';
893 }
894
895 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
896 sanitized_name);
897 g_free(sanitized_name);
c902760f
MT
898
899 fd = mkstemp(filename);
900 if (fd < 0) {
9742bf26 901 perror("unable to create backing store for hugepages");
e4ada482 902 g_free(filename);
9742bf26 903 return NULL;
c902760f
MT
904 }
905 unlink(filename);
e4ada482 906 g_free(filename);
c902760f
MT
907
908 memory = (memory+hpagesize-1) & ~(hpagesize-1);
909
910 /*
911 * ftruncate is not supported by hugetlbfs in older
912 * hosts, so don't bother bailing out on errors.
913 * If anything goes wrong with it under other filesystems,
914 * mmap will fail.
915 */
916 if (ftruncate(fd, memory))
9742bf26 917 perror("ftruncate");
c902760f
MT
918
919#ifdef MAP_POPULATE
920 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
921 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
922 * to sidestep this quirk.
923 */
924 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
925 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
926#else
927 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
928#endif
929 if (area == MAP_FAILED) {
9742bf26
YT
930 perror("file_ram_alloc: can't mmap RAM pages");
931 close(fd);
932 return (NULL);
c902760f 933 }
04b16653 934 block->fd = fd;
c902760f
MT
935 return area;
936}
937#endif
938
d17b5288 939static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
940{
941 RAMBlock *block, *next_block;
3e837b2c 942 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 943
49cd9ac6
SH
944 assert(size != 0); /* it would hand out same offset multiple times */
945
a3161038 946 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
947 return 0;
948
a3161038 949 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 950 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
951
952 end = block->offset + block->length;
953
a3161038 954 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
955 if (next_block->offset >= end) {
956 next = MIN(next, next_block->offset);
957 }
958 }
959 if (next - end >= size && next - end < mingap) {
3e837b2c 960 offset = end;
04b16653
AW
961 mingap = next - end;
962 }
963 }
3e837b2c
AW
964
965 if (offset == RAM_ADDR_MAX) {
966 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
967 (uint64_t)size);
968 abort();
969 }
970
04b16653
AW
971 return offset;
972}
973
652d7ec2 974ram_addr_t last_ram_offset(void)
d17b5288
AW
975{
976 RAMBlock *block;
977 ram_addr_t last = 0;
978
a3161038 979 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
980 last = MAX(last, block->offset + block->length);
981
982 return last;
983}
984
ddb97f1d
JB
985static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
986{
987 int ret;
988 QemuOpts *machine_opts;
989
990 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
991 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
992 if (machine_opts &&
993 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
994 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
995 if (ret) {
996 perror("qemu_madvise");
997 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
998 "but dump_guest_core=off specified\n");
999 }
1000 }
1001}
1002
c5705a77 1003void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
1004{
1005 RAMBlock *new_block, *block;
1006
c5705a77 1007 new_block = NULL;
a3161038 1008 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77
AK
1009 if (block->offset == addr) {
1010 new_block = block;
1011 break;
1012 }
1013 }
1014 assert(new_block);
1015 assert(!new_block->idstr[0]);
84b89d78 1016
09e5ab63
AL
1017 if (dev) {
1018 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1019 if (id) {
1020 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1021 g_free(id);
84b89d78
CM
1022 }
1023 }
1024 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1025
b2a8658e
UD
1026 /* This assumes the iothread lock is taken here too. */
1027 qemu_mutex_lock_ramlist();
a3161038 1028 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1029 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1030 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1031 new_block->idstr);
1032 abort();
1033 }
1034 }
b2a8658e 1035 qemu_mutex_unlock_ramlist();
c5705a77
AK
1036}
1037
8490fc78
LC
1038static int memory_try_enable_merging(void *addr, size_t len)
1039{
1040 QemuOpts *opts;
1041
1042 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1043 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1044 /* disabled by the user */
1045 return 0;
1046 }
1047
1048 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1049}
1050
c5705a77
AK
1051ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1052 MemoryRegion *mr)
1053{
abb26d63 1054 RAMBlock *block, *new_block;
c5705a77
AK
1055
1056 size = TARGET_PAGE_ALIGN(size);
1057 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1058
b2a8658e
UD
1059 /* This assumes the iothread lock is taken here too. */
1060 qemu_mutex_lock_ramlist();
7c637366 1061 new_block->mr = mr;
432d268c 1062 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1063 if (host) {
1064 new_block->host = host;
cd19cfa2 1065 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
1066 } else {
1067 if (mem_path) {
c902760f 1068#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
1069 new_block->host = file_ram_alloc(new_block, size, mem_path);
1070 if (!new_block->host) {
6eebf958 1071 new_block->host = qemu_anon_ram_alloc(size);
8490fc78 1072 memory_try_enable_merging(new_block->host, size);
6977dfe6 1073 }
c902760f 1074#else
6977dfe6
YT
1075 fprintf(stderr, "-mem-path option unsupported\n");
1076 exit(1);
c902760f 1077#endif
6977dfe6 1078 } else {
868bb33f 1079 if (xen_enabled()) {
fce537d4 1080 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
1081 } else if (kvm_enabled()) {
1082 /* some s390/kvm configurations have special constraints */
6eebf958 1083 new_block->host = kvm_ram_alloc(size);
432d268c 1084 } else {
6eebf958 1085 new_block->host = qemu_anon_ram_alloc(size);
432d268c 1086 }
8490fc78 1087 memory_try_enable_merging(new_block->host, size);
6977dfe6 1088 }
c902760f 1089 }
94a6b54f
PB
1090 new_block->length = size;
1091
abb26d63
PB
1092 /* Keep the list sorted from biggest to smallest block. */
1093 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1094 if (block->length < new_block->length) {
1095 break;
1096 }
1097 }
1098 if (block) {
1099 QTAILQ_INSERT_BEFORE(block, new_block, next);
1100 } else {
1101 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1102 }
0d6d3c87 1103 ram_list.mru_block = NULL;
94a6b54f 1104
f798b07f 1105 ram_list.version++;
b2a8658e 1106 qemu_mutex_unlock_ramlist();
f798b07f 1107
7267c094 1108 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1109 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
1110 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1111 0, size >> TARGET_PAGE_BITS);
1720aeee 1112 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1113
ddb97f1d 1114 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1115 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1116
6f0437e8
JK
1117 if (kvm_enabled())
1118 kvm_setup_guest_memory(new_block->host, size);
1119
94a6b54f
PB
1120 return new_block->offset;
1121}
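/* qemu_ram_alloc_from_ptr() either adopts a caller-supplied buffer
 * (RAM_PREALLOC_MASK), maps a hugetlbfs-backed file via file_ram_alloc()
 * when -mem-path is given, or falls back to Xen/KVM/anonymous allocation.
 * find_ram_offset() picks the smallest gap that fits in ram_addr_t space,
 * the block list is kept sorted from largest to smallest block, and the
 * dirty bitmap is grown to cover the new block. */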
e9a1ab19 1122
c5705a77 1123ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1124{
c5705a77 1125 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1126}
1127
1f2e98b6
AW
1128void qemu_ram_free_from_ptr(ram_addr_t addr)
1129{
1130 RAMBlock *block;
1131
b2a8658e
UD
1132 /* This assumes the iothread lock is taken here too. */
1133 qemu_mutex_lock_ramlist();
a3161038 1134 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1135 if (addr == block->offset) {
a3161038 1136 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1137 ram_list.mru_block = NULL;
f798b07f 1138 ram_list.version++;
7267c094 1139 g_free(block);
b2a8658e 1140 break;
1f2e98b6
AW
1141 }
1142 }
b2a8658e 1143 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1144}
1145
c227f099 1146void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1147{
04b16653
AW
1148 RAMBlock *block;
1149
b2a8658e
UD
1150 /* This assumes the iothread lock is taken here too. */
1151 qemu_mutex_lock_ramlist();
a3161038 1152 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1153 if (addr == block->offset) {
a3161038 1154 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1155 ram_list.mru_block = NULL;
f798b07f 1156 ram_list.version++;
cd19cfa2
HY
1157 if (block->flags & RAM_PREALLOC_MASK) {
1158 ;
1159 } else if (mem_path) {
04b16653
AW
1160#if defined (__linux__) && !defined(TARGET_S390X)
1161 if (block->fd) {
1162 munmap(block->host, block->length);
1163 close(block->fd);
1164 } else {
e7a09b92 1165 qemu_anon_ram_free(block->host, block->length);
04b16653 1166 }
fd28aa13
JK
1167#else
1168 abort();
04b16653
AW
1169#endif
1170 } else {
868bb33f 1171 if (xen_enabled()) {
e41d7c69 1172 xen_invalidate_map_cache_entry(block->host);
432d268c 1173 } else {
e7a09b92 1174 qemu_anon_ram_free(block->host, block->length);
432d268c 1175 }
04b16653 1176 }
7267c094 1177 g_free(block);
b2a8658e 1178 break;
04b16653
AW
1179 }
1180 }
b2a8658e 1181 qemu_mutex_unlock_ramlist();
04b16653 1182
e9a1ab19
FB
1183}
1184
cd19cfa2
HY
1185#ifndef _WIN32
1186void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1187{
1188 RAMBlock *block;
1189 ram_addr_t offset;
1190 int flags;
1191 void *area, *vaddr;
1192
a3161038 1193 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1194 offset = addr - block->offset;
1195 if (offset < block->length) {
1196 vaddr = block->host + offset;
1197 if (block->flags & RAM_PREALLOC_MASK) {
1198 ;
1199 } else {
1200 flags = MAP_FIXED;
1201 munmap(vaddr, length);
1202 if (mem_path) {
1203#if defined(__linux__) && !defined(TARGET_S390X)
1204 if (block->fd) {
1205#ifdef MAP_POPULATE
1206 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1207 MAP_PRIVATE;
1208#else
1209 flags |= MAP_PRIVATE;
1210#endif
1211 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1212 flags, block->fd, offset);
1213 } else {
1214 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1215 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1216 flags, -1, 0);
1217 }
fd28aa13
JK
1218#else
1219 abort();
cd19cfa2
HY
1220#endif
1221 } else {
1222#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1223 flags |= MAP_SHARED | MAP_ANONYMOUS;
1224 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1225 flags, -1, 0);
1226#else
1227 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1228 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1229 flags, -1, 0);
1230#endif
1231 }
1232 if (area != vaddr) {
f15fbc4b
AP
1233 fprintf(stderr, "Could not remap addr: "
1234 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1235 length, addr);
1236 exit(1);
1237 }
8490fc78 1238 memory_try_enable_merging(vaddr, length);
ddb97f1d 1239 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1240 }
1241 return;
1242 }
1243 }
1244}
1245#endif /* !_WIN32 */
1246
dc828ca1 1247/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
1248 With the exception of the softmmu code in this file, this should
1249 only be used for local memory (e.g. video ram) that the device owns,
1250 and knows it isn't going to access beyond the end of the block.
1251
1252 It should not be used for general purpose DMA.
1253 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1254 */
c227f099 1255void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 1256{
94a6b54f
PB
1257 RAMBlock *block;
1258
b2a8658e 1259 /* The list is protected by the iothread lock here. */
0d6d3c87
PB
1260 block = ram_list.mru_block;
1261 if (block && addr - block->offset < block->length) {
1262 goto found;
1263 }
a3161038 1264 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1265 if (addr - block->offset < block->length) {
0d6d3c87 1266 goto found;
f471a17e 1267 }
94a6b54f 1268 }
f471a17e
AW
1269
1270 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1271 abort();
1272
0d6d3c87
PB
1273found:
1274 ram_list.mru_block = block;
1275 if (xen_enabled()) {
1276 /* We need to check if the requested address is in the RAM
1277 * because we don't want to map the entire memory in QEMU.
1278 * In that case just map until the end of the page.
1279 */
1280 if (block->offset == 0) {
1281 return xen_map_cache(addr, 0, 0);
1282 } else if (block->host == NULL) {
1283 block->host =
1284 xen_map_cache(block->offset, block->length, 1);
1285 }
1286 }
1287 return block->host + (addr - block->offset);
dc828ca1
PB
1288}
1289
0d6d3c87
PB
1290/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1291 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1292 *
1293 * ??? Is this still necessary?
b2e0a138 1294 */
8b9c99d9 1295static void *qemu_safe_ram_ptr(ram_addr_t addr)
b2e0a138
MT
1296{
1297 RAMBlock *block;
1298
b2a8658e 1299 /* The list is protected by the iothread lock here. */
a3161038 1300 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1301 if (addr - block->offset < block->length) {
868bb33f 1302 if (xen_enabled()) {
432d268c
JN
1303 /* We need to check if the requested address is in the RAM
1304 * because we don't want to map the entire memory in QEMU.
712c2b41 1305 * In that case just map until the end of the page.
432d268c
JN
1306 */
1307 if (block->offset == 0) {
e41d7c69 1308 return xen_map_cache(addr, 0, 0);
432d268c 1309 } else if (block->host == NULL) {
e41d7c69
JK
1310 block->host =
1311 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
1312 }
1313 }
b2e0a138
MT
1314 return block->host + (addr - block->offset);
1315 }
1316 }
1317
1318 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1319 abort();
1320
1321 return NULL;
1322}
1323
38bee5dc
SS
1324/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1325 * but takes a size argument */
8b9c99d9 1326static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 1327{
8ab934f9
SS
1328 if (*size == 0) {
1329 return NULL;
1330 }
868bb33f 1331 if (xen_enabled()) {
e41d7c69 1332 return xen_map_cache(addr, *size, 1);
868bb33f 1333 } else {
38bee5dc
SS
1334 RAMBlock *block;
1335
a3161038 1336 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1337 if (addr - block->offset < block->length) {
1338 if (addr - block->offset + *size > block->length)
1339 *size = block->length - addr + block->offset;
1340 return block->host + (addr - block->offset);
1341 }
1342 }
1343
1344 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1345 abort();
38bee5dc
SS
1346 }
1347}
1348
e890261f 1349int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1350{
94a6b54f
PB
1351 RAMBlock *block;
1352 uint8_t *host = ptr;
1353
868bb33f 1354 if (xen_enabled()) {
e41d7c69 1355 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
1356 return 0;
1357 }
1358
a3161038 1359 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c 1360 /* This case happens when the block is not mapped. */
1361 if (block->host == NULL) {
1362 continue;
1363 }
f471a17e 1364 if (host - block->host < block->length) {
e890261f
MT
1365 *ram_addr = block->offset + (host - block->host);
1366 return 0;
f471a17e 1367 }
94a6b54f 1368 }
432d268c 1369
e890261f
MT
1370 return -1;
1371}
f471a17e 1372
e890261f
MT
1373/* Some of the softmmu routines need to translate from a host pointer
1374 (typically a TLB entry) back to a ram offset. */
1375ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1376{
1377 ram_addr_t ram_addr;
f471a17e 1378
e890261f
MT
1379 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1380 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1381 abort();
1382 }
1383 return ram_addr;
5579c7f3
PB
1384}
1385
a8170e5e 1386static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1387 unsigned size)
e18231a3
BS
1388{
1389#ifdef DEBUG_UNASSIGNED
1390 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1391#endif
5b450407 1392#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1393 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
1394#endif
1395 return 0;
1396}
1397
a8170e5e 1398static void unassigned_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1399 uint64_t val, unsigned size)
e18231a3
BS
1400{
1401#ifdef DEBUG_UNASSIGNED
0e0df1e2 1402 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 1403#endif
5b450407 1404#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1405 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 1406#endif
33417e70
FB
1407}
1408
0e0df1e2
AK
1409static const MemoryRegionOps unassigned_mem_ops = {
1410 .read = unassigned_mem_read,
1411 .write = unassigned_mem_write,
1412 .endianness = DEVICE_NATIVE_ENDIAN,
1413};
e18231a3 1414
a8170e5e 1415static uint64_t error_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1416 unsigned size)
e18231a3 1417{
0e0df1e2 1418 abort();
e18231a3
BS
1419}
1420
0e0df1e2
AK
1421static const MemoryRegionOps rom_mem_ops = {
1422 .read = error_mem_read,
1423 .write = unassigned_mem_write,
1424 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
1425};
1426
a8170e5e 1427static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1428 uint64_t val, unsigned size)
9fa3e853 1429{
3a7d929e 1430 int dirty_flags;
f7c11b53 1431 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1432 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
0e0df1e2 1433 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1434 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1435 }
0e0df1e2
AK
1436 switch (size) {
1437 case 1:
1438 stb_p(qemu_get_ram_ptr(ram_addr), val);
1439 break;
1440 case 2:
1441 stw_p(qemu_get_ram_ptr(ram_addr), val);
1442 break;
1443 case 4:
1444 stl_p(qemu_get_ram_ptr(ram_addr), val);
1445 break;
1446 default:
1447 abort();
3a7d929e 1448 }
f23db169 1449 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1450 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1451 /* we remove the notdirty callback only if the code has been
1452 flushed */
1453 if (dirty_flags == 0xff)
2e70f6ef 1454 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
1455}
1456
0e0df1e2
AK
1457static const MemoryRegionOps notdirty_mem_ops = {
1458 .read = error_mem_read,
1459 .write = notdirty_mem_write,
1460 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1461};
1462
0f459d16 1463/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1464static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1465{
9349b4f9 1466 CPUArchState *env = cpu_single_env;
06d55cc1 1467 target_ulong pc, cs_base;
0f459d16 1468 target_ulong vaddr;
a1d1bb31 1469 CPUWatchpoint *wp;
06d55cc1 1470 int cpu_flags;
0f459d16 1471
06d55cc1
AL
1472 if (env->watchpoint_hit) {
1473 /* We re-entered the check after replacing the TB. Now raise
 1474 * the debug interrupt so that it will trigger after the
1475 * current instruction. */
c3affe56 1476 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1477 return;
1478 }
2e70f6ef 1479 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1480 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1481 if ((vaddr == (wp->vaddr & len_mask) ||
1482 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1483 wp->flags |= BP_WATCHPOINT_HIT;
1484 if (!env->watchpoint_hit) {
1485 env->watchpoint_hit = wp;
5a316526 1486 tb_check_watchpoint(env);
6e140f28
AL
1487 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1488 env->exception_index = EXCP_DEBUG;
488d6577 1489 cpu_loop_exit(env);
6e140f28
AL
1490 } else {
1491 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1492 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1493 cpu_resume_from_signal(env, NULL);
6e140f28 1494 }
06d55cc1 1495 }
6e140f28
AL
1496 } else {
1497 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1498 }
1499 }
1500}
1501
6658ffb8
PB
1502/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1503 so these check for a hit then pass through to the normal out-of-line
1504 phys routines. */
a8170e5e 1505static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1506 unsigned size)
6658ffb8 1507{
1ec9b909
AK
1508 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1509 switch (size) {
1510 case 1: return ldub_phys(addr);
1511 case 2: return lduw_phys(addr);
1512 case 4: return ldl_phys(addr);
1513 default: abort();
1514 }
6658ffb8
PB
1515}
1516
a8170e5e 1517static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1518 uint64_t val, unsigned size)
6658ffb8 1519{
1ec9b909
AK
1520 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1521 switch (size) {
67364150
MF
1522 case 1:
1523 stb_phys(addr, val);
1524 break;
1525 case 2:
1526 stw_phys(addr, val);
1527 break;
1528 case 4:
1529 stl_phys(addr, val);
1530 break;
1ec9b909
AK
1531 default: abort();
1532 }
6658ffb8
PB
1533}
1534
1ec9b909
AK
1535static const MemoryRegionOps watch_mem_ops = {
1536 .read = watch_mem_read,
1537 .write = watch_mem_write,
1538 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1539};
6658ffb8 1540
a8170e5e 1541static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1542 unsigned len)
db7b5426 1543{
70c68e44 1544 subpage_t *mmio = opaque;
f6405247 1545 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1546 MemoryRegionSection *section;
db7b5426
BS
1547#if defined(DEBUG_SUBPAGE)
1548 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1549 mmio, len, addr, idx);
1550#endif
db7b5426 1551
5312bd8b
AK
1552 section = &phys_sections[mmio->sub_section[idx]];
1553 addr += mmio->base;
1554 addr -= section->offset_within_address_space;
1555 addr += section->offset_within_region;
37ec01d4 1556 return io_mem_read(section->mr, addr, len);
db7b5426
BS
1557}
1558
a8170e5e 1559static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1560 uint64_t value, unsigned len)
db7b5426 1561{
70c68e44 1562 subpage_t *mmio = opaque;
f6405247 1563 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1564 MemoryRegionSection *section;
db7b5426 1565#if defined(DEBUG_SUBPAGE)
70c68e44
AK
1566 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1567 " idx %d value %"PRIx64"\n",
f6405247 1568 __func__, mmio, len, addr, idx, value);
db7b5426 1569#endif
f6405247 1570
5312bd8b
AK
1571 section = &phys_sections[mmio->sub_section[idx]];
1572 addr += mmio->base;
1573 addr -= section->offset_within_address_space;
1574 addr += section->offset_within_region;
37ec01d4 1575 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
1576}
1577
70c68e44
AK
1578static const MemoryRegionOps subpage_ops = {
1579 .read = subpage_read,
1580 .write = subpage_write,
1581 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1582};
1583
a8170e5e 1584static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
de712f94 1585 unsigned size)
56384e8b
AF
1586{
1587 ram_addr_t raddr = addr;
1588 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
1589 switch (size) {
1590 case 1: return ldub_p(ptr);
1591 case 2: return lduw_p(ptr);
1592 case 4: return ldl_p(ptr);
1593 default: abort();
1594 }
56384e8b
AF
1595}
1596
a8170e5e 1597static void subpage_ram_write(void *opaque, hwaddr addr,
de712f94 1598 uint64_t value, unsigned size)
56384e8b
AF
1599{
1600 ram_addr_t raddr = addr;
1601 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
1602 switch (size) {
1603 case 1: return stb_p(ptr, value);
1604 case 2: return stw_p(ptr, value);
1605 case 4: return stl_p(ptr, value);
1606 default: abort();
1607 }
56384e8b
AF
1608}
1609
de712f94
AK
1610static const MemoryRegionOps subpage_ram_ops = {
1611 .read = subpage_ram_read,
1612 .write = subpage_ram_write,
1613 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
1614};
1615
c227f099 1616static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1617 uint16_t section)
db7b5426
BS
1618{
1619 int idx, eidx;
1620
1621 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1622 return -1;
1623 idx = SUBPAGE_IDX(start);
1624 eidx = SUBPAGE_IDX(end);
1625#if defined(DEBUG_SUBPAGE)
0bf9e31a 1626 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
db7b5426 1627 mmio, start, end, idx, eidx, section);
1628#endif
5312bd8b
AK
1629 if (memory_region_is_ram(phys_sections[section].mr)) {
1630 MemoryRegionSection new_section = phys_sections[section];
1631 new_section.mr = &io_mem_subpage_ram;
1632 section = phys_section_add(&new_section);
56384e8b 1633 }
db7b5426 1634 for (; idx <= eidx; idx++) {
5312bd8b 1635 mmio->sub_section[idx] = section;
db7b5426
BS
1636 }
1637
1638 return 0;
1639}
1640
a8170e5e 1641static subpage_t *subpage_init(hwaddr base)
db7b5426 1642{
c227f099 1643 subpage_t *mmio;
db7b5426 1644
7267c094 1645 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
1646
1647 mmio->base = base;
70c68e44
AK
1648 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1649 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1650 mmio->iomem.subpage = true;
db7b5426 1651#if defined(DEBUG_SUBPAGE)
1eec614b 1652 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1653 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1654#endif
0f0cb164 1655 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
1656
1657 return mmio;
1658}
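/* A subpage splits a single target page into byte-granular ranges: the
 * sub_section[] table maps each byte offset to a phys_sections index, and any
 * range not explicitly installed by subpage_register() stays on
 * phys_section_unassigned. */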
1659
5312bd8b
AK
1660static uint16_t dummy_section(MemoryRegion *mr)
1661{
1662 MemoryRegionSection section = {
1663 .mr = mr,
1664 .offset_within_address_space = 0,
1665 .offset_within_region = 0,
1666 .size = UINT64_MAX,
1667 };
1668
1669 return phys_section_add(&section);
1670}
1671
a8170e5e 1672MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1673{
37ec01d4 1674 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1675}
1676
e9179ce1
AK
1677static void io_mem_init(void)
1678{
0e0df1e2
AK
1679 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1680 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1681 "unassigned", UINT64_MAX);
1682 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1683 "notdirty", UINT64_MAX);
de712f94
AK
1684 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1685 "subpage-ram", UINT64_MAX);
1ec9b909
AK
1686 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1687 "watch", UINT64_MAX);
e9179ce1
AK
1688}
1689
ac1970fb
AK
1690static void mem_begin(MemoryListener *listener)
1691{
1692 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1693
1694 destroy_all_mappings(d);
1695 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1696}
1697
50c1e149
AK
1698static void core_begin(MemoryListener *listener)
1699{
5312bd8b
AK
1700 phys_sections_clear();
1701 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
1702 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1703 phys_section_rom = dummy_section(&io_mem_rom);
1704 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
1705}
1706
1d71148e 1707static void tcg_commit(MemoryListener *listener)
50c1e149 1708{
9349b4f9 1709 CPUArchState *env;
117712c3
AK
1710
1711 /* since each CPU stores ram addresses in its TLB cache, we must
1712 reset the modified entries */
1713 /* XXX: slow ! */
1714 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1715 tlb_flush(env, 1);
1716 }
50c1e149
AK
1717}
1718
93632747
AK
1719static void core_log_global_start(MemoryListener *listener)
1720{
1721 cpu_physical_memory_set_dirty_tracking(1);
1722}
1723
1724static void core_log_global_stop(MemoryListener *listener)
1725{
1726 cpu_physical_memory_set_dirty_tracking(0);
1727}
1728
4855d41a
AK
1729static void io_region_add(MemoryListener *listener,
1730 MemoryRegionSection *section)
1731{
a2d33521
AK
1732 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1733
1734 mrio->mr = section->mr;
1735 mrio->offset = section->offset_within_region;
1736 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 1737 section->offset_within_address_space, section->size);
a2d33521 1738 ioport_register(&mrio->iorange);
4855d41a
AK
1739}
1740
1741static void io_region_del(MemoryListener *listener,
1742 MemoryRegionSection *section)
1743{
1744 isa_unassign_ioport(section->offset_within_address_space, section->size);
1745}
1746
93632747 1747static MemoryListener core_memory_listener = {
50c1e149 1748 .begin = core_begin,
93632747
AK
1749 .log_global_start = core_log_global_start,
1750 .log_global_stop = core_log_global_stop,
ac1970fb 1751 .priority = 1,
93632747
AK
1752};
1753
4855d41a
AK
1754static MemoryListener io_memory_listener = {
1755 .region_add = io_region_add,
1756 .region_del = io_region_del,
4855d41a
AK
1757 .priority = 0,
1758};
1759
1d71148e
AK
1760static MemoryListener tcg_memory_listener = {
1761 .commit = tcg_commit,
1762};
1763
ac1970fb
AK
1764void address_space_init_dispatch(AddressSpace *as)
1765{
1766 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1767
1768 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1769 d->listener = (MemoryListener) {
1770 .begin = mem_begin,
1771 .region_add = mem_add,
1772 .region_nop = mem_add,
1773 .priority = 0,
1774 };
1775 as->dispatch = d;
1776 memory_listener_register(&d->listener, as);
1777}
1778
83f3c251
AK
1779void address_space_destroy_dispatch(AddressSpace *as)
1780{
1781 AddressSpaceDispatch *d = as->dispatch;
1782
1783 memory_listener_unregister(&d->listener);
1784 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1785 g_free(d);
1786 as->dispatch = NULL;
1787}
1788
62152b8a
AK
1789static void memory_map_init(void)
1790{
7267c094 1791 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1792 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1793 address_space_init(&address_space_memory, system_memory);
1794 address_space_memory.name = "memory";
309cb471 1795
7267c094 1796 system_io = g_malloc(sizeof(*system_io));
309cb471 1797 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1798 address_space_init(&address_space_io, system_io);
1799 address_space_io.name = "I/O";
93632747 1800
f6790af6
AK
1801 memory_listener_register(&core_memory_listener, &address_space_memory);
1802 memory_listener_register(&io_memory_listener, &address_space_io);
1803 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1804
1805 dma_context_init(&dma_context_memory, &address_space_memory,
1806 NULL, NULL, NULL);
62152b8a
AK
1807}
1808
1809MemoryRegion *get_system_memory(void)
1810{
1811 return system_memory;
1812}
1813
309cb471
AK
1814MemoryRegion *get_system_io(void)
1815{
1816 return system_io;
1817}
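/* Illustrative sketch (not part of exec.c): a board model typically maps
 * its RAM into the region returned by get_system_memory().  The region
 * name and size below are made up, and the name/size form of
 * memory_region_init_ram() is assumed.
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *     memory_region_init_ram(ram, "example.ram", 64 * 1024 * 1024);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */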
1818
e2eef170
PB
1819#endif /* !defined(CONFIG_USER_ONLY) */
1820
13eb76e0
FB
1821/* physical memory access (slow version, mainly for debug) */
1822#if defined(CONFIG_USER_ONLY)
9349b4f9 1823int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1824 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1825{
1826 int l, flags;
1827 target_ulong page;
53a5960a 1828 void * p;
13eb76e0
FB
1829
1830 while (len > 0) {
1831 page = addr & TARGET_PAGE_MASK;
1832 l = (page + TARGET_PAGE_SIZE) - addr;
1833 if (l > len)
1834 l = len;
1835 flags = page_get_flags(page);
1836 if (!(flags & PAGE_VALID))
a68fe89c 1837 return -1;
13eb76e0
FB
1838 if (is_write) {
1839 if (!(flags & PAGE_WRITE))
a68fe89c 1840 return -1;
579a97f7 1841 /* XXX: this code should not depend on lock_user */
72fb7daa 1842 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1843 return -1;
72fb7daa
AJ
1844 memcpy(p, buf, l);
1845 unlock_user(p, addr, l);
13eb76e0
FB
1846 } else {
1847 if (!(flags & PAGE_READ))
a68fe89c 1848 return -1;
579a97f7 1849 /* XXX: this code should not depend on lock_user */
72fb7daa 1850 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1851 return -1;
72fb7daa 1852 memcpy(buf, p, l);
5b257578 1853 unlock_user(p, addr, 0);
13eb76e0
FB
1854 }
1855 len -= l;
1856 buf += l;
1857 addr += l;
1858 }
a68fe89c 1859 return 0;
13eb76e0 1860}
8df1cd07 1861
13eb76e0 1862#else
51d7a9eb 1863
a8170e5e
AK
1864static void invalidate_and_set_dirty(hwaddr addr,
1865 hwaddr length)
51d7a9eb
AP
1866{
1867 if (!cpu_physical_memory_is_dirty(addr)) {
1868 /* invalidate code */
1869 tb_invalidate_phys_page_range(addr, addr + length, 0);
1870 /* set dirty bit */
1871 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1872 }
e226939d 1873 xen_modified_memory(addr, length);
51d7a9eb
AP
1874}
1875
a8170e5e 1876void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1877 int len, bool is_write)
13eb76e0 1878{
ac1970fb 1879 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1880 int l;
13eb76e0
FB
1881 uint8_t *ptr;
1882 uint32_t val;
a8170e5e 1883 hwaddr page;
f3705d53 1884 MemoryRegionSection *section;
3b46e624 1885
13eb76e0
FB
1886 while (len > 0) {
1887 page = addr & TARGET_PAGE_MASK;
1888 l = (page + TARGET_PAGE_SIZE) - addr;
1889 if (l > len)
1890 l = len;
ac1970fb 1891 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1892
13eb76e0 1893 if (is_write) {
f3705d53 1894 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1895 hwaddr addr1;
cc5bea60 1896 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
1897 /* XXX: could force cpu_single_env to NULL to avoid
1898 potential bugs */
6c2934db 1899 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1900 /* 32 bit write access */
c27004ec 1901 val = ldl_p(buf);
37ec01d4 1902 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1903 l = 4;
6c2934db 1904 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1905 /* 16 bit write access */
c27004ec 1906 val = lduw_p(buf);
37ec01d4 1907 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
1908 l = 2;
1909 } else {
1c213d19 1910 /* 8 bit write access */
c27004ec 1911 val = ldub_p(buf);
37ec01d4 1912 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
1913 l = 1;
1914 }
f3705d53 1915 } else if (!section->readonly) {
8ca5692d 1916 ram_addr_t addr1;
f3705d53 1917 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1918 + memory_region_section_addr(section, addr);
13eb76e0 1919 /* RAM case */
5579c7f3 1920 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1921 memcpy(ptr, buf, l);
51d7a9eb 1922 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1923 }
1924 } else {
cc5bea60
BS
1925 if (!(memory_region_is_ram(section->mr) ||
1926 memory_region_is_romd(section->mr))) {
a8170e5e 1927 hwaddr addr1;
13eb76e0 1928 /* I/O case */
cc5bea60 1929 addr1 = memory_region_section_addr(section, addr);
6c2934db 1930 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1931 /* 32 bit read access */
37ec01d4 1932 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1933 stl_p(buf, val);
13eb76e0 1934 l = 4;
6c2934db 1935 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1936 /* 16 bit read access */
37ec01d4 1937 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1938 stw_p(buf, val);
13eb76e0
FB
1939 l = 2;
1940 } else {
1c213d19 1941 /* 8 bit read access */
37ec01d4 1942 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1943 stb_p(buf, val);
13eb76e0
FB
1944 l = 1;
1945 }
1946 } else {
1947 /* RAM case */
0a1b357f 1948 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
1949 + memory_region_section_addr(section,
1950 addr));
f3705d53 1951 memcpy(buf, ptr, l);
13eb76e0
FB
1952 }
1953 }
1954 len -= l;
1955 buf += l;
1956 addr += l;
1957 }
1958}
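/* Illustrative sketch: a DMA-capable device model pushing a buffer into
 * guest memory through the global address space ("dma_addr", "data" and
 * "data_len" are hypothetical).
 *
 *     address_space_rw(&address_space_memory, dma_addr,
 *                      (uint8_t *)data, data_len, true);
 */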
8df1cd07 1959
a8170e5e 1960void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1961 const uint8_t *buf, int len)
1962{
1963 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1964}
1965
1966/**
1967 * address_space_read: read from an address space.
1968 *
1969 * @as: #AddressSpace to be accessed
1970 * @addr: address within that address space
1971 * @buf: buffer with the data transferred
 * @len: length of the transfer in bytes
1972 */
a8170e5e 1973void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
1974{
1975 address_space_rw(as, addr, buf, len, false);
1976}
1977
1978
a8170e5e 1979void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1980 int len, int is_write)
1981{
1982 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1983}
1984
d0ecd2aa 1985/* used for ROM loading : can write in RAM and ROM */
a8170e5e 1986void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1987 const uint8_t *buf, int len)
1988{
ac1970fb 1989 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
1990 int l;
1991 uint8_t *ptr;
a8170e5e 1992 hwaddr page;
f3705d53 1993 MemoryRegionSection *section;
3b46e624 1994
d0ecd2aa
FB
1995 while (len > 0) {
1996 page = addr & TARGET_PAGE_MASK;
1997 l = (page + TARGET_PAGE_SIZE) - addr;
1998 if (l > len)
1999 l = len;
ac1970fb 2000 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 2001
cc5bea60
BS
2002 if (!(memory_region_is_ram(section->mr) ||
2003 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
2004 /* do nothing */
2005 } else {
2006 unsigned long addr1;
f3705d53 2007 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 2008 + memory_region_section_addr(section, addr);
d0ecd2aa 2009 /* ROM/RAM case */
5579c7f3 2010 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2011 memcpy(ptr, buf, l);
51d7a9eb 2012 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2013 }
2014 len -= l;
2015 buf += l;
2016 addr += l;
2017 }
2018}
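/* Illustrative sketch: a firmware loader can use this to fill a ROM
 * MemoryRegion that an ordinary cpu_physical_memory_write() would leave
 * untouched ("rom_base", "blob" and "blob_size" are hypothetical).
 *
 *     cpu_physical_memory_write_rom(rom_base, blob, blob_size);
 */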
2019
6d16c2f8
AL
2020typedef struct {
2021 void *buffer;
a8170e5e
AK
2022 hwaddr addr;
2023 hwaddr len;
6d16c2f8
AL
2024} BounceBuffer;
2025
2026static BounceBuffer bounce;
2027
ba223c29
AL
2028typedef struct MapClient {
2029 void *opaque;
2030 void (*callback)(void *opaque);
72cf2d4f 2031 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2032} MapClient;
2033
72cf2d4f
BS
2034static QLIST_HEAD(map_client_list, MapClient) map_client_list
2035 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2036
2037void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2038{
7267c094 2039 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2040
2041 client->opaque = opaque;
2042 client->callback = callback;
72cf2d4f 2043 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2044 return client;
2045}
2046
8b9c99d9 2047static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2048{
2049 MapClient *client = (MapClient *)_client;
2050
72cf2d4f 2051 QLIST_REMOVE(client, link);
7267c094 2052 g_free(client);
ba223c29
AL
2053}
2054
2055static void cpu_notify_map_clients(void)
2056{
2057 MapClient *client;
2058
72cf2d4f
BS
2059 while (!QLIST_EMPTY(&map_client_list)) {
2060 client = QLIST_FIRST(&map_client_list);
ba223c29 2061 client->callback(client->opaque);
34d5e948 2062 cpu_unregister_map_client(client);
ba223c29
AL
2063 }
2064}
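/* Illustrative sketch: a device that could not complete address_space_map()
 * because the single bounce buffer was busy registers a callback and retries
 * once the buffer is released ("retry_dma", "ExampleDev" and "dev" are
 * hypothetical).
 *
 *     static void retry_dma(void *opaque)
 *     {
 *         ExampleDev *dev = opaque;
 *         ... restart the stalled transfer ...
 *     }
 *
 *     cpu_register_map_client(dev, retry_dma);
 */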
2065
6d16c2f8
AL
2066/* Map a physical memory region into a host virtual address.
2067 * May map a subset of the requested range, given by and returned in *plen.
2068 * May return NULL if resources needed to perform the mapping are exhausted.
2069 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2070 * Use cpu_register_map_client() to know when retrying the map operation is
2071 * likely to succeed.
6d16c2f8 2072 */
ac1970fb 2073void *address_space_map(AddressSpace *as,
a8170e5e
AK
2074 hwaddr addr,
2075 hwaddr *plen,
ac1970fb 2076 bool is_write)
6d16c2f8 2077{
ac1970fb 2078 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
2079 hwaddr len = *plen;
2080 hwaddr todo = 0;
6d16c2f8 2081 int l;
a8170e5e 2082 hwaddr page;
f3705d53 2083 MemoryRegionSection *section;
f15fbc4b 2084 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2085 ram_addr_t rlen;
2086 void *ret;
6d16c2f8
AL
2087
2088 while (len > 0) {
2089 page = addr & TARGET_PAGE_MASK;
2090 l = (page + TARGET_PAGE_SIZE) - addr;
2091 if (l > len)
2092 l = len;
ac1970fb 2093 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2094
f3705d53 2095 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2096 if (todo || bounce.buffer) {
6d16c2f8
AL
2097 break;
2098 }
2099 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2100 bounce.addr = addr;
2101 bounce.len = l;
2102 if (!is_write) {
ac1970fb 2103 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2104 }
38bee5dc
SS
2105
2106 *plen = l;
2107 return bounce.buffer;
6d16c2f8 2108 }
8ab934f9 2109 if (!todo) {
f3705d53 2110 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2111 + memory_region_section_addr(section, addr);
8ab934f9 2112 }
6d16c2f8
AL
2113
2114 len -= l;
2115 addr += l;
38bee5dc 2116 todo += l;
6d16c2f8 2117 }
8ab934f9
SS
2118 rlen = todo;
2119 ret = qemu_ram_ptr_length(raddr, &rlen);
2120 *plen = rlen;
2121 return ret;
6d16c2f8
AL
2122}
2123
ac1970fb 2124/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2125 * Will also mark the memory as dirty if is_write == 1. access_len gives
2126 * the amount of memory that was actually read or written by the caller.
2127 */
a8170e5e
AK
2128void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2129 int is_write, hwaddr access_len)
6d16c2f8
AL
2130{
2131 if (buffer != bounce.buffer) {
2132 if (is_write) {
e890261f 2133 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2134 while (access_len) {
2135 unsigned l;
2136 l = TARGET_PAGE_SIZE;
2137 if (l > access_len)
2138 l = access_len;
51d7a9eb 2139 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2140 addr1 += l;
2141 access_len -= l;
2142 }
2143 }
868bb33f 2144 if (xen_enabled()) {
e41d7c69 2145 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2146 }
6d16c2f8
AL
2147 return;
2148 }
2149 if (is_write) {
ac1970fb 2150 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2151 }
f8a83245 2152 qemu_vfree(bounce.buffer);
6d16c2f8 2153 bounce.buffer = NULL;
ba223c29 2154 cpu_notify_map_clients();
6d16c2f8 2155}
d0ecd2aa 2156
a8170e5e
AK
2157void *cpu_physical_memory_map(hwaddr addr,
2158 hwaddr *plen,
ac1970fb
AK
2159 int is_write)
2160{
2161 return address_space_map(&address_space_memory, addr, plen, is_write);
2162}
2163
a8170e5e
AK
2164void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2165 int is_write, hwaddr access_len)
ac1970fb
AK
2166{
2167 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2168}
2169
8df1cd07 2170/* warning: addr must be aligned */
a8170e5e 2171static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2172 enum device_endian endian)
8df1cd07 2173{
8df1cd07
FB
2174 uint8_t *ptr;
2175 uint32_t val;
f3705d53 2176 MemoryRegionSection *section;
8df1cd07 2177
ac1970fb 2178 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2179
cc5bea60
BS
2180 if (!(memory_region_is_ram(section->mr) ||
2181 memory_region_is_romd(section->mr))) {
8df1cd07 2182 /* I/O case */
cc5bea60 2183 addr = memory_region_section_addr(section, addr);
37ec01d4 2184 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
2185#if defined(TARGET_WORDS_BIGENDIAN)
2186 if (endian == DEVICE_LITTLE_ENDIAN) {
2187 val = bswap32(val);
2188 }
2189#else
2190 if (endian == DEVICE_BIG_ENDIAN) {
2191 val = bswap32(val);
2192 }
2193#endif
8df1cd07
FB
2194 } else {
2195 /* RAM case */
f3705d53 2196 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2197 & TARGET_PAGE_MASK)
cc5bea60 2198 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2199 switch (endian) {
2200 case DEVICE_LITTLE_ENDIAN:
2201 val = ldl_le_p(ptr);
2202 break;
2203 case DEVICE_BIG_ENDIAN:
2204 val = ldl_be_p(ptr);
2205 break;
2206 default:
2207 val = ldl_p(ptr);
2208 break;
2209 }
8df1cd07
FB
2210 }
2211 return val;
2212}
2213
a8170e5e 2214uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2215{
2216 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2217}
2218
a8170e5e 2219uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2220{
2221 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2222}
2223
a8170e5e 2224uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2225{
2226 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2227}
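/* Illustrative sketch: device models use the fixed-endian variants when a
 * guest-visible structure has a defined byte order, e.g. a little-endian
 * descriptor at a hypothetical guest physical address "desc_pa".
 *
 *     uint32_t flags = ldl_le_phys(desc_pa);
 *     uint32_t next  = ldl_le_phys(desc_pa + 4);
 */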
2228
84b7b8e7 2229/* warning: addr must be aligned */
a8170e5e 2230static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2231 enum device_endian endian)
84b7b8e7 2232{
84b7b8e7
FB
2233 uint8_t *ptr;
2234 uint64_t val;
f3705d53 2235 MemoryRegionSection *section;
84b7b8e7 2236
ac1970fb 2237 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2238
cc5bea60
BS
2239 if (!(memory_region_is_ram(section->mr) ||
2240 memory_region_is_romd(section->mr))) {
84b7b8e7 2241 /* I/O case */
cc5bea60 2242 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
2243
2244 /* XXX This is broken when device endian != cpu endian.
2245 Fix and add "endian" variable check */
84b7b8e7 2246#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2247 val = io_mem_read(section->mr, addr, 4) << 32;
2248 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2249#else
37ec01d4
AK
2250 val = io_mem_read(section->mr, addr, 4);
2251 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
2252#endif
2253 } else {
2254 /* RAM case */
f3705d53 2255 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2256 & TARGET_PAGE_MASK)
cc5bea60 2257 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2258 switch (endian) {
2259 case DEVICE_LITTLE_ENDIAN:
2260 val = ldq_le_p(ptr);
2261 break;
2262 case DEVICE_BIG_ENDIAN:
2263 val = ldq_be_p(ptr);
2264 break;
2265 default:
2266 val = ldq_p(ptr);
2267 break;
2268 }
84b7b8e7
FB
2269 }
2270 return val;
2271}
2272
a8170e5e 2273uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2274{
2275 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2276}
2277
a8170e5e 2278uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2279{
2280 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2281}
2282
a8170e5e 2283uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2284{
2285 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2286}
2287
aab33094 2288/* XXX: optimize */
a8170e5e 2289uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2290{
2291 uint8_t val;
2292 cpu_physical_memory_read(addr, &val, 1);
2293 return val;
2294}
2295
733f0b02 2296/* warning: addr must be aligned */
a8170e5e 2297static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2298 enum device_endian endian)
aab33094 2299{
733f0b02
MT
2300 uint8_t *ptr;
2301 uint64_t val;
f3705d53 2302 MemoryRegionSection *section;
733f0b02 2303
ac1970fb 2304 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2305
cc5bea60
BS
2306 if (!(memory_region_is_ram(section->mr) ||
2307 memory_region_is_romd(section->mr))) {
733f0b02 2308 /* I/O case */
cc5bea60 2309 addr = memory_region_section_addr(section, addr);
37ec01d4 2310 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
2311#if defined(TARGET_WORDS_BIGENDIAN)
2312 if (endian == DEVICE_LITTLE_ENDIAN) {
2313 val = bswap16(val);
2314 }
2315#else
2316 if (endian == DEVICE_BIG_ENDIAN) {
2317 val = bswap16(val);
2318 }
2319#endif
733f0b02
MT
2320 } else {
2321 /* RAM case */
f3705d53 2322 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2323 & TARGET_PAGE_MASK)
cc5bea60 2324 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2325 switch (endian) {
2326 case DEVICE_LITTLE_ENDIAN:
2327 val = lduw_le_p(ptr);
2328 break;
2329 case DEVICE_BIG_ENDIAN:
2330 val = lduw_be_p(ptr);
2331 break;
2332 default:
2333 val = lduw_p(ptr);
2334 break;
2335 }
733f0b02
MT
2336 }
2337 return val;
aab33094
FB
2338}
2339
a8170e5e 2340uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2341{
2342 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2343}
2344
a8170e5e 2345uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2346{
2347 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2348}
2349
a8170e5e 2350uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2351{
2352 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2353}
2354
8df1cd07
FB
2355/* warning: addr must be aligned. The RAM page is not marked as dirty
2356 and the code inside is not invalidated. It is useful if the dirty
2357 bits are used to track modified PTEs */
a8170e5e 2358void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2359{
8df1cd07 2360 uint8_t *ptr;
f3705d53 2361 MemoryRegionSection *section;
8df1cd07 2362
ac1970fb 2363 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2364
f3705d53 2365 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2366 addr = memory_region_section_addr(section, addr);
f3705d53 2367 if (memory_region_is_ram(section->mr)) {
37ec01d4 2368 section = &phys_sections[phys_section_rom];
06ef3525 2369 }
37ec01d4 2370 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2371 } else {
f3705d53 2372 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2373 & TARGET_PAGE_MASK)
cc5bea60 2374 + memory_region_section_addr(section, addr);
5579c7f3 2375 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2376 stl_p(ptr, val);
74576198
AL
2377
2378 if (unlikely(in_migration)) {
2379 if (!cpu_physical_memory_is_dirty(addr1)) {
2380 /* invalidate code */
2381 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2382 /* set dirty bit */
f7c11b53
YT
2383 cpu_physical_memory_set_dirty_flags(
2384 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2385 }
2386 }
8df1cd07
FB
2387 }
2388}
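/* Illustrative sketch: target MMU emulation can use this to set
 * accessed/dirty bits in a guest page table entry without marking the
 * containing RAM page dirty, which matters when the dirty bits are being
 * used to track modified PTEs ("pte_addr", "pte" and "PTE_ACCESSED" are
 * hypothetical).
 *
 *     stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
 */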
2389
2390/* warning: addr must be aligned */
a8170e5e 2391static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2392 enum device_endian endian)
8df1cd07 2393{
8df1cd07 2394 uint8_t *ptr;
f3705d53 2395 MemoryRegionSection *section;
8df1cd07 2396
ac1970fb 2397 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2398
f3705d53 2399 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2400 addr = memory_region_section_addr(section, addr);
f3705d53 2401 if (memory_region_is_ram(section->mr)) {
37ec01d4 2402 section = &phys_sections[phys_section_rom];
06ef3525 2403 }
1e78bcc1
AG
2404#if defined(TARGET_WORDS_BIGENDIAN)
2405 if (endian == DEVICE_LITTLE_ENDIAN) {
2406 val = bswap32(val);
2407 }
2408#else
2409 if (endian == DEVICE_BIG_ENDIAN) {
2410 val = bswap32(val);
2411 }
2412#endif
37ec01d4 2413 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
2414 } else {
2415 unsigned long addr1;
f3705d53 2416 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2417 + memory_region_section_addr(section, addr);
8df1cd07 2418 /* RAM case */
5579c7f3 2419 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2420 switch (endian) {
2421 case DEVICE_LITTLE_ENDIAN:
2422 stl_le_p(ptr, val);
2423 break;
2424 case DEVICE_BIG_ENDIAN:
2425 stl_be_p(ptr, val);
2426 break;
2427 default:
2428 stl_p(ptr, val);
2429 break;
2430 }
51d7a9eb 2431 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2432 }
2433}
2434
a8170e5e 2435void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2436{
2437 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2438}
2439
a8170e5e 2440void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2441{
2442 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2443}
2444
a8170e5e 2445void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2446{
2447 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2448}
2449
aab33094 2450/* XXX: optimize */
a8170e5e 2451void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2452{
2453 uint8_t v = val;
2454 cpu_physical_memory_write(addr, &v, 1);
2455}
2456
733f0b02 2457/* warning: addr must be aligned */
a8170e5e 2458static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2459 enum device_endian endian)
aab33094 2460{
733f0b02 2461 uint8_t *ptr;
f3705d53 2462 MemoryRegionSection *section;
733f0b02 2463
ac1970fb 2464 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2465
f3705d53 2466 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2467 addr = memory_region_section_addr(section, addr);
f3705d53 2468 if (memory_region_is_ram(section->mr)) {
37ec01d4 2469 section = &phys_sections[phys_section_rom];
06ef3525 2470 }
1e78bcc1
AG
2471#if defined(TARGET_WORDS_BIGENDIAN)
2472 if (endian == DEVICE_LITTLE_ENDIAN) {
2473 val = bswap16(val);
2474 }
2475#else
2476 if (endian == DEVICE_BIG_ENDIAN) {
2477 val = bswap16(val);
2478 }
2479#endif
37ec01d4 2480 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
2481 } else {
2482 unsigned long addr1;
f3705d53 2483 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2484 + memory_region_section_addr(section, addr);
733f0b02
MT
2485 /* RAM case */
2486 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2487 switch (endian) {
2488 case DEVICE_LITTLE_ENDIAN:
2489 stw_le_p(ptr, val);
2490 break;
2491 case DEVICE_BIG_ENDIAN:
2492 stw_be_p(ptr, val);
2493 break;
2494 default:
2495 stw_p(ptr, val);
2496 break;
2497 }
51d7a9eb 2498 invalidate_and_set_dirty(addr1, 2);
733f0b02 2499 }
aab33094
FB
2500}
2501
a8170e5e 2502void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2503{
2504 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2505}
2506
a8170e5e 2507void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2508{
2509 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2510}
2511
a8170e5e 2512void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2513{
2514 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2515}
2516
aab33094 2517/* XXX: optimize */
a8170e5e 2518void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2519{
2520 val = tswap64(val);
71d2b725 2521 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2522}
2523
a8170e5e 2524void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2525{
2526 val = cpu_to_le64(val);
2527 cpu_physical_memory_write(addr, &val, 8);
2528}
2529
a8170e5e 2530void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2531{
2532 val = cpu_to_be64(val);
2533 cpu_physical_memory_write(addr, &val, 8);
2534}
2535
5e2972fd 2536/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2537int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2538 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2539{
2540 int l;
a8170e5e 2541 hwaddr phys_addr;
9b3c35e0 2542 target_ulong page;
13eb76e0
FB
2543
2544 while (len > 0) {
2545 page = addr & TARGET_PAGE_MASK;
2546 phys_addr = cpu_get_phys_page_debug(env, page);
2547 /* if no physical page mapped, return an error */
2548 if (phys_addr == -1)
2549 return -1;
2550 l = (page + TARGET_PAGE_SIZE) - addr;
2551 if (l > len)
2552 l = len;
5e2972fd 2553 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2554 if (is_write)
2555 cpu_physical_memory_write_rom(phys_addr, buf, l);
2556 else
5e2972fd 2557 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2558 len -= l;
2559 buf += l;
2560 addr += l;
2561 }
2562 return 0;
2563}
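/* Illustrative sketch: the gdb stub reads guest virtual memory through this
 * helper and gets -1 back when no physical page is mapped ("env", "vaddr"
 * and "buf" are hypothetical).
 *
 *     if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
 *         ... report the address as inaccessible to the debugger ...
 *     }
 */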
a68fe89c 2564#endif
13eb76e0 2565
8e4a424b
BS
2566#if !defined(CONFIG_USER_ONLY)
2567
2568/*
2569 * A helper function for the _utterly broken_ virtio device model to find out if
2570 * it's running on a big endian machine. Don't do this at home kids!
2571 */
2572bool virtio_is_big_endian(void);
2573bool virtio_is_big_endian(void)
2574{
2575#if defined(TARGET_WORDS_BIGENDIAN)
2576 return true;
2577#else
2578 return false;
2579#endif
2580}
2581
2582#endif
2583
76f35538 2584#ifndef CONFIG_USER_ONLY
a8170e5e 2585bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2586{
2587 MemoryRegionSection *section;
2588
ac1970fb
AK
2589 section = phys_page_find(address_space_memory.dispatch,
2590 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
2591
2592 return !(memory_region_is_ram(section->mr) ||
2593 memory_region_is_romd(section->mr));
2594}
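/* Illustrative sketch: callers use this to decide whether a faulting guest
 * physical address is backed by RAM/ROM or by an I/O region ("paddr" is
 * hypothetical).
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         ... handle as an MMIO access ...
 *     }
 */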
2595#endif