/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

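/* Fill [*index, *index + *nb) at the given level of the physical page map
   with the section number 'leaf', allocating intermediate nodes on demand
   and recursing for entries that are only partially covered. */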
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

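/* Walk the phys map from the root down to a leaf and return the
   MemoryRegionSection registered for a page index; pages with no
   mapping resolve to the unassigned section. */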
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);

}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

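/* Compute the iotlb value stored in a TLB entry: for RAM sections this is
   the ram address tagged with the notdirty or rom section, for MMIO it is
   the section index plus the offset within the section; pages covered by a
   suitable watchpoint are routed through the watch section and marked
   TLB_MMIO. */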
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

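/* Listener callback invoked for every section when an address space's
   topology changes: sub-page-sized or misaligned pieces are registered
   through register_subpage, page-aligned runs through register_multipage. */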
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

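/* Back a RAMBlock with a hugetlbfs file for -mem-path: create and unlink a
   temporary file under 'path', grow it to the huge-page-aligned size and
   mmap it into the process. */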
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

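/* Choose an offset in the ram_addr_t space for a new block: scan the
   existing blocks and return the start of the smallest gap that can hold
   'size' bytes. */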
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

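/* Register a new RAMBlock of 'size' bytes, backed either by caller-provided
   memory ('host'), by a hugetlbfs file when -mem-path is in use, or by
   anonymous memory otherwise; the block list is kept sorted by size. */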
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case append when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

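/* Write handler for pages whose dirty state is being tracked: invalidate any
   translated code on the page, perform the store, update the dirty flags and
   drop the notdirty callback once no translated code remains. */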
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

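/* Allocate a subpage container for the page starting at 'base': its I/O ops
   forward each access to the section registered for that sub-page offset,
   with every slot initially pointing at the unassigned section. */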
static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

1772void address_space_init_dispatch(AddressSpace *as)
1773{
1774 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1775
1776 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1777 d->listener = (MemoryListener) {
1778 .begin = mem_begin,
1779 .region_add = mem_add,
1780 .region_nop = mem_add,
1781 .priority = 0,
1782 };
1783 as->dispatch = d;
1784 memory_listener_register(&d->listener, as);
1785}
1786
83f3c251
AK
1787void address_space_destroy_dispatch(AddressSpace *as)
1788{
1789 AddressSpaceDispatch *d = as->dispatch;
1790
1791 memory_listener_unregister(&d->listener);
1792 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1793 g_free(d);
1794 as->dispatch = NULL;
1795}
1796
62152b8a
AK
1797static void memory_map_init(void)
1798{
7267c094 1799 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1800 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1801 address_space_init(&address_space_memory, system_memory);
1802 address_space_memory.name = "memory";
309cb471 1803
7267c094 1804 system_io = g_malloc(sizeof(*system_io));
309cb471 1805 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1806 address_space_init(&address_space_io, system_io);
1807 address_space_io.name = "I/O";
93632747 1808
f6790af6
AK
1809 memory_listener_register(&core_memory_listener, &address_space_memory);
1810 memory_listener_register(&io_memory_listener, &address_space_io);
1811 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1812
1813 dma_context_init(&dma_context_memory, &address_space_memory,
1814 NULL, NULL, NULL);
62152b8a
AK
1815}
1816
1817MemoryRegion *get_system_memory(void)
1818{
1819 return system_memory;
1820}
1821
309cb471
AK
1822MemoryRegion *get_system_io(void)
1823{
1824 return system_io;
1825}
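For reference, a minimal sketch (not part of this file) of how board code of this era typically populates the system memory region returned by get_system_memory(). The RAM name, size, and mapping address are illustrative assumptions:

/* Illustrative only: a board model adding guest RAM to the system memory
 * region created by memory_map_init(). */
static void example_board_init_ram(ram_addr_t ram_size)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    /* allocate host memory backing the guest RAM */
    memory_region_init_ram(ram, "example.ram", ram_size);
    vmstate_register_ram_global(ram);
    /* map it at guest physical address 0 */
    memory_region_add_subregion(get_system_memory(), 0, ram);
}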
1826
e2eef170
PB
1827#endif /* !defined(CONFIG_USER_ONLY) */
1828
13eb76e0
FB
1829/* physical memory access (slow version, mainly for debug) */
1830#if defined(CONFIG_USER_ONLY)
9349b4f9 1831int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1832 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1833{
1834 int l, flags;
1835 target_ulong page;
53a5960a 1836 void * p;
13eb76e0
FB
1837
1838 while (len > 0) {
1839 page = addr & TARGET_PAGE_MASK;
1840 l = (page + TARGET_PAGE_SIZE) - addr;
1841 if (l > len)
1842 l = len;
1843 flags = page_get_flags(page);
1844 if (!(flags & PAGE_VALID))
a68fe89c 1845 return -1;
13eb76e0
FB
1846 if (is_write) {
1847 if (!(flags & PAGE_WRITE))
a68fe89c 1848 return -1;
579a97f7 1849 /* XXX: this code should not depend on lock_user */
72fb7daa 1850 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1851 return -1;
72fb7daa
AJ
1852 memcpy(p, buf, l);
1853 unlock_user(p, addr, l);
13eb76e0
FB
1854 } else {
1855 if (!(flags & PAGE_READ))
a68fe89c 1856 return -1;
579a97f7 1857 /* XXX: this code should not depend on lock_user */
72fb7daa 1858 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1859 return -1;
72fb7daa 1860 memcpy(buf, p, l);
5b257578 1861 unlock_user(p, addr, 0);
13eb76e0
FB
1862 }
1863 len -= l;
1864 buf += l;
1865 addr += l;
1866 }
a68fe89c 1867 return 0;
13eb76e0 1868}
8df1cd07 1869
13eb76e0 1870#else
51d7a9eb 1871
a8170e5e
AK
1872static void invalidate_and_set_dirty(hwaddr addr,
1873 hwaddr length)
51d7a9eb
AP
1874{
1875 if (!cpu_physical_memory_is_dirty(addr)) {
1876 /* invalidate code */
1877 tb_invalidate_phys_page_range(addr, addr + length, 0);
1878 /* set dirty bit */
1879 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1880 }
e226939d 1881 xen_modified_memory(addr, length);
51d7a9eb
AP
1882}
1883
a8170e5e 1884void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1885 int len, bool is_write)
13eb76e0 1886{
ac1970fb 1887 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1888 int l;
13eb76e0
FB
1889 uint8_t *ptr;
1890 uint32_t val;
a8170e5e 1891 hwaddr page;
f3705d53 1892 MemoryRegionSection *section;
3b46e624 1893
13eb76e0
FB
1894 while (len > 0) {
1895 page = addr & TARGET_PAGE_MASK;
1896 l = (page + TARGET_PAGE_SIZE) - addr;
1897 if (l > len)
1898 l = len;
ac1970fb 1899 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1900
13eb76e0 1901 if (is_write) {
f3705d53 1902 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1903 hwaddr addr1;
cc5bea60 1904 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
1905 /* XXX: could force cpu_single_env to NULL to avoid
1906 potential bugs */
6c2934db 1907 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1908 /* 32 bit write access */
c27004ec 1909 val = ldl_p(buf);
37ec01d4 1910 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1911 l = 4;
6c2934db 1912 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1913 /* 16 bit write access */
c27004ec 1914 val = lduw_p(buf);
37ec01d4 1915 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
1916 l = 2;
1917 } else {
1c213d19 1918 /* 8 bit write access */
c27004ec 1919 val = ldub_p(buf);
37ec01d4 1920 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
1921 l = 1;
1922 }
f3705d53 1923 } else if (!section->readonly) {
8ca5692d 1924 ram_addr_t addr1;
f3705d53 1925 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1926 + memory_region_section_addr(section, addr);
13eb76e0 1927 /* RAM case */
5579c7f3 1928 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1929 memcpy(ptr, buf, l);
51d7a9eb 1930 invalidate_and_set_dirty(addr1, l);
050a0ddf 1931 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1932 }
1933 } else {
cc5bea60
BS
1934 if (!(memory_region_is_ram(section->mr) ||
1935 memory_region_is_romd(section->mr))) {
a8170e5e 1936 hwaddr addr1;
13eb76e0 1937 /* I/O case */
cc5bea60 1938 addr1 = memory_region_section_addr(section, addr);
6c2934db 1939 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1940 /* 32 bit read access */
37ec01d4 1941 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1942 stl_p(buf, val);
13eb76e0 1943 l = 4;
6c2934db 1944 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1945 /* 16 bit read access */
37ec01d4 1946 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1947 stw_p(buf, val);
13eb76e0
FB
1948 l = 2;
1949 } else {
1c213d19 1950 /* 8 bit read access */
37ec01d4 1951 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1952 stb_p(buf, val);
13eb76e0
FB
1953 l = 1;
1954 }
1955 } else {
1956 /* RAM case */
0a1b357f 1957 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
1958 + memory_region_section_addr(section,
1959 addr));
f3705d53 1960 memcpy(buf, ptr, l);
050a0ddf 1961 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1962 }
1963 }
1964 len -= l;
1965 buf += l;
1966 addr += l;
1967 }
1968}
8df1cd07 1969
a8170e5e 1970void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1971 const uint8_t *buf, int len)
1972{
1973 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1974}
1975
1976/**
1977 * address_space_read: read from an address space.
1978 *
1979 * @as: #AddressSpace to be accessed
1980 * @addr: address within that address space
1981 * @buf: buffer with the data transferred
1982 */
a8170e5e 1983void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
1984{
1985 address_space_rw(as, addr, buf, len, false);
1986}
1987
1988
a8170e5e 1989void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1990 int len, int is_write)
1991{
1992 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1993}
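As a usage sketch (the caller and addresses are assumptions, not from this file), writing a buffer into guest physical memory and reading it back goes through this slow path via the convenience wrappers:

/* Illustrative only: copy a buffer into guest physical memory at gpa and
 * read it back through the generic rw path. Assumes the usual QEMU
 * headers (qemu-common.h, exec/cpu-common.h) are included. */
static void example_poke_and_peek(hwaddr gpa, const uint8_t *data, int len)
{
    uint8_t check[64];

    cpu_physical_memory_write(gpa, data, len);       /* is_write = 1 */
    if (len <= (int)sizeof(check)) {
        cpu_physical_memory_read(gpa, check, len);   /* is_write = 0 */
    }
}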
1994
d0ecd2aa 1995/* used for ROM loading : can write in RAM and ROM */
a8170e5e 1996void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1997 const uint8_t *buf, int len)
1998{
ac1970fb 1999 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
2000 int l;
2001 uint8_t *ptr;
a8170e5e 2002 hwaddr page;
f3705d53 2003 MemoryRegionSection *section;
3b46e624 2004
d0ecd2aa
FB
2005 while (len > 0) {
2006 page = addr & TARGET_PAGE_MASK;
2007 l = (page + TARGET_PAGE_SIZE) - addr;
2008 if (l > len)
2009 l = len;
ac1970fb 2010 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 2011
cc5bea60
BS
2012 if (!(memory_region_is_ram(section->mr) ||
2013 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
2014 /* do nothing */
2015 } else {
2016 unsigned long addr1;
f3705d53 2017 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 2018 + memory_region_section_addr(section, addr);
d0ecd2aa 2019 /* ROM/RAM case */
5579c7f3 2020 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2021 memcpy(ptr, buf, l);
51d7a9eb 2022 invalidate_and_set_dirty(addr1, l);
050a0ddf 2023 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
2024 }
2025 len -= l;
2026 buf += l;
2027 addr += l;
2028 }
2029}
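A hedged sketch of the intended use (the load address and blob are made up for illustration): ROM loaders call this helper instead of cpu_physical_memory_write() so that the data lands even in sections marked readonly:

/* Illustrative only: place a firmware image into a ROM region that a
 * normal cpu_physical_memory_write() would silently skip. */
static void example_load_firmware(hwaddr rom_base,
                                  const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(rom_base, blob, blob_len);
}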
2030
6d16c2f8
AL
2031typedef struct {
2032 void *buffer;
a8170e5e
AK
2033 hwaddr addr;
2034 hwaddr len;
6d16c2f8
AL
2035} BounceBuffer;
2036
2037static BounceBuffer bounce;
2038
ba223c29
AL
2039typedef struct MapClient {
2040 void *opaque;
2041 void (*callback)(void *opaque);
72cf2d4f 2042 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2043} MapClient;
2044
72cf2d4f
BS
2045static QLIST_HEAD(map_client_list, MapClient) map_client_list
2046 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2047
2048void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2049{
7267c094 2050 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2051
2052 client->opaque = opaque;
2053 client->callback = callback;
72cf2d4f 2054 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2055 return client;
2056}
2057
8b9c99d9 2058static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2059{
2060 MapClient *client = (MapClient *)_client;
2061
72cf2d4f 2062 QLIST_REMOVE(client, link);
7267c094 2063 g_free(client);
ba223c29
AL
2064}
2065
2066static void cpu_notify_map_clients(void)
2067{
2068 MapClient *client;
2069
72cf2d4f
BS
2070 while (!QLIST_EMPTY(&map_client_list)) {
2071 client = QLIST_FIRST(&map_client_list);
ba223c29 2072 client->callback(client->opaque);
34d5e948 2073 cpu_unregister_map_client(client);
ba223c29
AL
2074 }
2075}
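A minimal sketch (hypothetical device code, not from this file) of the retry protocol these helpers implement: a device whose mapping attempt failed registers a callback and retries once the bounce buffer is released by cpu_notify_map_clients():

/* Illustrative only: retry a DMA mapping when resources free up.
 * example_dma_run() is a hypothetical helper that calls
 * cpu_physical_memory_map() and returns false while the single bounce
 * buffer is still in use. */
static bool example_dma_run(void *opaque);   /* hypothetical */

static void example_dma_retry_cb(void *opaque)
{
    /* invoked from cpu_notify_map_clients() when a mapping was released */
    example_dma_run(opaque);
}

static void example_dma_start(void *dev_state)
{
    if (!example_dma_run(dev_state)) {
        /* mapping failed: ask to be notified when a retry may succeed */
        cpu_register_map_client(dev_state, example_dma_retry_cb);
    }
}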
2076
6d16c2f8
AL
2077/* Map a physical memory region into a host virtual address.
2078 * May map a subset of the requested range, given by and returned in *plen.
2079 * May return NULL if resources needed to perform the mapping are exhausted.
2080 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2081 * Use cpu_register_map_client() to know when retrying the map operation is
2082 * likely to succeed.
6d16c2f8 2083 */
ac1970fb 2084void *address_space_map(AddressSpace *as,
a8170e5e
AK
2085 hwaddr addr,
2086 hwaddr *plen,
ac1970fb 2087 bool is_write)
6d16c2f8 2088{
ac1970fb 2089 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
2090 hwaddr len = *plen;
2091 hwaddr todo = 0;
6d16c2f8 2092 int l;
a8170e5e 2093 hwaddr page;
f3705d53 2094 MemoryRegionSection *section;
f15fbc4b 2095 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2096 ram_addr_t rlen;
2097 void *ret;
6d16c2f8
AL
2098
2099 while (len > 0) {
2100 page = addr & TARGET_PAGE_MASK;
2101 l = (page + TARGET_PAGE_SIZE) - addr;
2102 if (l > len)
2103 l = len;
ac1970fb 2104 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2105
f3705d53 2106 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2107 if (todo || bounce.buffer) {
6d16c2f8
AL
2108 break;
2109 }
2110 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2111 bounce.addr = addr;
2112 bounce.len = l;
2113 if (!is_write) {
ac1970fb 2114 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2115 }
38bee5dc
SS
2116
2117 *plen = l;
2118 return bounce.buffer;
6d16c2f8 2119 }
8ab934f9 2120 if (!todo) {
f3705d53 2121 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2122 + memory_region_section_addr(section, addr);
8ab934f9 2123 }
6d16c2f8
AL
2124
2125 len -= l;
2126 addr += l;
38bee5dc 2127 todo += l;
6d16c2f8 2128 }
8ab934f9
SS
2129 rlen = todo;
2130 ret = qemu_ram_ptr_length(raddr, &rlen);
2131 *plen = rlen;
2132 return ret;
6d16c2f8
AL
2133}
2134
ac1970fb 2135/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2136 * Will also mark the memory as dirty if is_write == 1. access_len gives
2137 * the amount of memory that was actually read or written by the caller.
2138 */
a8170e5e
AK
2139void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2140 int is_write, hwaddr access_len)
6d16c2f8
AL
2141{
2142 if (buffer != bounce.buffer) {
2143 if (is_write) {
e890261f 2144 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2145 while (access_len) {
2146 unsigned l;
2147 l = TARGET_PAGE_SIZE;
2148 if (l > access_len)
2149 l = access_len;
51d7a9eb 2150 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2151 addr1 += l;
2152 access_len -= l;
2153 }
2154 }
868bb33f 2155 if (xen_enabled()) {
e41d7c69 2156 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2157 }
6d16c2f8
AL
2158 return;
2159 }
2160 if (is_write) {
ac1970fb 2161 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2162 }
f8a83245 2163 qemu_vfree(bounce.buffer);
6d16c2f8 2164 bounce.buffer = NULL;
ba223c29 2165 cpu_notify_map_clients();
6d16c2f8 2166}
d0ecd2aa 2167
a8170e5e
AK
2168void *cpu_physical_memory_map(hwaddr addr,
2169 hwaddr *plen,
ac1970fb
AK
2170 int is_write)
2171{
2172 return address_space_map(&address_space_memory, addr, plen, is_write);
2173}
2174
a8170e5e
AK
2175void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2176 int is_write, hwaddr access_len)
ac1970fb
AK
2177{
2178 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2179}
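Putting the two halves together, a hedged sketch of the zero-copy pattern described in the comments above; the guest address, length, and fill pattern are illustrative:

/* Illustrative only: map a guest-physical buffer, fill it, and unmap it
 * again. A shorter-than-requested mapping is simply used as-is here. */
static void example_fill_guest_buffer(hwaddr gpa, hwaddr len, uint8_t pattern)
{
    hwaddr mapped_len = len;
    void *host = cpu_physical_memory_map(gpa, &mapped_len, 1 /* is_write */);

    if (!host) {
        return;  /* resources exhausted; see cpu_register_map_client() */
    }
    memset(host, pattern, mapped_len);
    /* access_len tells unmap how much memory needs dirty tracking */
    cpu_physical_memory_unmap(host, mapped_len, 1, mapped_len);
}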
2180
8df1cd07 2181/* warning: addr must be aligned */
a8170e5e 2182static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2183 enum device_endian endian)
8df1cd07 2184{
8df1cd07
FB
2185 uint8_t *ptr;
2186 uint32_t val;
f3705d53 2187 MemoryRegionSection *section;
8df1cd07 2188
ac1970fb 2189 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2190
cc5bea60
BS
2191 if (!(memory_region_is_ram(section->mr) ||
2192 memory_region_is_romd(section->mr))) {
8df1cd07 2193 /* I/O case */
cc5bea60 2194 addr = memory_region_section_addr(section, addr);
37ec01d4 2195 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
2196#if defined(TARGET_WORDS_BIGENDIAN)
2197 if (endian == DEVICE_LITTLE_ENDIAN) {
2198 val = bswap32(val);
2199 }
2200#else
2201 if (endian == DEVICE_BIG_ENDIAN) {
2202 val = bswap32(val);
2203 }
2204#endif
8df1cd07
FB
2205 } else {
2206 /* RAM case */
f3705d53 2207 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2208 & TARGET_PAGE_MASK)
cc5bea60 2209 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2210 switch (endian) {
2211 case DEVICE_LITTLE_ENDIAN:
2212 val = ldl_le_p(ptr);
2213 break;
2214 case DEVICE_BIG_ENDIAN:
2215 val = ldl_be_p(ptr);
2216 break;
2217 default:
2218 val = ldl_p(ptr);
2219 break;
2220 }
8df1cd07
FB
2221 }
2222 return val;
2223}
2224
a8170e5e 2225uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2226{
2227 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2228}
2229
a8170e5e 2230uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2231{
2232 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2233}
2234
a8170e5e 2235uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2236{
2237 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2238}
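For illustration (the register address is an assumption), a device register that is defined as little-endian regardless of the target CPU is read through the fixed-endian accessor rather than the native-endian ldl_phys():

/* Illustrative only: read a 32-bit little-endian register; the _le
 * accessor byte-swaps only when the target is big-endian. */
static uint32_t example_read_le_reg(hwaddr reg_gpa)
{
    return ldl_le_phys(reg_gpa);
}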
2239
84b7b8e7 2240/* warning: addr must be aligned */
a8170e5e 2241static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2242 enum device_endian endian)
84b7b8e7 2243{
84b7b8e7
FB
2244 uint8_t *ptr;
2245 uint64_t val;
f3705d53 2246 MemoryRegionSection *section;
84b7b8e7 2247
ac1970fb 2248 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2249
cc5bea60
BS
2250 if (!(memory_region_is_ram(section->mr) ||
2251 memory_region_is_romd(section->mr))) {
84b7b8e7 2252 /* I/O case */
cc5bea60 2253 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
2254
2255 /* XXX This is broken when device endian != cpu endian.
2256 Fix and add "endian" variable check */
84b7b8e7 2257#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2258 val = io_mem_read(section->mr, addr, 4) << 32;
2259 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2260#else
37ec01d4
AK
2261 val = io_mem_read(section->mr, addr, 4);
2262 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
2263#endif
2264 } else {
2265 /* RAM case */
f3705d53 2266 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2267 & TARGET_PAGE_MASK)
cc5bea60 2268 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2269 switch (endian) {
2270 case DEVICE_LITTLE_ENDIAN:
2271 val = ldq_le_p(ptr);
2272 break;
2273 case DEVICE_BIG_ENDIAN:
2274 val = ldq_be_p(ptr);
2275 break;
2276 default:
2277 val = ldq_p(ptr);
2278 break;
2279 }
84b7b8e7
FB
2280 }
2281 return val;
2282}
2283
a8170e5e 2284uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2285{
2286 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2287}
2288
a8170e5e 2289uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2290{
2291 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2292}
2293
a8170e5e 2294uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2295{
2296 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2297}
2298
aab33094 2299/* XXX: optimize */
a8170e5e 2300uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2301{
2302 uint8_t val;
2303 cpu_physical_memory_read(addr, &val, 1);
2304 return val;
2305}
2306
733f0b02 2307/* warning: addr must be aligned */
a8170e5e 2308static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2309 enum device_endian endian)
aab33094 2310{
733f0b02
MT
2311 uint8_t *ptr;
2312 uint64_t val;
f3705d53 2313 MemoryRegionSection *section;
733f0b02 2314
ac1970fb 2315 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2316
cc5bea60
BS
2317 if (!(memory_region_is_ram(section->mr) ||
2318 memory_region_is_romd(section->mr))) {
733f0b02 2319 /* I/O case */
cc5bea60 2320 addr = memory_region_section_addr(section, addr);
37ec01d4 2321 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
2322#if defined(TARGET_WORDS_BIGENDIAN)
2323 if (endian == DEVICE_LITTLE_ENDIAN) {
2324 val = bswap16(val);
2325 }
2326#else
2327 if (endian == DEVICE_BIG_ENDIAN) {
2328 val = bswap16(val);
2329 }
2330#endif
733f0b02
MT
2331 } else {
2332 /* RAM case */
f3705d53 2333 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2334 & TARGET_PAGE_MASK)
cc5bea60 2335 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2336 switch (endian) {
2337 case DEVICE_LITTLE_ENDIAN:
2338 val = lduw_le_p(ptr);
2339 break;
2340 case DEVICE_BIG_ENDIAN:
2341 val = lduw_be_p(ptr);
2342 break;
2343 default:
2344 val = lduw_p(ptr);
2345 break;
2346 }
733f0b02
MT
2347 }
2348 return val;
aab33094
FB
2349}
2350
a8170e5e 2351uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2352{
2353 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2354}
2355
a8170e5e 2356uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2357{
2358 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2359}
2360
a8170e5e 2361uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2362{
2363 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2364}
2365
8df1cd07
FB
2366/* warning: addr must be aligned. The RAM page is not marked dirty
2367 and the code inside it is not invalidated. This is useful when the
2368 dirty bits are used to track modified PTEs */
a8170e5e 2369void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2370{
8df1cd07 2371 uint8_t *ptr;
f3705d53 2372 MemoryRegionSection *section;
8df1cd07 2373
ac1970fb 2374 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2375
f3705d53 2376 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2377 addr = memory_region_section_addr(section, addr);
f3705d53 2378 if (memory_region_is_ram(section->mr)) {
37ec01d4 2379 section = &phys_sections[phys_section_rom];
06ef3525 2380 }
37ec01d4 2381 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2382 } else {
f3705d53 2383 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2384 & TARGET_PAGE_MASK)
cc5bea60 2385 + memory_region_section_addr(section, addr);
5579c7f3 2386 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2387 stl_p(ptr, val);
74576198
AL
2388
2389 if (unlikely(in_migration)) {
2390 if (!cpu_physical_memory_is_dirty(addr1)) {
2391 /* invalidate code */
2392 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2393 /* set dirty bit */
f7c11b53
YT
2394 cpu_physical_memory_set_dirty_flags(
2395 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2396 }
2397 }
8df1cd07
FB
2398 }
2399}
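A hedged sketch of the PTE use case the comment above refers to; the PTE layout and bit value are illustrative, not any particular architecture's:

/* Illustrative only: set an "accessed" bit in a guest page-table entry
 * without marking the page dirty, so dirty tracking keeps reflecting
 * only real guest stores. */
#define EXAMPLE_PTE_ACCESSED 0x20   /* made-up bit position */

static void example_mark_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
    }
}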
2400
a8170e5e 2401void stq_phys_notdirty(hwaddr addr, uint64_t val)
bc98a7ef 2402{
bc98a7ef 2403 uint8_t *ptr;
f3705d53 2404 MemoryRegionSection *section;
bc98a7ef 2405
ac1970fb 2406 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2407
f3705d53 2408 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2409 addr = memory_region_section_addr(section, addr);
f3705d53 2410 if (memory_region_is_ram(section->mr)) {
37ec01d4 2411 section = &phys_sections[phys_section_rom];
06ef3525 2412 }
bc98a7ef 2413#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2414 io_mem_write(section->mr, addr, val >> 32, 4);
2415 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 2416#else
37ec01d4
AK
2417 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2418 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
2419#endif
2420 } else {
f3705d53 2421 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2422 & TARGET_PAGE_MASK)
cc5bea60 2423 + memory_region_section_addr(section, addr));
bc98a7ef
JM
2424 stq_p(ptr, val);
2425 }
2426}
2427
8df1cd07 2428/* warning: addr must be aligned */
a8170e5e 2429static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2430 enum device_endian endian)
8df1cd07 2431{
8df1cd07 2432 uint8_t *ptr;
f3705d53 2433 MemoryRegionSection *section;
8df1cd07 2434
ac1970fb 2435 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2436
f3705d53 2437 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2438 addr = memory_region_section_addr(section, addr);
f3705d53 2439 if (memory_region_is_ram(section->mr)) {
37ec01d4 2440 section = &phys_sections[phys_section_rom];
06ef3525 2441 }
1e78bcc1
AG
2442#if defined(TARGET_WORDS_BIGENDIAN)
2443 if (endian == DEVICE_LITTLE_ENDIAN) {
2444 val = bswap32(val);
2445 }
2446#else
2447 if (endian == DEVICE_BIG_ENDIAN) {
2448 val = bswap32(val);
2449 }
2450#endif
37ec01d4 2451 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
2452 } else {
2453 unsigned long addr1;
f3705d53 2454 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2455 + memory_region_section_addr(section, addr);
8df1cd07 2456 /* RAM case */
5579c7f3 2457 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2458 switch (endian) {
2459 case DEVICE_LITTLE_ENDIAN:
2460 stl_le_p(ptr, val);
2461 break;
2462 case DEVICE_BIG_ENDIAN:
2463 stl_be_p(ptr, val);
2464 break;
2465 default:
2466 stl_p(ptr, val);
2467 break;
2468 }
51d7a9eb 2469 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2470 }
2471}
2472
a8170e5e 2473void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2474{
2475 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2476}
2477
a8170e5e 2478void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2479{
2480 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2481}
2482
a8170e5e 2483void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2484{
2485 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2486}
2487
aab33094 2488/* XXX: optimize */
a8170e5e 2489void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2490{
2491 uint8_t v = val;
2492 cpu_physical_memory_write(addr, &v, 1);
2493}
2494
733f0b02 2495/* warning: addr must be aligned */
a8170e5e 2496static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2497 enum device_endian endian)
aab33094 2498{
733f0b02 2499 uint8_t *ptr;
f3705d53 2500 MemoryRegionSection *section;
733f0b02 2501
ac1970fb 2502 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2503
f3705d53 2504 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2505 addr = memory_region_section_addr(section, addr);
f3705d53 2506 if (memory_region_is_ram(section->mr)) {
37ec01d4 2507 section = &phys_sections[phys_section_rom];
06ef3525 2508 }
1e78bcc1
AG
2509#if defined(TARGET_WORDS_BIGENDIAN)
2510 if (endian == DEVICE_LITTLE_ENDIAN) {
2511 val = bswap16(val);
2512 }
2513#else
2514 if (endian == DEVICE_BIG_ENDIAN) {
2515 val = bswap16(val);
2516 }
2517#endif
37ec01d4 2518 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
2519 } else {
2520 unsigned long addr1;
f3705d53 2521 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2522 + memory_region_section_addr(section, addr);
733f0b02
MT
2523 /* RAM case */
2524 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2525 switch (endian) {
2526 case DEVICE_LITTLE_ENDIAN:
2527 stw_le_p(ptr, val);
2528 break;
2529 case DEVICE_BIG_ENDIAN:
2530 stw_be_p(ptr, val);
2531 break;
2532 default:
2533 stw_p(ptr, val);
2534 break;
2535 }
51d7a9eb 2536 invalidate_and_set_dirty(addr1, 2);
733f0b02 2537 }
aab33094
FB
2538}
2539
a8170e5e 2540void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2541{
2542 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2543}
2544
a8170e5e 2545void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2546{
2547 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2548}
2549
a8170e5e 2550void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2551{
2552 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2553}
2554
aab33094 2555/* XXX: optimize */
a8170e5e 2556void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2557{
2558 val = tswap64(val);
71d2b725 2559 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2560}
2561
a8170e5e 2562void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2563{
2564 val = cpu_to_le64(val);
2565 cpu_physical_memory_write(addr, &val, 8);
2566}
2567
a8170e5e 2568void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2569{
2570 val = cpu_to_be64(val);
2571 cpu_physical_memory_write(addr, &val, 8);
2572}
2573
5e2972fd 2574/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2575int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2576 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2577{
2578 int l;
a8170e5e 2579 hwaddr phys_addr;
9b3c35e0 2580 target_ulong page;
13eb76e0
FB
2581
2582 while (len > 0) {
2583 page = addr & TARGET_PAGE_MASK;
2584 phys_addr = cpu_get_phys_page_debug(env, page);
2585 /* if no physical page mapped, return an error */
2586 if (phys_addr == -1)
2587 return -1;
2588 l = (page + TARGET_PAGE_SIZE) - addr;
2589 if (l > len)
2590 l = len;
5e2972fd 2591 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2592 if (is_write)
2593 cpu_physical_memory_write_rom(phys_addr, buf, l);
2594 else
5e2972fd 2595 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2596 len -= l;
2597 buf += l;
2598 addr += l;
2599 }
2600 return 0;
2601}
a68fe89c 2602#endif
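As a usage sketch (the virtual address and caller are assumptions), gdbstub-style code reads guest virtual memory through the debug path above, which works in both the user-only and system configurations:

/* Illustrative only: read a chunk of guest virtual memory on behalf of a
 * debugger, tolerating unmapped pages. */
static int example_debug_peek(CPUArchState *env, target_ulong vaddr,
                              uint8_t *out, int len)
{
    /* returns 0 on success, -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, out, len, 0 /* read */);
}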
13eb76e0 2603
8e4a424b
BS
2604#if !defined(CONFIG_USER_ONLY)
2605
2606/*
2607 * A helper function for the _utterly broken_ virtio device model to find out if
2608 * it's running on a big endian machine. Don't do this at home kids!
2609 */
2610bool virtio_is_big_endian(void);
2611bool virtio_is_big_endian(void)
2612{
2613#if defined(TARGET_WORDS_BIGENDIAN)
2614 return true;
2615#else
2616 return false;
2617#endif
2618}
2619
2620#endif
2621
76f35538 2622#ifndef CONFIG_USER_ONLY
a8170e5e 2623bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2624{
2625 MemoryRegionSection *section;
2626
ac1970fb
AK
2627 section = phys_page_find(address_space_memory.dispatch,
2628 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
2629
2630 return !(memory_region_is_ram(section->mr) ||
2631 memory_region_is_romd(section->mr));
2632}
2633#endif