[qemu.git] / exec.c
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
432d268c 34#include "hw/xen.h"
35#include "qemu/timer.h"
36#include "qemu/config-file.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
67d95c15 52
67d3b957 53//#define DEBUG_UNASSIGNED
db7b5426 54//#define DEBUG_SUBPAGE
1196be37 55
e2eef170 56#if !defined(CONFIG_USER_ONLY)
9fa3e853 57int phys_ram_fd;
74576198 58static int in_migration;
94a6b54f 59
a3161038 60RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
61
62static MemoryRegion *system_memory;
309cb471 63static MemoryRegion *system_io;
62152b8a 64
65AddressSpace address_space_io;
66AddressSpace address_space_memory;
9e11908f 67DMAContext dma_context_memory;
2673a5da 68
0e0df1e2 69MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
de712f94 70static MemoryRegion io_mem_subpage_ram;
0e0df1e2 71
e2eef170 72#endif
9fa3e853 73
9349b4f9 74CPUArchState *first_cpu;
75/* current CPU in the current thread. It is only valid inside
76 cpu_exec() */
9349b4f9 77DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 78/* 0 = Do not count executed instructions.
bf20dc07 79 1 = Precise instruction counting.
2e70f6ef 80 2 = Adaptive rate instruction counting. */
5708fc66 81int use_icount;
6a00d601 82
e2eef170 83#if !defined(CONFIG_USER_ONLY)
4346ae3e 84
85static MemoryRegionSection *phys_sections;
86static unsigned phys_sections_nb, phys_sections_nb_alloc;
87static uint16_t phys_section_unassigned;
88static uint16_t phys_section_notdirty;
89static uint16_t phys_section_rom;
90static uint16_t phys_section_watch;
5312bd8b 91
92/* Simple allocator for PhysPageEntry nodes */
93static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
94static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
95
07f07b31 96#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 97
e2eef170 98static void io_mem_init(void);
62152b8a 99static void memory_map_init(void);
8b9c99d9 100static void *qemu_safe_ram_ptr(ram_addr_t addr);
e2eef170 101
1ec9b909 102static MemoryRegion io_mem_watch;
6658ffb8 103#endif
fd6ce8f6 104
6d9a1304 105#if !defined(CONFIG_USER_ONLY)
d6f2ea22 106
f7bf5461 107static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 108{
f7bf5461 109 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
110 typedef PhysPageEntry Node[L2_SIZE];
111 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
112 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
113 phys_map_nodes_nb + nodes);
114 phys_map_nodes = g_renew(Node, phys_map_nodes,
115 phys_map_nodes_nb_alloc);
116 }
117}
118
119static uint16_t phys_map_node_alloc(void)
120{
121 unsigned i;
122 uint16_t ret;
123
124 ret = phys_map_nodes_nb++;
125 assert(ret != PHYS_MAP_NODE_NIL);
126 assert(ret != phys_map_nodes_nb_alloc);
d6f2ea22 127 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 128 phys_map_nodes[ret][i].is_leaf = 0;
c19e8800 129 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 130 }
f7bf5461 131 return ret;
132}
133
134static void phys_map_nodes_reset(void)
135{
136 phys_map_nodes_nb = 0;
137}
138
92e873b9 139
140static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
141 hwaddr *nb, uint16_t leaf,
2999097b 142 int level)
143{
144 PhysPageEntry *p;
145 int i;
a8170e5e 146 hwaddr step = (hwaddr)1 << (level * L2_BITS);
108c49b8 147
07f07b31 148 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
149 lp->ptr = phys_map_node_alloc();
150 p = phys_map_nodes[lp->ptr];
151 if (level == 0) {
152 for (i = 0; i < L2_SIZE; i++) {
07f07b31 153 p[i].is_leaf = 1;
c19e8800 154 p[i].ptr = phys_section_unassigned;
4346ae3e 155 }
67c4d23c 156 }
f7bf5461 157 } else {
c19e8800 158 p = phys_map_nodes[lp->ptr];
92e873b9 159 }
2999097b 160 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 161
2999097b 162 while (*nb && lp < &p[L2_SIZE]) {
163 if ((*index & (step - 1)) == 0 && *nb >= step) {
164 lp->is_leaf = true;
c19e8800 165 lp->ptr = leaf;
166 *index += step;
167 *nb -= step;
168 } else {
169 phys_page_set_level(lp, index, nb, leaf, level - 1);
170 }
171 ++lp;
172 }
173}
174
ac1970fb 175static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 176 hwaddr index, hwaddr nb,
2999097b 177 uint16_t leaf)
f7bf5461 178{
2999097b 179 /* Wildly overreserve - it doesn't matter much. */
07f07b31 180 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 181
ac1970fb 182 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
183}
184
a8170e5e 185MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
92e873b9 186{
ac1970fb 187 PhysPageEntry lp = d->phys_map;
188 PhysPageEntry *p;
189 int i;
31ab2b4a 190 uint16_t s_index = phys_section_unassigned;
f1f6e3b8 191
07f07b31 192 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 193 if (lp.ptr == PHYS_MAP_NODE_NIL) {
194 goto not_found;
195 }
c19e8800 196 p = phys_map_nodes[lp.ptr];
31ab2b4a 197 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 198 }
31ab2b4a 199
c19e8800 200 s_index = lp.ptr;
31ab2b4a 201not_found:
202 return &phys_sections[s_index];
203}
204
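/* Illustrative sketch (assumption, not original code): phys_page_set() and
 * phys_page_find() maintain a P_L2_LEVELS-deep radix tree from physical page
 * index to section id.  A hypothetical caller mapping a single page would do
 * roughly:
 *
 *     uint16_t sec = phys_section_add(&section);
 *     phys_page_set(d, addr >> TARGET_PAGE_BITS, 1, sec);
 *     MemoryRegionSection *mrs = phys_page_find(d, addr >> TARGET_PAGE_BITS);
 *
 * Lookups that miss fall back to phys_section_unassigned.
 */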
205bool memory_region_is_unassigned(MemoryRegion *mr)
206{
207 return mr != &io_mem_ram && mr != &io_mem_rom
208 && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 209 && mr != &io_mem_watch;
fd6ce8f6 210}
5b6dd868 211#endif
fd6ce8f6 212
5b6dd868 213void cpu_exec_init_all(void)
fdbb84d1 214{
5b6dd868 215#if !defined(CONFIG_USER_ONLY)
b2a8658e 216 qemu_mutex_init(&ram_list.mutex);
217 memory_map_init();
218 io_mem_init();
fdbb84d1 219#endif
5b6dd868 220}
fdbb84d1 221
222#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
223
224static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 225{
5b6dd868 226 CPUArchState *env = opaque;
a513fe19 227
228 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
229 version_id is increased. */
230 env->interrupt_request &= ~0x01;
231 tlb_flush(env, 1);
232
233 return 0;
a513fe19 234}
7501267e 235
236static const VMStateDescription vmstate_cpu_common = {
237 .name = "cpu_common",
238 .version_id = 1,
239 .minimum_version_id = 1,
240 .minimum_version_id_old = 1,
241 .post_load = cpu_common_post_load,
242 .fields = (VMStateField []) {
243 VMSTATE_UINT32(halted, CPUArchState),
244 VMSTATE_UINT32(interrupt_request, CPUArchState),
245 VMSTATE_END_OF_LIST()
246 }
247};
248#endif
ea041c0e 249
38d8f5c8 250CPUState *qemu_get_cpu(int index)
ea041c0e 251{
5b6dd868 252 CPUArchState *env = first_cpu;
38d8f5c8 253 CPUState *cpu = NULL;
ea041c0e 254
5b6dd868 255 while (env) {
256 cpu = ENV_GET_CPU(env);
257 if (cpu->cpu_index == index) {
5b6dd868 258 break;
55e5c285 259 }
5b6dd868 260 env = env->next_cpu;
ea041c0e 261 }
5b6dd868 262
38d8f5c8 263 return cpu;
264}
265
5b6dd868 266void cpu_exec_init(CPUArchState *env)
ea041c0e 267{
5b6dd868 268 CPUState *cpu = ENV_GET_CPU(env);
269 CPUArchState **penv;
270 int cpu_index;
271
272#if defined(CONFIG_USER_ONLY)
273 cpu_list_lock();
274#endif
275 env->next_cpu = NULL;
276 penv = &first_cpu;
277 cpu_index = 0;
278 while (*penv != NULL) {
279 penv = &(*penv)->next_cpu;
280 cpu_index++;
281 }
55e5c285 282 cpu->cpu_index = cpu_index;
1b1ed8dc 283 cpu->numa_node = 0;
284 QTAILQ_INIT(&env->breakpoints);
285 QTAILQ_INIT(&env->watchpoints);
286#ifndef CONFIG_USER_ONLY
287 cpu->thread_id = qemu_get_thread_id();
288#endif
289 *penv = env;
290#if defined(CONFIG_USER_ONLY)
291 cpu_list_unlock();
292#endif
293#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
294 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
295 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
296 cpu_save, cpu_load, env);
297#endif
298}
299
1fddef4b 300#if defined(TARGET_HAS_ICE)
94df27fd 301#if defined(CONFIG_USER_ONLY)
9349b4f9 302static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
303{
304 tb_invalidate_phys_page_range(pc, pc + 1, 0);
305}
306#else
307static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
308{
309 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
310 (pc & ~TARGET_PAGE_MASK));
1e7855a5 311}
c27004ec 312#endif
94df27fd 313#endif /* TARGET_HAS_ICE */
d720b93d 314
c527ee8f 315#if defined(CONFIG_USER_ONLY)
9349b4f9 316void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
317
318{
319}
320
9349b4f9 321int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
322 int flags, CPUWatchpoint **watchpoint)
323{
324 return -ENOSYS;
325}
326#else
6658ffb8 327/* Add a watchpoint. */
9349b4f9 328int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 329 int flags, CPUWatchpoint **watchpoint)
6658ffb8 330{
b4051334 331 target_ulong len_mask = ~(len - 1);
c0ce998e 332 CPUWatchpoint *wp;
6658ffb8 333
b4051334 334 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
335 if ((len & (len - 1)) || (addr & ~len_mask) ||
336 len == 0 || len > TARGET_PAGE_SIZE) {
337 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
338 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
339 return -EINVAL;
340 }
7267c094 341 wp = g_malloc(sizeof(*wp));
342
343 wp->vaddr = addr;
b4051334 344 wp->len_mask = len_mask;
345 wp->flags = flags;
346
2dc9f411 347 /* keep all GDB-injected watchpoints in front */
c0ce998e 348 if (flags & BP_GDB)
72cf2d4f 349 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 350 else
72cf2d4f 351 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 352
6658ffb8 353 tlb_flush_page(env, addr);
354
355 if (watchpoint)
356 *watchpoint = wp;
357 return 0;
358}
359
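/* Usage sketch (assumption, not original code): a debug stub holding a live
 * CPUArchState *env could drive the helpers above as:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) == 0) {
 *         ...
 *         cpu_watchpoint_remove_by_ref(env, wp);
 *     }
 *
 * len must be a power of two and addr aligned to it, as checked above.
 */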
a1d1bb31 360/* Remove a specific watchpoint. */
9349b4f9 361int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 362 int flags)
6658ffb8 363{
b4051334 364 target_ulong len_mask = ~(len - 1);
a1d1bb31 365 CPUWatchpoint *wp;
6658ffb8 366
72cf2d4f 367 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 368 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 369 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 370 cpu_watchpoint_remove_by_ref(env, wp);
371 return 0;
372 }
373 }
a1d1bb31 374 return -ENOENT;
375}
376
a1d1bb31 377/* Remove a specific watchpoint by reference. */
9349b4f9 378void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 379{
72cf2d4f 380 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 381
382 tlb_flush_page(env, watchpoint->vaddr);
383
7267c094 384 g_free(watchpoint);
385}
386
387/* Remove all matching watchpoints. */
9349b4f9 388void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 389{
c0ce998e 390 CPUWatchpoint *wp, *next;
a1d1bb31 391
72cf2d4f 392 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
393 if (wp->flags & mask)
394 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 395 }
7d03f82f 396}
c527ee8f 397#endif
7d03f82f 398
a1d1bb31 399/* Add a breakpoint. */
9349b4f9 400int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 401 CPUBreakpoint **breakpoint)
4c3a88a2 402{
1fddef4b 403#if defined(TARGET_HAS_ICE)
c0ce998e 404 CPUBreakpoint *bp;
3b46e624 405
7267c094 406 bp = g_malloc(sizeof(*bp));
4c3a88a2 407
408 bp->pc = pc;
409 bp->flags = flags;
410
2dc9f411 411 /* keep all GDB-injected breakpoints in front */
c0ce998e 412 if (flags & BP_GDB)
72cf2d4f 413 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 414 else
72cf2d4f 415 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 416
d720b93d 417 breakpoint_invalidate(env, pc);
418
419 if (breakpoint)
420 *breakpoint = bp;
421 return 0;
422#else
a1d1bb31 423 return -ENOSYS;
424#endif
425}
426
a1d1bb31 427/* Remove a specific breakpoint. */
9349b4f9 428int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 429{
7d03f82f 430#if defined(TARGET_HAS_ICE)
431 CPUBreakpoint *bp;
432
72cf2d4f 433 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
434 if (bp->pc == pc && bp->flags == flags) {
435 cpu_breakpoint_remove_by_ref(env, bp);
436 return 0;
437 }
7d03f82f 438 }
439 return -ENOENT;
440#else
441 return -ENOSYS;
442#endif
443}
444
a1d1bb31 445/* Remove a specific breakpoint by reference. */
9349b4f9 446void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 447{
1fddef4b 448#if defined(TARGET_HAS_ICE)
72cf2d4f 449 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 450
451 breakpoint_invalidate(env, breakpoint->pc);
452
7267c094 453 g_free(breakpoint);
454#endif
455}
456
457/* Remove all matching breakpoints. */
9349b4f9 458void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
459{
460#if defined(TARGET_HAS_ICE)
c0ce998e 461 CPUBreakpoint *bp, *next;
a1d1bb31 462
72cf2d4f 463 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
464 if (bp->flags & mask)
465 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 466 }
467#endif
468}
469
470/* enable or disable single step mode. EXCP_DEBUG is returned by the
471 CPU loop after each instruction */
9349b4f9 472void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 473{
1fddef4b 474#if defined(TARGET_HAS_ICE)
475 if (env->singlestep_enabled != enabled) {
476 env->singlestep_enabled = enabled;
477 if (kvm_enabled())
478 kvm_update_guest_debug(env, 0);
479 else {
ccbb4d44 480 /* must flush all the translated code to avoid inconsistencies */
481 /* XXX: only flush what is necessary */
482 tb_flush(env);
483 }
484 }
485#endif
486}
487
9349b4f9 488void cpu_reset_interrupt(CPUArchState *env, int mask)
489{
490 env->interrupt_request &= ~mask;
491}
492
9349b4f9 493void cpu_exit(CPUArchState *env)
494{
495 env->exit_request = 1;
496 cpu_unlink_tb(env);
497}
498
9349b4f9 499void cpu_abort(CPUArchState *env, const char *fmt, ...)
500{
501 va_list ap;
493ae1f0 502 va_list ap2;
503
504 va_start(ap, fmt);
493ae1f0 505 va_copy(ap2, ap);
506 fprintf(stderr, "qemu: fatal: ");
507 vfprintf(stderr, fmt, ap);
508 fprintf(stderr, "\n");
6fd2a026 509 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
510 if (qemu_log_enabled()) {
511 qemu_log("qemu: fatal: ");
512 qemu_log_vprintf(fmt, ap2);
513 qemu_log("\n");
6fd2a026 514 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 515 qemu_log_flush();
93fcfe39 516 qemu_log_close();
924edcae 517 }
493ae1f0 518 va_end(ap2);
f9373291 519 va_end(ap);
520#if defined(CONFIG_USER_ONLY)
521 {
522 struct sigaction act;
523 sigfillset(&act.sa_mask);
524 act.sa_handler = SIG_DFL;
525 sigaction(SIGABRT, &act, NULL);
526 }
527#endif
528 abort();
529}
530
9349b4f9 531CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 532{
533 CPUArchState *new_env = cpu_init(env->cpu_model_str);
534 CPUArchState *next_cpu = new_env->next_cpu;
535#if defined(TARGET_HAS_ICE)
536 CPUBreakpoint *bp;
537 CPUWatchpoint *wp;
538#endif
539
9349b4f9 540 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081 541
55e5c285 542 /* Preserve chaining. */
c5be9f08 543 new_env->next_cpu = next_cpu;
544
545 /* Clone all break/watchpoints.
546 Note: Once we support ptrace with hw-debug register access, make sure
547 BP_CPU break/watchpoints are handled correctly on clone. */
548 QTAILQ_INIT(&env->breakpoints);
549 QTAILQ_INIT(&env->watchpoints);
5a38f081 550#if defined(TARGET_HAS_ICE)
72cf2d4f 551 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
552 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
553 }
72cf2d4f 554 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
555 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
556 wp->flags, NULL);
557 }
558#endif
559
560 return new_env;
561}
562
0124311e 563#if !defined(CONFIG_USER_ONLY)
564static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
565 uintptr_t length)
566{
567 uintptr_t start1;
568
569 /* we modify the TLB cache so that the dirty bit will be set again
570 when accessing the range */
571 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
572 /* Check that we don't span multiple blocks - this breaks the
573 address comparisons below. */
574 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
575 != (end - 1) - start) {
576 abort();
577 }
578 cpu_tlb_reset_dirty_all(start1, length);
579
580}
581
5579c7f3 582/* Note: start and end must be within the same ram block. */
c227f099 583void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 584 int dirty_flags)
1ccde1cb 585{
d24981d3 586 uintptr_t length;
587
588 start &= TARGET_PAGE_MASK;
589 end = TARGET_PAGE_ALIGN(end);
590
591 length = end - start;
592 if (length == 0)
593 return;
f7c11b53 594 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 595
596 if (tcg_enabled()) {
597 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 598 }
599}
600
8b9c99d9 601static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 602{
f6f3fbca 603 int ret = 0;
74576198 604 in_migration = enable;
f6f3fbca 605 return ret;
606}
607
a8170e5e 608hwaddr memory_region_section_get_iotlb(CPUArchState *env,
609 MemoryRegionSection *section,
610 target_ulong vaddr,
a8170e5e 611 hwaddr paddr,
612 int prot,
613 target_ulong *address)
614{
a8170e5e 615 hwaddr iotlb;
616 CPUWatchpoint *wp;
617
cc5bea60 618 if (memory_region_is_ram(section->mr)) {
619 /* Normal RAM. */
620 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 621 + memory_region_section_addr(section, paddr);
622 if (!section->readonly) {
623 iotlb |= phys_section_notdirty;
624 } else {
625 iotlb |= phys_section_rom;
626 }
627 } else {
628 /* IO handlers are currently passed a physical address.
629 It would be nice to pass an offset from the base address
630 of that region. This would avoid having to special case RAM,
631 and avoid full address decoding in every device.
632 We can't use the high bits of pd for this because
633 IO_MEM_ROMD uses these as a ram address. */
634 iotlb = section - phys_sections;
cc5bea60 635 iotlb += memory_region_section_addr(section, paddr);
e5548617
BS
636 }
637
638 /* Make accesses to pages with watchpoints go via the
639 watchpoint trap routines. */
640 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
641 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
642 /* Avoid trapping reads of pages with a write breakpoint. */
643 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
644 iotlb = phys_section_watch + paddr;
645 *address |= TLB_MMIO;
646 break;
647 }
648 }
649 }
650
651 return iotlb;
652}
653#endif /* defined(CONFIG_USER_ONLY) */
654
e2eef170 655#if !defined(CONFIG_USER_ONLY)
8da3ff18 656
657#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
658typedef struct subpage_t {
70c68e44 659 MemoryRegion iomem;
a8170e5e 660 hwaddr base;
5312bd8b 661 uint16_t sub_section[TARGET_PAGE_SIZE];
662} subpage_t;
663
c227f099 664static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 665 uint16_t section);
a8170e5e 666static subpage_t *subpage_init(hwaddr base);
5312bd8b 667static void destroy_page_desc(uint16_t section_index)
54688b1e 668{
5312bd8b
AK
669 MemoryRegionSection *section = &phys_sections[section_index];
670 MemoryRegion *mr = section->mr;
671
672 if (mr->subpage) {
673 subpage_t *subpage = container_of(mr, subpage_t, iomem);
674 memory_region_destroy(&subpage->iomem);
675 g_free(subpage);
676 }
677}
678
4346ae3e 679static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
680{
681 unsigned i;
d6f2ea22 682 PhysPageEntry *p;
54688b1e 683
c19e8800 684 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
685 return;
686 }
687
c19e8800 688 p = phys_map_nodes[lp->ptr];
4346ae3e 689 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 690 if (!p[i].is_leaf) {
54688b1e 691 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 692 } else {
c19e8800 693 destroy_page_desc(p[i].ptr);
54688b1e 694 }
54688b1e 695 }
07f07b31 696 lp->is_leaf = 0;
c19e8800 697 lp->ptr = PHYS_MAP_NODE_NIL;
698}
699
ac1970fb 700static void destroy_all_mappings(AddressSpaceDispatch *d)
54688b1e 701{
ac1970fb 702 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
d6f2ea22 703 phys_map_nodes_reset();
704}
705
706static uint16_t phys_section_add(MemoryRegionSection *section)
707{
708 if (phys_sections_nb == phys_sections_nb_alloc) {
709 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
710 phys_sections = g_renew(MemoryRegionSection, phys_sections,
711 phys_sections_nb_alloc);
712 }
713 phys_sections[phys_sections_nb] = *section;
714 return phys_sections_nb++;
715}
716
717static void phys_sections_clear(void)
718{
719 phys_sections_nb = 0;
720}
721
ac1970fb 722static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
723{
724 subpage_t *subpage;
a8170e5e 725 hwaddr base = section->offset_within_address_space
0f0cb164 726 & TARGET_PAGE_MASK;
ac1970fb 727 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
728 MemoryRegionSection subsection = {
729 .offset_within_address_space = base,
730 .size = TARGET_PAGE_SIZE,
731 };
a8170e5e 732 hwaddr start, end;
0f0cb164 733
f3705d53 734 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 735
f3705d53 736 if (!(existing->mr->subpage)) {
737 subpage = subpage_init(base);
738 subsection.mr = &subpage->iomem;
ac1970fb 739 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 740 phys_section_add(&subsection));
0f0cb164 741 } else {
f3705d53 742 subpage = container_of(existing->mr, subpage_t, iomem);
743 }
744 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
adb2a9b5 745 end = start + section->size - 1;
746 subpage_register(subpage, start, end, phys_section_add(section));
747}
748
749
ac1970fb 750static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
33417e70 751{
a8170e5e 752 hwaddr start_addr = section->offset_within_address_space;
dd81124b 753 ram_addr_t size = section->size;
a8170e5e 754 hwaddr addr;
5312bd8b 755 uint16_t section_index = phys_section_add(section);
dd81124b 756
3b8e6a2d 757 assert(size);
f6f3fbca 758
3b8e6a2d 759 addr = start_addr;
ac1970fb 760 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2999097b 761 section_index);
762}
763
ac1970fb 764static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 765{
ac1970fb 766 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
0f0cb164
AK
767 MemoryRegionSection now = *section, remain = *section;
768
769 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
770 || (now.size < TARGET_PAGE_SIZE)) {
771 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
772 - now.offset_within_address_space,
773 now.size);
ac1970fb 774 register_subpage(d, &now);
775 remain.size -= now.size;
776 remain.offset_within_address_space += now.size;
777 remain.offset_within_region += now.size;
778 }
779 while (remain.size >= TARGET_PAGE_SIZE) {
780 now = remain;
781 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
782 now.size = TARGET_PAGE_SIZE;
ac1970fb 783 register_subpage(d, &now);
784 } else {
785 now.size &= TARGET_PAGE_MASK;
ac1970fb 786 register_multipage(d, &now);
69b67646 787 }
788 remain.size -= now.size;
789 remain.offset_within_address_space += now.size;
790 remain.offset_within_region += now.size;
791 }
792 now = remain;
793 if (now.size) {
ac1970fb 794 register_subpage(d, &now);
795 }
796}
797
798void qemu_flush_coalesced_mmio_buffer(void)
799{
800 if (kvm_enabled())
801 kvm_flush_coalesced_mmio_buffer();
802}
803
804void qemu_mutex_lock_ramlist(void)
805{
806 qemu_mutex_lock(&ram_list.mutex);
807}
808
809void qemu_mutex_unlock_ramlist(void)
810{
811 qemu_mutex_unlock(&ram_list.mutex);
812}
813
814#if defined(__linux__) && !defined(TARGET_S390X)
815
816#include <sys/vfs.h>
817
818#define HUGETLBFS_MAGIC 0x958458f6
819
820static long gethugepagesize(const char *path)
821{
822 struct statfs fs;
823 int ret;
824
825 do {
9742bf26 826 ret = statfs(path, &fs);
827 } while (ret != 0 && errno == EINTR);
828
829 if (ret != 0) {
830 perror(path);
831 return 0;
832 }
833
834 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 835 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
836
837 return fs.f_bsize;
838}
839
840static void *file_ram_alloc(RAMBlock *block,
841 ram_addr_t memory,
842 const char *path)
843{
844 char *filename;
845 void *area;
846 int fd;
847#ifdef MAP_POPULATE
848 int flags;
849#endif
850 unsigned long hpagesize;
851
852 hpagesize = gethugepagesize(path);
853 if (!hpagesize) {
9742bf26 854 return NULL;
855 }
856
857 if (memory < hpagesize) {
858 return NULL;
859 }
860
861 if (kvm_enabled() && !kvm_has_sync_mmu()) {
862 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
863 return NULL;
864 }
865
e4ada482 866 filename = g_strdup_printf("%s/qemu_back_mem.XXXXXX", path);
867
868 fd = mkstemp(filename);
869 if (fd < 0) {
9742bf26 870 perror("unable to create backing store for hugepages");
e4ada482 871 g_free(filename);
9742bf26 872 return NULL;
873 }
874 unlink(filename);
e4ada482 875 g_free(filename);
876
877 memory = (memory+hpagesize-1) & ~(hpagesize-1);
878
879 /*
880 * ftruncate is not supported by hugetlbfs in older
881 * hosts, so don't bother bailing out on errors.
882 * If anything goes wrong with it under other filesystems,
883 * mmap will fail.
884 */
885 if (ftruncate(fd, memory))
9742bf26 886 perror("ftruncate");
887
888#ifdef MAP_POPULATE
889 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
890 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
891 * to sidestep this quirk.
892 */
893 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
894 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
895#else
896 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
897#endif
898 if (area == MAP_FAILED) {
899 perror("file_ram_alloc: can't mmap RAM pages");
900 close(fd);
901 return (NULL);
c902760f 902 }
04b16653 903 block->fd = fd;
904 return area;
905}
906#endif
907
d17b5288 908static ram_addr_t find_ram_offset(ram_addr_t size)
909{
910 RAMBlock *block, *next_block;
3e837b2c 911 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 912
a3161038 913 if (QTAILQ_EMPTY(&ram_list.blocks))
914 return 0;
915
a3161038 916 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 917 ram_addr_t end, next = RAM_ADDR_MAX;
918
919 end = block->offset + block->length;
920
a3161038 921 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
922 if (next_block->offset >= end) {
923 next = MIN(next, next_block->offset);
924 }
925 }
926 if (next - end >= size && next - end < mingap) {
3e837b2c 927 offset = end;
928 mingap = next - end;
929 }
930 }
931
932 if (offset == RAM_ADDR_MAX) {
933 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
934 (uint64_t)size);
935 abort();
936 }
937
938 return offset;
939}
940
652d7ec2 941ram_addr_t last_ram_offset(void)
942{
943 RAMBlock *block;
944 ram_addr_t last = 0;
945
a3161038 946 QTAILQ_FOREACH(block, &ram_list.blocks, next)
947 last = MAX(last, block->offset + block->length);
948
949 return last;
950}
951
952static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
953{
954 int ret;
955 QemuOpts *machine_opts;
956
957 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
958 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
959 if (machine_opts &&
960 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
961 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
962 if (ret) {
963 perror("qemu_madvise");
964 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
965 "but dump_guest_core=off specified\n");
966 }
967 }
968}
969
c5705a77 970void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
971{
972 RAMBlock *new_block, *block;
973
c5705a77 974 new_block = NULL;
a3161038 975 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
976 if (block->offset == addr) {
977 new_block = block;
978 break;
979 }
980 }
981 assert(new_block);
982 assert(!new_block->idstr[0]);
84b89d78 983
984 if (dev) {
985 char *id = qdev_get_dev_path(dev);
986 if (id) {
987 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 988 g_free(id);
989 }
990 }
991 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
992
993 /* This assumes the iothread lock is taken here too. */
994 qemu_mutex_lock_ramlist();
a3161038 995 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 996 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
997 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
998 new_block->idstr);
999 abort();
1000 }
1001 }
b2a8658e 1002 qemu_mutex_unlock_ramlist();
1003}
1004
1005static int memory_try_enable_merging(void *addr, size_t len)
1006{
1007 QemuOpts *opts;
1008
1009 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1010 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1011 /* disabled by the user */
1012 return 0;
1013 }
1014
1015 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1016}
1017
1018ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1019 MemoryRegion *mr)
1020{
abb26d63 1021 RAMBlock *block, *new_block;
1022
1023 size = TARGET_PAGE_ALIGN(size);
1024 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1025
1026 /* This assumes the iothread lock is taken here too. */
1027 qemu_mutex_lock_ramlist();
7c637366 1028 new_block->mr = mr;
432d268c 1029 new_block->offset = find_ram_offset(size);
1030 if (host) {
1031 new_block->host = host;
cd19cfa2 1032 new_block->flags |= RAM_PREALLOC_MASK;
1033 } else {
1034 if (mem_path) {
c902760f 1035#if defined (__linux__) && !defined(TARGET_S390X)
1036 new_block->host = file_ram_alloc(new_block, size, mem_path);
1037 if (!new_block->host) {
1038 new_block->host = qemu_vmalloc(size);
8490fc78 1039 memory_try_enable_merging(new_block->host, size);
6977dfe6 1040 }
c902760f 1041#else
1042 fprintf(stderr, "-mem-path option unsupported\n");
1043 exit(1);
c902760f 1044#endif
6977dfe6 1045 } else {
868bb33f 1046 if (xen_enabled()) {
fce537d4 1047 xen_ram_alloc(new_block->offset, size, mr);
1048 } else if (kvm_enabled()) {
1049 /* some s390/kvm configurations have special constraints */
1050 new_block->host = kvm_vmalloc(size);
1051 } else {
1052 new_block->host = qemu_vmalloc(size);
1053 }
8490fc78 1054 memory_try_enable_merging(new_block->host, size);
6977dfe6 1055 }
c902760f 1056 }
1057 new_block->length = size;
1058
1059 /* Keep the list sorted from biggest to smallest block. */
1060 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1061 if (block->length < new_block->length) {
1062 break;
1063 }
1064 }
1065 if (block) {
1066 QTAILQ_INSERT_BEFORE(block, new_block, next);
1067 } else {
1068 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1069 }
0d6d3c87 1070 ram_list.mru_block = NULL;
94a6b54f 1071
f798b07f 1072 ram_list.version++;
b2a8658e 1073 qemu_mutex_unlock_ramlist();
f798b07f 1074
7267c094 1075 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1076 last_ram_offset() >> TARGET_PAGE_BITS);
1077 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1078 0, size >> TARGET_PAGE_BITS);
1720aeee 1079 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1080
ddb97f1d 1081 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1082 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1083
1084 if (kvm_enabled())
1085 kvm_setup_guest_memory(new_block->host, size);
1086
1087 return new_block->offset;
1088}
e9a1ab19 1089
c5705a77 1090ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1091{
c5705a77 1092 return qemu_ram_alloc_from_ptr(size, NULL, mr);
1093}
1094
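/* Usage sketch (assumption, not original code): board/device code normally
 * reaches qemu_ram_alloc() through the MemoryRegion API, ending up with:
 *
 *     ram_addr_t offset = qemu_ram_alloc(size, mr);
 *     void *host = qemu_get_ram_ptr(offset);
 *
 * qemu_get_ram_ptr() is defined below and, per its comment, is only meant for
 * memory the device itself owns, not for general-purpose DMA.
 */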
1095void qemu_ram_free_from_ptr(ram_addr_t addr)
1096{
1097 RAMBlock *block;
1098
1099 /* This assumes the iothread lock is taken here too. */
1100 qemu_mutex_lock_ramlist();
a3161038 1101 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1102 if (addr == block->offset) {
a3161038 1103 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1104 ram_list.mru_block = NULL;
f798b07f 1105 ram_list.version++;
7267c094 1106 g_free(block);
b2a8658e 1107 break;
1108 }
1109 }
b2a8658e 1110 qemu_mutex_unlock_ramlist();
1111}
1112
c227f099 1113void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1114{
1115 RAMBlock *block;
1116
1117 /* This assumes the iothread lock is taken here too. */
1118 qemu_mutex_lock_ramlist();
a3161038 1119 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1120 if (addr == block->offset) {
a3161038 1121 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1122 ram_list.mru_block = NULL;
f798b07f 1123 ram_list.version++;
1124 if (block->flags & RAM_PREALLOC_MASK) {
1125 ;
1126 } else if (mem_path) {
1127#if defined (__linux__) && !defined(TARGET_S390X)
1128 if (block->fd) {
1129 munmap(block->host, block->length);
1130 close(block->fd);
1131 } else {
1132 qemu_vfree(block->host);
1133 }
1134#else
1135 abort();
1136#endif
1137 } else {
1138#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1139 munmap(block->host, block->length);
1140#else
868bb33f 1141 if (xen_enabled()) {
e41d7c69 1142 xen_invalidate_map_cache_entry(block->host);
1143 } else {
1144 qemu_vfree(block->host);
1145 }
1146#endif
1147 }
7267c094 1148 g_free(block);
b2a8658e 1149 break;
1150 }
1151 }
b2a8658e 1152 qemu_mutex_unlock_ramlist();
04b16653 1153
1154}
1155
1156#ifndef _WIN32
1157void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1158{
1159 RAMBlock *block;
1160 ram_addr_t offset;
1161 int flags;
1162 void *area, *vaddr;
1163
a3161038 1164 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1165 offset = addr - block->offset;
1166 if (offset < block->length) {
1167 vaddr = block->host + offset;
1168 if (block->flags & RAM_PREALLOC_MASK) {
1169 ;
1170 } else {
1171 flags = MAP_FIXED;
1172 munmap(vaddr, length);
1173 if (mem_path) {
1174#if defined(__linux__) && !defined(TARGET_S390X)
1175 if (block->fd) {
1176#ifdef MAP_POPULATE
1177 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1178 MAP_PRIVATE;
1179#else
1180 flags |= MAP_PRIVATE;
1181#endif
1182 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1183 flags, block->fd, offset);
1184 } else {
1185 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1186 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1187 flags, -1, 0);
1188 }
1189#else
1190 abort();
1191#endif
1192 } else {
1193#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1194 flags |= MAP_SHARED | MAP_ANONYMOUS;
1195 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1196 flags, -1, 0);
1197#else
1198 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1199 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1200 flags, -1, 0);
1201#endif
1202 }
1203 if (area != vaddr) {
1204 fprintf(stderr, "Could not remap addr: "
1205 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1206 length, addr);
1207 exit(1);
1208 }
8490fc78 1209 memory_try_enable_merging(vaddr, length);
ddb97f1d 1210 qemu_ram_setup_dump(vaddr, length);
1211 }
1212 return;
1213 }
1214 }
1215}
1216#endif /* !_WIN32 */
1217
dc828ca1 1218/* Return a host pointer to ram allocated with qemu_ram_alloc.
1219 With the exception of the softmmu code in this file, this should
1220 only be used for local memory (e.g. video ram) that the device owns,
1221 and knows it isn't going to access beyond the end of the block.
1222
1223 It should not be used for general purpose DMA.
1224 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1225 */
c227f099 1226void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 1227{
1228 RAMBlock *block;
1229
b2a8658e 1230 /* The list is protected by the iothread lock here. */
1231 block = ram_list.mru_block;
1232 if (block && addr - block->offset < block->length) {
1233 goto found;
1234 }
a3161038 1235 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1236 if (addr - block->offset < block->length) {
0d6d3c87 1237 goto found;
f471a17e 1238 }
94a6b54f 1239 }
1240
1241 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1242 abort();
1243
1244found:
1245 ram_list.mru_block = block;
1246 if (xen_enabled()) {
1247 /* We need to check if the requested address is in the RAM
1248 * because we don't want to map the entire memory in QEMU.
1249 * In that case just map until the end of the page.
1250 */
1251 if (block->offset == 0) {
1252 return xen_map_cache(addr, 0, 0);
1253 } else if (block->host == NULL) {
1254 block->host =
1255 xen_map_cache(block->offset, block->length, 1);
1256 }
1257 }
1258 return block->host + (addr - block->offset);
1259}
1260
1261/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1262 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1263 *
1264 * ??? Is this still necessary?
b2e0a138 1265 */
8b9c99d9 1266static void *qemu_safe_ram_ptr(ram_addr_t addr)
1267{
1268 RAMBlock *block;
1269
b2a8658e 1270 /* The list is protected by the iothread lock here. */
a3161038 1271 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1272 if (addr - block->offset < block->length) {
868bb33f 1273 if (xen_enabled()) {
1274 /* We need to check if the requested address is in the RAM
1275 * because we don't want to map the entire memory in QEMU.
712c2b41 1276 * In that case just map until the end of the page.
1277 */
1278 if (block->offset == 0) {
e41d7c69 1279 return xen_map_cache(addr, 0, 0);
432d268c 1280 } else if (block->host == NULL) {
1281 block->host =
1282 xen_map_cache(block->offset, block->length, 1);
1283 }
1284 }
1285 return block->host + (addr - block->offset);
1286 }
1287 }
1288
1289 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1290 abort();
1291
1292 return NULL;
1293}
1294
1295/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1296 * but takes a size argument */
8b9c99d9 1297static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 1298{
1299 if (*size == 0) {
1300 return NULL;
1301 }
868bb33f 1302 if (xen_enabled()) {
e41d7c69 1303 return xen_map_cache(addr, *size, 1);
868bb33f 1304 } else {
1305 RAMBlock *block;
1306
a3161038 1307 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1308 if (addr - block->offset < block->length) {
1309 if (addr - block->offset + *size > block->length)
1310 *size = block->length - addr + block->offset;
1311 return block->host + (addr - block->offset);
1312 }
1313 }
1314
1315 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1316 abort();
1317 }
1318}
1319
1320void qemu_put_ram_ptr(void *addr)
1321{
1322 trace_qemu_put_ram_ptr(addr);
1323}
1324
e890261f 1325int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1326{
1327 RAMBlock *block;
1328 uint8_t *host = ptr;
1329
868bb33f 1330 if (xen_enabled()) {
e41d7c69 1331 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1332 return 0;
1333 }
1334
a3161038 1335 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1336 /* This case append when the block is not mapped. */
1337 if (block->host == NULL) {
1338 continue;
1339 }
f471a17e 1340 if (host - block->host < block->length) {
1341 *ram_addr = block->offset + (host - block->host);
1342 return 0;
f471a17e 1343 }
94a6b54f 1344 }
432d268c 1345
1346 return -1;
1347}
f471a17e 1348
1349/* Some of the softmmu routines need to translate from a host pointer
1350 (typically a TLB entry) back to a ram offset. */
1351ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1352{
1353 ram_addr_t ram_addr;
f471a17e 1354
1355 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1356 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1357 abort();
1358 }
1359 return ram_addr;
1360}
1361
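/* Usage sketch (assumption, not original code): translating a host pointer
 * (typically taken from a TLB entry) back into a ram_addr_t:
 *
 *     ram_addr_t ra;
 *     if (qemu_ram_addr_from_host(host_ptr, &ra) == 0) {
 *         ... ra is an offset into the registered RAM blocks ...
 *     }
 *
 * The _nofail variant above aborts instead of returning an error.
 */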
a8170e5e 1362static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1363 unsigned size)
1364{
1365#ifdef DEBUG_UNASSIGNED
1366 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1367#endif
5b450407 1368#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1369 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
1370#endif
1371 return 0;
1372}
1373
a8170e5e 1374static void unassigned_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1375 uint64_t val, unsigned size)
1376{
1377#ifdef DEBUG_UNASSIGNED
0e0df1e2 1378 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 1379#endif
5b450407 1380#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1381 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 1382#endif
1383}
1384
1385static const MemoryRegionOps unassigned_mem_ops = {
1386 .read = unassigned_mem_read,
1387 .write = unassigned_mem_write,
1388 .endianness = DEVICE_NATIVE_ENDIAN,
1389};
e18231a3 1390
a8170e5e 1391static uint64_t error_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1392 unsigned size)
e18231a3 1393{
0e0df1e2 1394 abort();
1395}
1396
a8170e5e 1397static void error_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1398 uint64_t value, unsigned size)
e18231a3 1399{
0e0df1e2 1400 abort();
1401}
1402
1403static const MemoryRegionOps error_mem_ops = {
1404 .read = error_mem_read,
1405 .write = error_mem_write,
1406 .endianness = DEVICE_NATIVE_ENDIAN,
1407};
1408
1409static const MemoryRegionOps rom_mem_ops = {
1410 .read = error_mem_read,
1411 .write = unassigned_mem_write,
1412 .endianness = DEVICE_NATIVE_ENDIAN,
1413};
1414
a8170e5e 1415static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1416 uint64_t val, unsigned size)
9fa3e853 1417{
3a7d929e 1418 int dirty_flags;
f7c11b53 1419 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1420 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1421#if !defined(CONFIG_USER_ONLY)
0e0df1e2 1422 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1423 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 1424#endif
3a7d929e 1425 }
1426 switch (size) {
1427 case 1:
1428 stb_p(qemu_get_ram_ptr(ram_addr), val);
1429 break;
1430 case 2:
1431 stw_p(qemu_get_ram_ptr(ram_addr), val);
1432 break;
1433 case 4:
1434 stl_p(qemu_get_ram_ptr(ram_addr), val);
1435 break;
1436 default:
1437 abort();
3a7d929e 1438 }
f23db169 1439 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1440 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1441 /* we remove the notdirty callback only if the code has been
1442 flushed */
1443 if (dirty_flags == 0xff)
2e70f6ef 1444 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
1445}
1446
1447static const MemoryRegionOps notdirty_mem_ops = {
1448 .read = error_mem_read,
1449 .write = notdirty_mem_write,
1450 .endianness = DEVICE_NATIVE_ENDIAN,
1451};
1452
0f459d16 1453/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1454static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1455{
9349b4f9 1456 CPUArchState *env = cpu_single_env;
06d55cc1 1457 target_ulong pc, cs_base;
0f459d16 1458 target_ulong vaddr;
a1d1bb31 1459 CPUWatchpoint *wp;
06d55cc1 1460 int cpu_flags;
0f459d16 1461
1462 if (env->watchpoint_hit) {
1463 /* We re-entered the check after replacing the TB. Now raise
 1464 * the debug interrupt so that it will trigger after the
1465 * current instruction. */
1466 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1467 return;
1468 }
2e70f6ef 1469 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1470 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1471 if ((vaddr == (wp->vaddr & len_mask) ||
1472 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1473 wp->flags |= BP_WATCHPOINT_HIT;
1474 if (!env->watchpoint_hit) {
1475 env->watchpoint_hit = wp;
5a316526 1476 tb_check_watchpoint(env);
1477 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1478 env->exception_index = EXCP_DEBUG;
488d6577 1479 cpu_loop_exit(env);
1480 } else {
1481 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1482 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1483 cpu_resume_from_signal(env, NULL);
6e140f28 1484 }
06d55cc1 1485 }
1486 } else {
1487 wp->flags &= ~BP_WATCHPOINT_HIT;
1488 }
1489 }
1490}
1491
1492/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1493 so these check for a hit then pass through to the normal out-of-line
1494 phys routines. */
a8170e5e 1495static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1496 unsigned size)
6658ffb8 1497{
1498 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1499 switch (size) {
1500 case 1: return ldub_phys(addr);
1501 case 2: return lduw_phys(addr);
1502 case 4: return ldl_phys(addr);
1503 default: abort();
1504 }
1505}
1506
a8170e5e 1507static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1508 uint64_t val, unsigned size)
6658ffb8 1509{
1510 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1511 switch (size) {
1512 case 1:
1513 stb_phys(addr, val);
1514 break;
1515 case 2:
1516 stw_phys(addr, val);
1517 break;
1518 case 4:
1519 stl_phys(addr, val);
1520 break;
1521 default: abort();
1522 }
1523}
1524
1525static const MemoryRegionOps watch_mem_ops = {
1526 .read = watch_mem_read,
1527 .write = watch_mem_write,
1528 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1529};
6658ffb8 1530
a8170e5e 1531static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1532 unsigned len)
db7b5426 1533{
70c68e44 1534 subpage_t *mmio = opaque;
f6405247 1535 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1536 MemoryRegionSection *section;
1537#if defined(DEBUG_SUBPAGE)
1538 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1539 mmio, len, addr, idx);
1540#endif
db7b5426 1541
1542 section = &phys_sections[mmio->sub_section[idx]];
1543 addr += mmio->base;
1544 addr -= section->offset_within_address_space;
1545 addr += section->offset_within_region;
37ec01d4 1546 return io_mem_read(section->mr, addr, len);
1547}
1548
a8170e5e 1549static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1550 uint64_t value, unsigned len)
db7b5426 1551{
70c68e44 1552 subpage_t *mmio = opaque;
f6405247 1553 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1554 MemoryRegionSection *section;
db7b5426 1555#if defined(DEBUG_SUBPAGE)
1556 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1557 " idx %d value %"PRIx64"\n",
f6405247 1558 __func__, mmio, len, addr, idx, value);
db7b5426 1559#endif
f6405247 1560
1561 section = &phys_sections[mmio->sub_section[idx]];
1562 addr += mmio->base;
1563 addr -= section->offset_within_address_space;
1564 addr += section->offset_within_region;
37ec01d4 1565 io_mem_write(section->mr, addr, value, len);
1566}
1567
1568static const MemoryRegionOps subpage_ops = {
1569 .read = subpage_read,
1570 .write = subpage_write,
1571 .endianness = DEVICE_NATIVE_ENDIAN,
1572};
1573
a8170e5e 1574static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
de712f94 1575 unsigned size)
1576{
1577 ram_addr_t raddr = addr;
1578 void *ptr = qemu_get_ram_ptr(raddr);
1579 switch (size) {
1580 case 1: return ldub_p(ptr);
1581 case 2: return lduw_p(ptr);
1582 case 4: return ldl_p(ptr);
1583 default: abort();
1584 }
1585}
1586
a8170e5e 1587static void subpage_ram_write(void *opaque, hwaddr addr,
de712f94 1588 uint64_t value, unsigned size)
1589{
1590 ram_addr_t raddr = addr;
1591 void *ptr = qemu_get_ram_ptr(raddr);
1592 switch (size) {
1593 case 1: return stb_p(ptr, value);
1594 case 2: return stw_p(ptr, value);
1595 case 4: return stl_p(ptr, value);
1596 default: abort();
1597 }
1598}
1599
1600static const MemoryRegionOps subpage_ram_ops = {
1601 .read = subpage_ram_read,
1602 .write = subpage_ram_write,
1603 .endianness = DEVICE_NATIVE_ENDIAN,
1604};
1605
c227f099 1606static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1607 uint16_t section)
1608{
1609 int idx, eidx;
1610
1611 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1612 return -1;
1613 idx = SUBPAGE_IDX(start);
1614 eidx = SUBPAGE_IDX(end);
1615#if defined(DEBUG_SUBPAGE)
0bf9e31a 1616 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
 1617 mmio, start, end, idx, eidx, section);
1618#endif
1619 if (memory_region_is_ram(phys_sections[section].mr)) {
1620 MemoryRegionSection new_section = phys_sections[section];
1621 new_section.mr = &io_mem_subpage_ram;
1622 section = phys_section_add(&new_section);
56384e8b 1623 }
db7b5426 1624 for (; idx <= eidx; idx++) {
5312bd8b 1625 mmio->sub_section[idx] = section;
1626 }
1627
1628 return 0;
1629}
1630
a8170e5e 1631static subpage_t *subpage_init(hwaddr base)
db7b5426 1632{
c227f099 1633 subpage_t *mmio;
db7b5426 1634
7267c094 1635 mmio = g_malloc0(sizeof(subpage_t));
1636
1637 mmio->base = base;
1638 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1639 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1640 mmio->iomem.subpage = true;
db7b5426 1641#if defined(DEBUG_SUBPAGE)
 1642 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1643 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1644#endif
0f0cb164 1645 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
1646
1647 return mmio;
1648}
1649
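/* Usage sketch (assumption, not original code): register_subpage() above is
 * the caller of these helpers; for a page shared by two sections it does,
 * in effect:
 *
 *     subpage_t *sp = subpage_init(page_base);
 *     subpage_register(sp, 0x000, 0x7ff, section_a);
 *     subpage_register(sp, 0x800, 0xfff, section_b);
 *
 * where section_a/section_b are hypothetical ids returned by phys_section_add().
 */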
1650static uint16_t dummy_section(MemoryRegion *mr)
1651{
1652 MemoryRegionSection section = {
1653 .mr = mr,
1654 .offset_within_address_space = 0,
1655 .offset_within_region = 0,
1656 .size = UINT64_MAX,
1657 };
1658
1659 return phys_section_add(&section);
1660}
1661
a8170e5e 1662MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1663{
37ec01d4 1664 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
1665}
1666
1667static void io_mem_init(void)
1668{
0e0df1e2 1669 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
1670 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1671 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1672 "unassigned", UINT64_MAX);
1673 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1674 "notdirty", UINT64_MAX);
1675 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1676 "subpage-ram", UINT64_MAX);
1677 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1678 "watch", UINT64_MAX);
1679}
1680
1681static void mem_begin(MemoryListener *listener)
1682{
1683 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1684
1685 destroy_all_mappings(d);
1686 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1687}
1688
1689static void core_begin(MemoryListener *listener)
1690{
1691 phys_sections_clear();
1692 phys_section_unassigned = dummy_section(&io_mem_unassigned);
1693 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1694 phys_section_rom = dummy_section(&io_mem_rom);
1695 phys_section_watch = dummy_section(&io_mem_watch);
1696}
1697
1d71148e 1698static void tcg_commit(MemoryListener *listener)
50c1e149 1699{
9349b4f9 1700 CPUArchState *env;
1701
1702 /* since each CPU stores ram addresses in its TLB cache, we must
1703 reset the modified entries */
1704 /* XXX: slow ! */
1705 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1706 tlb_flush(env, 1);
1707 }
1708}
1709
1710static void core_log_global_start(MemoryListener *listener)
1711{
1712 cpu_physical_memory_set_dirty_tracking(1);
1713}
1714
1715static void core_log_global_stop(MemoryListener *listener)
1716{
1717 cpu_physical_memory_set_dirty_tracking(0);
1718}
1719
1720static void io_region_add(MemoryListener *listener,
1721 MemoryRegionSection *section)
1722{
1723 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1724
1725 mrio->mr = section->mr;
1726 mrio->offset = section->offset_within_region;
1727 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 1728 section->offset_within_address_space, section->size);
a2d33521 1729 ioport_register(&mrio->iorange);
1730}
1731
1732static void io_region_del(MemoryListener *listener,
1733 MemoryRegionSection *section)
1734{
1735 isa_unassign_ioport(section->offset_within_address_space, section->size);
1736}
1737
93632747 1738static MemoryListener core_memory_listener = {
50c1e149 1739 .begin = core_begin,
1740 .log_global_start = core_log_global_start,
1741 .log_global_stop = core_log_global_stop,
ac1970fb 1742 .priority = 1,
93632747
AK
1743};
1744
4855d41a
AK
1745static MemoryListener io_memory_listener = {
1746 .region_add = io_region_add,
1747 .region_del = io_region_del,
4855d41a
AK
1748 .priority = 0,
1749};
1750
1d71148e
AK
1751static MemoryListener tcg_memory_listener = {
1752 .commit = tcg_commit,
1753};
1754
ac1970fb
AK
1755void address_space_init_dispatch(AddressSpace *as)
1756{
1757 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1758
1759 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1760 d->listener = (MemoryListener) {
1761 .begin = mem_begin,
1762 .region_add = mem_add,
1763 .region_nop = mem_add,
1764 .priority = 0,
1765 };
1766 as->dispatch = d;
1767 memory_listener_register(&d->listener, as);
1768}
1769
83f3c251
AK
1770void address_space_destroy_dispatch(AddressSpace *as)
1771{
1772 AddressSpaceDispatch *d = as->dispatch;
1773
1774 memory_listener_unregister(&d->listener);
1775 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1776 g_free(d);
1777 as->dispatch = NULL;
1778}
1779
62152b8a
AK
1780static void memory_map_init(void)
1781{
7267c094 1782 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1783 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1784 address_space_init(&address_space_memory, system_memory);
1785 address_space_memory.name = "memory";
309cb471 1786
7267c094 1787 system_io = g_malloc(sizeof(*system_io));
309cb471 1788 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1789 address_space_init(&address_space_io, system_io);
1790 address_space_io.name = "I/O";
93632747 1791
f6790af6
AK
1792 memory_listener_register(&core_memory_listener, &address_space_memory);
1793 memory_listener_register(&io_memory_listener, &address_space_io);
1794 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1795
1796 dma_context_init(&dma_context_memory, &address_space_memory,
1797 NULL, NULL, NULL);
62152b8a
AK
1798}
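/*
 * Example (not part of exec.c): a hedged sketch of how another subsystem
 * could observe the same address-space updates as the core/io/tcg listeners
 * registered above.  Callbacks left NULL are simply not invoked, which is
 * what the commit-only tcg_memory_listener already relies on; the example_*
 * names are hypothetical.
 */
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    /* react to a section becoming visible in address_space_memory */
}

static MemoryListener example_memory_listener = {
    .region_add = example_region_add,
    .priority = 10,
};

static void example_listener_init(void)
{
    memory_listener_register(&example_memory_listener, &address_space_memory);
}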
1799
1800MemoryRegion *get_system_memory(void)
1801{
1802 return system_memory;
1803}
1804
309cb471
AK
1805MemoryRegion *get_system_io(void)
1806{
1807 return system_io;
1808}
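/*
 * Example (not part of exec.c): sketch of giving a bus master its own
 * AddressSpace rooted at a private MemoryRegion, mirroring what
 * memory_map_init() does for the global "memory" and "I/O" spaces.  The
 * example_* names and the region size are illustrative only.
 */
static MemoryRegion example_root;
static AddressSpace example_as;

static void example_address_space_init(void)
{
    memory_region_init(&example_root, "example-root", INT64_MAX);
    address_space_init(&example_as, &example_root);
    example_as.name = "example";
}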
1809
e2eef170
PB
1810#endif /* !defined(CONFIG_USER_ONLY) */
1811
13eb76e0
FB
1812/* physical memory access (slow version, mainly for debug) */
1813#if defined(CONFIG_USER_ONLY)
9349b4f9 1814int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1815 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1816{
1817 int l, flags;
1818 target_ulong page;
53a5960a 1819 void * p;
13eb76e0
FB
1820
1821 while (len > 0) {
1822 page = addr & TARGET_PAGE_MASK;
1823 l = (page + TARGET_PAGE_SIZE) - addr;
1824 if (l > len)
1825 l = len;
1826 flags = page_get_flags(page);
1827 if (!(flags & PAGE_VALID))
a68fe89c 1828 return -1;
13eb76e0
FB
1829 if (is_write) {
1830 if (!(flags & PAGE_WRITE))
a68fe89c 1831 return -1;
579a97f7 1832 /* XXX: this code should not depend on lock_user */
72fb7daa 1833 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1834 return -1;
72fb7daa
AJ
1835 memcpy(p, buf, l);
1836 unlock_user(p, addr, l);
13eb76e0
FB
1837 } else {
1838 if (!(flags & PAGE_READ))
a68fe89c 1839 return -1;
579a97f7 1840 /* XXX: this code should not depend on lock_user */
72fb7daa 1841 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1842 return -1;
72fb7daa 1843 memcpy(buf, p, l);
5b257578 1844 unlock_user(p, addr, 0);
13eb76e0
FB
1845 }
1846 len -= l;
1847 buf += l;
1848 addr += l;
1849 }
a68fe89c 1850 return 0;
13eb76e0 1851}
8df1cd07 1852
13eb76e0 1853#else
51d7a9eb 1854
a8170e5e
AK
1855static void invalidate_and_set_dirty(hwaddr addr,
1856 hwaddr length)
51d7a9eb
AP
1857{
1858 if (!cpu_physical_memory_is_dirty(addr)) {
1859 /* invalidate code */
1860 tb_invalidate_phys_page_range(addr, addr + length, 0);
1861 /* set dirty bit */
1862 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1863 }
e226939d 1864 xen_modified_memory(addr, length);
51d7a9eb
AP
1865}
1866
a8170e5e 1867void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1868 int len, bool is_write)
13eb76e0 1869{
ac1970fb 1870 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1871 int l;
13eb76e0
FB
1872 uint8_t *ptr;
1873 uint32_t val;
a8170e5e 1874 hwaddr page;
f3705d53 1875 MemoryRegionSection *section;
3b46e624 1876
13eb76e0
FB
1877 while (len > 0) {
1878 page = addr & TARGET_PAGE_MASK;
1879 l = (page + TARGET_PAGE_SIZE) - addr;
1880 if (l > len)
1881 l = len;
ac1970fb 1882 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1883
13eb76e0 1884 if (is_write) {
f3705d53 1885 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1886 hwaddr addr1;
cc5bea60 1887 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
1888 /* XXX: could force cpu_single_env to NULL to avoid
1889 potential bugs */
6c2934db 1890 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1891 /* 32 bit write access */
c27004ec 1892 val = ldl_p(buf);
37ec01d4 1893 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1894 l = 4;
6c2934db 1895 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1896 /* 16 bit write access */
c27004ec 1897 val = lduw_p(buf);
37ec01d4 1898 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
1899 l = 2;
1900 } else {
1c213d19 1901 /* 8 bit write access */
c27004ec 1902 val = ldub_p(buf);
37ec01d4 1903 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
1904 l = 1;
1905 }
f3705d53 1906 } else if (!section->readonly) {
8ca5692d 1907 ram_addr_t addr1;
f3705d53 1908 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1909 + memory_region_section_addr(section, addr);
13eb76e0 1910 /* RAM case */
5579c7f3 1911 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1912 memcpy(ptr, buf, l);
51d7a9eb 1913 invalidate_and_set_dirty(addr1, l);
050a0ddf 1914 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1915 }
1916 } else {
cc5bea60
BS
1917 if (!(memory_region_is_ram(section->mr) ||
1918 memory_region_is_romd(section->mr))) {
a8170e5e 1919 hwaddr addr1;
13eb76e0 1920 /* I/O case */
cc5bea60 1921 addr1 = memory_region_section_addr(section, addr);
6c2934db 1922 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1923 /* 32 bit read access */
37ec01d4 1924 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1925 stl_p(buf, val);
13eb76e0 1926 l = 4;
6c2934db 1927 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1928 /* 16 bit read access */
37ec01d4 1929 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1930 stw_p(buf, val);
13eb76e0
FB
1931 l = 2;
1932 } else {
1c213d19 1933 /* 8 bit read access */
37ec01d4 1934 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1935 stb_p(buf, val);
13eb76e0
FB
1936 l = 1;
1937 }
1938 } else {
1939 /* RAM case */
0a1b357f 1940 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
1941 + memory_region_section_addr(section,
1942 addr));
f3705d53 1943 memcpy(buf, ptr, l);
050a0ddf 1944 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1945 }
1946 }
1947 len -= l;
1948 buf += l;
1949 addr += l;
1950 }
1951}
8df1cd07 1952
a8170e5e 1953void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1954 const uint8_t *buf, int len)
1955{
1956 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1957}
1958
1959/**
1960 * address_space_read: read from an address space.
1961 *
1962 * @as: #AddressSpace to be accessed
1963 * @addr: address within that address space
 1964 * @buf: buffer into which the data is copied; @len gives its length in bytes
1965 */
a8170e5e 1966void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
1967{
1968 address_space_rw(as, addr, buf, len, false);
1969}
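/*
 * Example (not part of exec.c): minimal sketch of reading a guest-physical
 * buffer through the slow path above.  The address 0x1000 is purely
 * illustrative; callers need not split the access at page boundaries because
 * address_space_rw() does that internally.
 */
static void example_read_guest_bytes(void)
{
    uint8_t scratch[64];

    address_space_read(&address_space_memory, 0x1000, scratch, sizeof(scratch));
}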
1970
1971
a8170e5e 1972void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1973 int len, int is_write)
1974{
1975 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1976}
1977
d0ecd2aa 1978/* used for ROM loading : can write in RAM and ROM */
a8170e5e 1979void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1980 const uint8_t *buf, int len)
1981{
ac1970fb 1982 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
1983 int l;
1984 uint8_t *ptr;
a8170e5e 1985 hwaddr page;
f3705d53 1986 MemoryRegionSection *section;
3b46e624 1987
d0ecd2aa
FB
1988 while (len > 0) {
1989 page = addr & TARGET_PAGE_MASK;
1990 l = (page + TARGET_PAGE_SIZE) - addr;
1991 if (l > len)
1992 l = len;
ac1970fb 1993 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1994
cc5bea60
BS
1995 if (!(memory_region_is_ram(section->mr) ||
1996 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
1997 /* do nothing */
1998 } else {
1999 unsigned long addr1;
f3705d53 2000 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 2001 + memory_region_section_addr(section, addr);
d0ecd2aa 2002 /* ROM/RAM case */
5579c7f3 2003 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2004 memcpy(ptr, buf, l);
51d7a9eb 2005 invalidate_and_set_dirty(addr1, l);
050a0ddf 2006 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
2007 }
2008 len -= l;
2009 buf += l;
2010 addr += l;
2011 }
2012}
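/*
 * Example (not part of exec.c): sketch of a firmware loader placing a blob at
 * a fixed guest-physical address.  A plain cpu_physical_memory_write() is
 * silently dropped for read-only sections (see the !section->readonly test in
 * address_space_rw() above), whereas this helper writes through them.  The
 * blob pointer, its size and the 0xfffc0000 base are illustrative.
 */
static void example_install_firmware(const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, blob_size);
}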
2013
6d16c2f8
AL
2014typedef struct {
2015 void *buffer;
a8170e5e
AK
2016 hwaddr addr;
2017 hwaddr len;
6d16c2f8
AL
2018} BounceBuffer;
2019
2020static BounceBuffer bounce;
2021
ba223c29
AL
2022typedef struct MapClient {
2023 void *opaque;
2024 void (*callback)(void *opaque);
72cf2d4f 2025 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2026} MapClient;
2027
72cf2d4f
BS
2028static QLIST_HEAD(map_client_list, MapClient) map_client_list
2029 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2030
2031void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2032{
7267c094 2033 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2034
2035 client->opaque = opaque;
2036 client->callback = callback;
72cf2d4f 2037 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2038 return client;
2039}
2040
8b9c99d9 2041static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2042{
2043 MapClient *client = (MapClient *)_client;
2044
72cf2d4f 2045 QLIST_REMOVE(client, link);
7267c094 2046 g_free(client);
ba223c29
AL
2047}
2048
2049static void cpu_notify_map_clients(void)
2050{
2051 MapClient *client;
2052
72cf2d4f
BS
2053 while (!QLIST_EMPTY(&map_client_list)) {
2054 client = QLIST_FIRST(&map_client_list);
ba223c29 2055 client->callback(client->opaque);
34d5e948 2056 cpu_unregister_map_client(client);
ba223c29
AL
2057 }
2058}
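/*
 * Example (not part of exec.c): sketch of the retry protocol built on
 * cpu_register_map_client().  The callback runs from cpu_notify_map_clients()
 * once the bounce buffer is released, at which point the device can re-issue
 * the address_space_map() call that failed.  ExampleDev and example_dev_kick
 * are hypothetical stand-ins for a real device model.
 */
typedef struct ExampleDev ExampleDev;   /* hypothetical device state */

static void example_dev_kick(void *opaque)
{
    /* ExampleDev *dev = opaque;  -- retry the failed mapping here */
}

static void example_dev_wait_for_bounce(ExampleDev *dev)
{
    cpu_register_map_client(dev, example_dev_kick);
}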
2059
6d16c2f8
AL
2060/* Map a physical memory region into a host virtual address.
2061 * May map a subset of the requested range, given by and returned in *plen.
2062 * May return NULL if resources needed to perform the mapping are exhausted.
2063 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2064 * Use cpu_register_map_client() to know when retrying the map operation is
2065 * likely to succeed.
6d16c2f8 2066 */
ac1970fb 2067void *address_space_map(AddressSpace *as,
a8170e5e
AK
2068 hwaddr addr,
2069 hwaddr *plen,
ac1970fb 2070 bool is_write)
6d16c2f8 2071{
ac1970fb 2072 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
2073 hwaddr len = *plen;
2074 hwaddr todo = 0;
6d16c2f8 2075 int l;
a8170e5e 2076 hwaddr page;
f3705d53 2077 MemoryRegionSection *section;
f15fbc4b 2078 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2079 ram_addr_t rlen;
2080 void *ret;
6d16c2f8
AL
2081
2082 while (len > 0) {
2083 page = addr & TARGET_PAGE_MASK;
2084 l = (page + TARGET_PAGE_SIZE) - addr;
2085 if (l > len)
2086 l = len;
ac1970fb 2087 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2088
f3705d53 2089 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2090 if (todo || bounce.buffer) {
6d16c2f8
AL
2091 break;
2092 }
2093 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2094 bounce.addr = addr;
2095 bounce.len = l;
2096 if (!is_write) {
ac1970fb 2097 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2098 }
38bee5dc
SS
2099
2100 *plen = l;
2101 return bounce.buffer;
6d16c2f8 2102 }
8ab934f9 2103 if (!todo) {
f3705d53 2104 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2105 + memory_region_section_addr(section, addr);
8ab934f9 2106 }
6d16c2f8
AL
2107
2108 len -= l;
2109 addr += l;
38bee5dc 2110 todo += l;
6d16c2f8 2111 }
8ab934f9
SS
2112 rlen = todo;
2113 ret = qemu_ram_ptr_length(raddr, &rlen);
2114 *plen = rlen;
2115 return ret;
6d16c2f8
AL
2116}
2117
ac1970fb 2118/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2119 * Will also mark the memory as dirty if is_write == 1. access_len gives
2120 * the amount of memory that was actually read or written by the caller.
2121 */
a8170e5e
AK
2122void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2123 int is_write, hwaddr access_len)
6d16c2f8
AL
2124{
2125 if (buffer != bounce.buffer) {
2126 if (is_write) {
e890261f 2127 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2128 while (access_len) {
2129 unsigned l;
2130 l = TARGET_PAGE_SIZE;
2131 if (l > access_len)
2132 l = access_len;
51d7a9eb 2133 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2134 addr1 += l;
2135 access_len -= l;
2136 }
2137 }
868bb33f 2138 if (xen_enabled()) {
e41d7c69 2139 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2140 }
6d16c2f8
AL
2141 return;
2142 }
2143 if (is_write) {
ac1970fb 2144 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2145 }
f8a83245 2146 qemu_vfree(bounce.buffer);
6d16c2f8 2147 bounce.buffer = NULL;
ba223c29 2148 cpu_notify_map_clients();
6d16c2f8 2149}
d0ecd2aa 2150
a8170e5e
AK
2151void *cpu_physical_memory_map(hwaddr addr,
2152 hwaddr *plen,
ac1970fb
AK
2153 int is_write)
2154{
2155 return address_space_map(&address_space_memory, addr, plen, is_write);
2156}
2157
a8170e5e
AK
2158void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2159 int is_write, hwaddr access_len)
ac1970fb
AK
2160{
2161 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2162}
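/*
 * Example (not part of exec.c): hedged sketch of the map/copy/unmap pattern a
 * DMA-capable device model might use.  guest_addr, data and len are
 * caller-supplied values; on a NULL return the caller would register a map
 * client (see above) and retry later.
 */
static bool example_dma_to_guest(AddressSpace *as, hwaddr guest_addr,
                                 const uint8_t *data, hwaddr len)
{
    hwaddr plen = len;
    void *host = address_space_map(as, guest_addr, &plen, true);

    if (!host) {
        return false;              /* bounce buffer busy: retry via map client */
    }
    memcpy(host, data, plen);      /* plen may be smaller than the requested len */
    address_space_unmap(as, host, plen, true, plen);
    return plen == len;
}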
2163
8df1cd07 2164/* warning: addr must be aligned */
a8170e5e 2165static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2166 enum device_endian endian)
8df1cd07 2167{
8df1cd07
FB
2168 uint8_t *ptr;
2169 uint32_t val;
f3705d53 2170 MemoryRegionSection *section;
8df1cd07 2171
ac1970fb 2172 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2173
cc5bea60
BS
2174 if (!(memory_region_is_ram(section->mr) ||
2175 memory_region_is_romd(section->mr))) {
8df1cd07 2176 /* I/O case */
cc5bea60 2177 addr = memory_region_section_addr(section, addr);
37ec01d4 2178 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
2179#if defined(TARGET_WORDS_BIGENDIAN)
2180 if (endian == DEVICE_LITTLE_ENDIAN) {
2181 val = bswap32(val);
2182 }
2183#else
2184 if (endian == DEVICE_BIG_ENDIAN) {
2185 val = bswap32(val);
2186 }
2187#endif
8df1cd07
FB
2188 } else {
2189 /* RAM case */
f3705d53 2190 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2191 & TARGET_PAGE_MASK)
cc5bea60 2192 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2193 switch (endian) {
2194 case DEVICE_LITTLE_ENDIAN:
2195 val = ldl_le_p(ptr);
2196 break;
2197 case DEVICE_BIG_ENDIAN:
2198 val = ldl_be_p(ptr);
2199 break;
2200 default:
2201 val = ldl_p(ptr);
2202 break;
2203 }
8df1cd07
FB
2204 }
2205 return val;
2206}
2207
a8170e5e 2208uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2209{
2210 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2211}
2212
a8170e5e 2213uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2214{
2215 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2216}
2217
a8170e5e 2218uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2219{
2220 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2221}
2222
84b7b8e7 2223/* warning: addr must be aligned */
a8170e5e 2224static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2225 enum device_endian endian)
84b7b8e7 2226{
84b7b8e7
FB
2227 uint8_t *ptr;
2228 uint64_t val;
f3705d53 2229 MemoryRegionSection *section;
84b7b8e7 2230
ac1970fb 2231 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2232
cc5bea60
BS
2233 if (!(memory_region_is_ram(section->mr) ||
2234 memory_region_is_romd(section->mr))) {
84b7b8e7 2235 /* I/O case */
cc5bea60 2236 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
2237
2238 /* XXX This is broken when device endian != cpu endian.
2239 Fix and add "endian" variable check */
84b7b8e7 2240#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2241 val = io_mem_read(section->mr, addr, 4) << 32;
2242 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2243#else
37ec01d4
AK
2244 val = io_mem_read(section->mr, addr, 4);
2245 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
2246#endif
2247 } else {
2248 /* RAM case */
f3705d53 2249 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2250 & TARGET_PAGE_MASK)
cc5bea60 2251 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2252 switch (endian) {
2253 case DEVICE_LITTLE_ENDIAN:
2254 val = ldq_le_p(ptr);
2255 break;
2256 case DEVICE_BIG_ENDIAN:
2257 val = ldq_be_p(ptr);
2258 break;
2259 default:
2260 val = ldq_p(ptr);
2261 break;
2262 }
84b7b8e7
FB
2263 }
2264 return val;
2265}
2266
a8170e5e 2267uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2268{
2269 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2270}
2271
a8170e5e 2272uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2273{
2274 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2275}
2276
a8170e5e 2277uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2278{
2279 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2280}
2281
aab33094 2282/* XXX: optimize */
a8170e5e 2283uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2284{
2285 uint8_t val;
2286 cpu_physical_memory_read(addr, &val, 1);
2287 return val;
2288}
2289
733f0b02 2290/* warning: addr must be aligned */
a8170e5e 2291static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2292 enum device_endian endian)
aab33094 2293{
733f0b02
MT
2294 uint8_t *ptr;
2295 uint64_t val;
f3705d53 2296 MemoryRegionSection *section;
733f0b02 2297
ac1970fb 2298 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2299
cc5bea60
BS
2300 if (!(memory_region_is_ram(section->mr) ||
2301 memory_region_is_romd(section->mr))) {
733f0b02 2302 /* I/O case */
cc5bea60 2303 addr = memory_region_section_addr(section, addr);
37ec01d4 2304 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
2305#if defined(TARGET_WORDS_BIGENDIAN)
2306 if (endian == DEVICE_LITTLE_ENDIAN) {
2307 val = bswap16(val);
2308 }
2309#else
2310 if (endian == DEVICE_BIG_ENDIAN) {
2311 val = bswap16(val);
2312 }
2313#endif
733f0b02
MT
2314 } else {
2315 /* RAM case */
f3705d53 2316 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2317 & TARGET_PAGE_MASK)
cc5bea60 2318 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2319 switch (endian) {
2320 case DEVICE_LITTLE_ENDIAN:
2321 val = lduw_le_p(ptr);
2322 break;
2323 case DEVICE_BIG_ENDIAN:
2324 val = lduw_be_p(ptr);
2325 break;
2326 default:
2327 val = lduw_p(ptr);
2328 break;
2329 }
733f0b02
MT
2330 }
2331 return val;
aab33094
FB
2332}
2333
a8170e5e 2334uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2335{
2336 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2337}
2338
a8170e5e 2339uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2340{
2341 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2342}
2343
a8170e5e 2344uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2345{
2346 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2347}
2348
8df1cd07
FB
 2349/* warning: addr must be aligned. The RAM page is not marked as dirty
2350 and the code inside is not invalidated. It is useful if the dirty
2351 bits are used to track modified PTEs */
a8170e5e 2352void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2353{
8df1cd07 2354 uint8_t *ptr;
f3705d53 2355 MemoryRegionSection *section;
8df1cd07 2356
ac1970fb 2357 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2358
f3705d53 2359 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2360 addr = memory_region_section_addr(section, addr);
f3705d53 2361 if (memory_region_is_ram(section->mr)) {
37ec01d4 2362 section = &phys_sections[phys_section_rom];
06ef3525 2363 }
37ec01d4 2364 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2365 } else {
f3705d53 2366 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2367 & TARGET_PAGE_MASK)
cc5bea60 2368 + memory_region_section_addr(section, addr);
5579c7f3 2369 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2370 stl_p(ptr, val);
74576198
AL
2371
2372 if (unlikely(in_migration)) {
2373 if (!cpu_physical_memory_is_dirty(addr1)) {
2374 /* invalidate code */
2375 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2376 /* set dirty bit */
f7c11b53
YT
2377 cpu_physical_memory_set_dirty_flags(
2378 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2379 }
2380 }
8df1cd07
FB
2381 }
2382}
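/*
 * Example (not part of exec.c): sketch of rewriting a guest page-table entry
 * with the helper above, so the containing RAM page is neither marked dirty
 * nor has its translated code invalidated.  pte_gpa and new_pte are
 * illustrative values supplied by the caller.
 */
static void example_update_pte(hwaddr pte_gpa, uint32_t new_pte)
{
    stl_phys_notdirty(pte_gpa, new_pte);
}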
2383
a8170e5e 2384void stq_phys_notdirty(hwaddr addr, uint64_t val)
bc98a7ef 2385{
bc98a7ef 2386 uint8_t *ptr;
f3705d53 2387 MemoryRegionSection *section;
bc98a7ef 2388
ac1970fb 2389 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2390
f3705d53 2391 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2392 addr = memory_region_section_addr(section, addr);
f3705d53 2393 if (memory_region_is_ram(section->mr)) {
37ec01d4 2394 section = &phys_sections[phys_section_rom];
06ef3525 2395 }
bc98a7ef 2396#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2397 io_mem_write(section->mr, addr, val >> 32, 4);
2398 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 2399#else
37ec01d4
AK
2400 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2401 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
2402#endif
2403 } else {
f3705d53 2404 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2405 & TARGET_PAGE_MASK)
cc5bea60 2406 + memory_region_section_addr(section, addr));
bc98a7ef
JM
2407 stq_p(ptr, val);
2408 }
2409}
2410
8df1cd07 2411/* warning: addr must be aligned */
a8170e5e 2412static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2413 enum device_endian endian)
8df1cd07 2414{
8df1cd07 2415 uint8_t *ptr;
f3705d53 2416 MemoryRegionSection *section;
8df1cd07 2417
ac1970fb 2418 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2419
f3705d53 2420 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2421 addr = memory_region_section_addr(section, addr);
f3705d53 2422 if (memory_region_is_ram(section->mr)) {
37ec01d4 2423 section = &phys_sections[phys_section_rom];
06ef3525 2424 }
1e78bcc1
AG
2425#if defined(TARGET_WORDS_BIGENDIAN)
2426 if (endian == DEVICE_LITTLE_ENDIAN) {
2427 val = bswap32(val);
2428 }
2429#else
2430 if (endian == DEVICE_BIG_ENDIAN) {
2431 val = bswap32(val);
2432 }
2433#endif
37ec01d4 2434 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
2435 } else {
2436 unsigned long addr1;
f3705d53 2437 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2438 + memory_region_section_addr(section, addr);
8df1cd07 2439 /* RAM case */
5579c7f3 2440 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2441 switch (endian) {
2442 case DEVICE_LITTLE_ENDIAN:
2443 stl_le_p(ptr, val);
2444 break;
2445 case DEVICE_BIG_ENDIAN:
2446 stl_be_p(ptr, val);
2447 break;
2448 default:
2449 stl_p(ptr, val);
2450 break;
2451 }
51d7a9eb 2452 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2453 }
2454}
2455
a8170e5e 2456void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2457{
2458 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2459}
2460
a8170e5e 2461void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2462{
2463 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2464}
2465
a8170e5e 2466void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2467{
2468 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2469}
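/*
 * Example (not part of exec.c): a device model keeping a 32-bit little-endian
 * counter in guest RAM can use the fixed-endian helpers above so the result
 * does not depend on TARGET_WORDS_BIGENDIAN.  counter_gpa is a hypothetical
 * guest-physical address.
 */
static void example_bump_le_counter(hwaddr counter_gpa)
{
    uint32_t v = ldl_le_phys(counter_gpa);   /* byte-swapped on big-endian targets */

    stl_le_phys(counter_gpa, v + 1);
}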
2470
aab33094 2471/* XXX: optimize */
a8170e5e 2472void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2473{
2474 uint8_t v = val;
2475 cpu_physical_memory_write(addr, &v, 1);
2476}
2477
733f0b02 2478/* warning: addr must be aligned */
a8170e5e 2479static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2480 enum device_endian endian)
aab33094 2481{
733f0b02 2482 uint8_t *ptr;
f3705d53 2483 MemoryRegionSection *section;
733f0b02 2484
ac1970fb 2485 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2486
f3705d53 2487 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2488 addr = memory_region_section_addr(section, addr);
f3705d53 2489 if (memory_region_is_ram(section->mr)) {
37ec01d4 2490 section = &phys_sections[phys_section_rom];
06ef3525 2491 }
1e78bcc1
AG
2492#if defined(TARGET_WORDS_BIGENDIAN)
2493 if (endian == DEVICE_LITTLE_ENDIAN) {
2494 val = bswap16(val);
2495 }
2496#else
2497 if (endian == DEVICE_BIG_ENDIAN) {
2498 val = bswap16(val);
2499 }
2500#endif
37ec01d4 2501 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
2502 } else {
2503 unsigned long addr1;
f3705d53 2504 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2505 + memory_region_section_addr(section, addr);
733f0b02
MT
2506 /* RAM case */
2507 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2508 switch (endian) {
2509 case DEVICE_LITTLE_ENDIAN:
2510 stw_le_p(ptr, val);
2511 break;
2512 case DEVICE_BIG_ENDIAN:
2513 stw_be_p(ptr, val);
2514 break;
2515 default:
2516 stw_p(ptr, val);
2517 break;
2518 }
51d7a9eb 2519 invalidate_and_set_dirty(addr1, 2);
733f0b02 2520 }
aab33094
FB
2521}
2522
a8170e5e 2523void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2524{
2525 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2526}
2527
a8170e5e 2528void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2529{
2530 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2531}
2532
a8170e5e 2533void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2534{
2535 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2536}
2537
aab33094 2538/* XXX: optimize */
a8170e5e 2539void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2540{
2541 val = tswap64(val);
71d2b725 2542 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2543}
2544
a8170e5e 2545void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2546{
2547 val = cpu_to_le64(val);
2548 cpu_physical_memory_write(addr, &val, 8);
2549}
2550
a8170e5e 2551void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2552{
2553 val = cpu_to_be64(val);
2554 cpu_physical_memory_write(addr, &val, 8);
2555}
2556
5e2972fd 2557/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2558int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2559 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2560{
2561 int l;
a8170e5e 2562 hwaddr phys_addr;
9b3c35e0 2563 target_ulong page;
13eb76e0
FB
2564
2565 while (len > 0) {
2566 page = addr & TARGET_PAGE_MASK;
2567 phys_addr = cpu_get_phys_page_debug(env, page);
2568 /* if no physical page mapped, return an error */
2569 if (phys_addr == -1)
2570 return -1;
2571 l = (page + TARGET_PAGE_SIZE) - addr;
2572 if (l > len)
2573 l = len;
5e2972fd 2574 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2575 if (is_write)
2576 cpu_physical_memory_write_rom(phys_addr, buf, l);
2577 else
5e2972fd 2578 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2579 len -= l;
2580 buf += l;
2581 addr += l;
2582 }
2583 return 0;
2584}
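/*
 * Example (not part of exec.c): sketch of a debugger-style peek at guest
 * virtual memory.  Page translation and the ROM-capable write path are
 * handled inside cpu_memory_rw_debug(); a negative return means the virtual
 * page is unmapped.
 */
static int example_debug_peek32(CPUArchState *env, target_ulong vaddr,
                                uint32_t *out)
{
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, sizeof(*out), 0);
}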
a68fe89c 2585#endif
13eb76e0 2586
8e4a424b
BS
2587#if !defined(CONFIG_USER_ONLY)
2588
2589/*
2590 * A helper function for the _utterly broken_ virtio device model to find out if
2591 * it's running on a big endian machine. Don't do this at home kids!
2592 */
2593bool virtio_is_big_endian(void);
2594bool virtio_is_big_endian(void)
2595{
2596#if defined(TARGET_WORDS_BIGENDIAN)
2597 return true;
2598#else
2599 return false;
2600#endif
2601}
2602
2603#endif
2604
76f35538 2605#ifndef CONFIG_USER_ONLY
a8170e5e 2606bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2607{
2608 MemoryRegionSection *section;
2609
ac1970fb
AK
2610 section = phys_page_find(address_space_memory.dispatch,
2611 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
2612
2613 return !(memory_region_is_ram(section->mr) ||
2614 memory_region_is_romd(section->mr));
2615}
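/*
 * Example (not part of exec.c): a caller that wants zero-copy access could
 * use the predicate above to fall back to bounce buffering for MMIO-backed
 * pages.
 */
static bool example_can_map_directly(hwaddr gpa)
{
    return !cpu_physical_memory_is_io(gpa);
}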
2616#endif