/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "dma.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"
#include "translate-all.h"

#include "memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
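/*
 * Added commentary (not in the original source): the phys_map built by
 * phys_page_set() is a small radix tree, P_L2_LEVELS deep, whose leaf
 * entries hold 16-bit indices into phys_sections[].  phys_page_find()
 * walks one node per level, consuming L2_BITS of the page index at each
 * step, and falls back to the "unassigned" section when it hits a NIL
 * pointer, so a lookup never fails outright.
 */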

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
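/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file): a debugger front end such as the gdbstub typically drives the
 * insertion helpers above like this:
 *
 *     CPUBreakpoint *bp;
 *     CPUWatchpoint *wp;
 *     cpu_breakpoint_insert(env, pc, BP_GDB, &bp);
 *     cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp);
 *     ...
 *     cpu_breakpoint_remove_by_ref(env, bp);
 *     cpu_watchpoint_remove_by_ref(env, wp);
 *
 * Watchpoint lengths must be a power of two and the address must be
 * aligned to that length, as checked in cpu_watchpoint_insert() above.
 */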

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
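/*
 * Added commentary (not in the original source): the value returned
 * above does double duty.  For RAM pages it is the ram_addr of the page
 * combined with one of the special section indices
 * (phys_section_notdirty / phys_section_rom) so that stores can still
 * be trapped for dirty tracking; for MMIO pages it encodes the index of
 * the MemoryRegionSection, which iotlb_to_region() later turns back
 * into a MemoryRegion when the TLB entry is used.
 */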
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
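/*
 * Added commentary (not in the original source): mem_add() carves an
 * incoming MemoryRegionSection into three pieces - an unaligned head,
 * a run of whole target pages, and an unaligned tail.  Pieces smaller
 * than a page (or pages whose offset within the region is unaligned)
 * are routed through the subpage machinery above, while fully aligned
 * runs are mapped directly with register_multipage().
 */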

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
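/*
 * Worked example (added commentary, not original): with blocks at
 * offsets [0, 0x8000000) and [0x10000000, 0x18000000), a request for
 * 0x4000000 bytes considers the 0x8000000-byte gap after the first
 * block and the unbounded space after the second; the smaller gap that
 * still fits wins, so the function returns 0x8000000.
 */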

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
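/*
 * Illustrative usage sketch (added, not part of the original file):
 * board code normally does not call qemu_ram_alloc() directly but goes
 * through the memory API, which ends up here, e.g.:
 *
 *     memory_region_init_ram(mr, "pc.ram", ram_size);
 *     vmstate_register_ram_global(mr);
 *
 * memory_region_init_ram() allocates the backing RAMBlock via
 * qemu_ram_alloc(), and qemu_ram_set_idstr() above gives the block the
 * name used to match it up during migration.
 */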

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
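/*
 * Added commentary (not in the original source): a subpage_t stands in
 * for one guest page that is shared by several memory regions.  Its
 * sub_section[] array maps every byte offset within the page to a
 * phys_sections index, so subpage_read()/subpage_write() above simply
 * look up the real section for the offset and forward the access with
 * io_mem_read()/io_mem_write().
 */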
1605
5312bd8b
AK
1606static uint16_t dummy_section(MemoryRegion *mr)
1607{
1608 MemoryRegionSection section = {
1609 .mr = mr,
1610 .offset_within_address_space = 0,
1611 .offset_within_region = 0,
1612 .size = UINT64_MAX,
1613 };
1614
1615 return phys_section_add(&section);
1616}
1617
a8170e5e 1618MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1619{
37ec01d4 1620 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1621}
1622
e9179ce1
AK
1623static void io_mem_init(void)
1624{
0e0df1e2 1625 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
1626 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1627 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1628 "unassigned", UINT64_MAX);
1629 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1630 "notdirty", UINT64_MAX);
de712f94
AK
1631 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1632 "subpage-ram", UINT64_MAX);
1ec9b909
AK
1633 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1634 "watch", UINT64_MAX);
e9179ce1
AK
1635}
1636
ac1970fb
AK
1637static void mem_begin(MemoryListener *listener)
1638{
1639 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1640
1641 destroy_all_mappings(d);
1642 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1643}
1644
50c1e149
AK
1645static void core_begin(MemoryListener *listener)
1646{
5312bd8b
AK
1647 phys_sections_clear();
1648 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
1649 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1650 phys_section_rom = dummy_section(&io_mem_rom);
1651 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
1652}
1653
1d71148e 1654static void tcg_commit(MemoryListener *listener)
50c1e149 1655{
9349b4f9 1656 CPUArchState *env;
117712c3
AK
1657
1658 /* since each CPU stores ram addresses in its TLB cache, we must
1659 reset the modified entries */
1660 /* XXX: slow ! */
1661 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1662 tlb_flush(env, 1);
1663 }
50c1e149
AK
1664}
1665
93632747
AK
1666static void core_log_global_start(MemoryListener *listener)
1667{
1668 cpu_physical_memory_set_dirty_tracking(1);
1669}
1670
1671static void core_log_global_stop(MemoryListener *listener)
1672{
1673 cpu_physical_memory_set_dirty_tracking(0);
1674}
1675
4855d41a
AK
1676static void io_region_add(MemoryListener *listener,
1677 MemoryRegionSection *section)
1678{
a2d33521
AK
1679 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1680
1681 mrio->mr = section->mr;
1682 mrio->offset = section->offset_within_region;
1683 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 1684 section->offset_within_address_space, section->size);
a2d33521 1685 ioport_register(&mrio->iorange);
4855d41a
AK
1686}
1687
1688static void io_region_del(MemoryListener *listener,
1689 MemoryRegionSection *section)
1690{
1691 isa_unassign_ioport(section->offset_within_address_space, section->size);
1692}
1693
93632747 1694static MemoryListener core_memory_listener = {
50c1e149 1695 .begin = core_begin,
93632747
AK
1696 .log_global_start = core_log_global_start,
1697 .log_global_stop = core_log_global_stop,
ac1970fb 1698 .priority = 1,
93632747
AK
1699};
1700
4855d41a
AK
1701static MemoryListener io_memory_listener = {
1702 .region_add = io_region_add,
1703 .region_del = io_region_del,
4855d41a
AK
1704 .priority = 0,
1705};
1706
1d71148e
AK
1707static MemoryListener tcg_memory_listener = {
1708 .commit = tcg_commit,
1709};
1710
ac1970fb
AK
1711void address_space_init_dispatch(AddressSpace *as)
1712{
1713 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1714
1715 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1716 d->listener = (MemoryListener) {
1717 .begin = mem_begin,
1718 .region_add = mem_add,
1719 .region_nop = mem_add,
1720 .priority = 0,
1721 };
1722 as->dispatch = d;
1723 memory_listener_register(&d->listener, as);
1724}
1725
83f3c251
AK
1726void address_space_destroy_dispatch(AddressSpace *as)
1727{
1728 AddressSpaceDispatch *d = as->dispatch;
1729
1730 memory_listener_unregister(&d->listener);
1731 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1732 g_free(d);
1733 as->dispatch = NULL;
1734}
1735
62152b8a
AK
1736static void memory_map_init(void)
1737{
7267c094 1738 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1739 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1740 address_space_init(&address_space_memory, system_memory);
1741 address_space_memory.name = "memory";
309cb471 1742
7267c094 1743 system_io = g_malloc(sizeof(*system_io));
309cb471 1744 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1745 address_space_init(&address_space_io, system_io);
1746 address_space_io.name = "I/O";
93632747 1747
f6790af6
AK
1748 memory_listener_register(&core_memory_listener, &address_space_memory);
1749 memory_listener_register(&io_memory_listener, &address_space_io);
1750 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1751
1752 dma_context_init(&dma_context_memory, &address_space_memory,
1753 NULL, NULL, NULL);
1754}
1755
1756MemoryRegion *get_system_memory(void)
1757{
1758 return system_memory;
1759}
1760
1761MemoryRegion *get_system_io(void)
1762{
1763 return system_io;
1764}
1765
1766#endif /* !defined(CONFIG_USER_ONLY) */
1767
1768/* physical memory access (slow version, mainly for debug) */
1769#if defined(CONFIG_USER_ONLY)
9349b4f9 1770int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1771 uint8_t *buf, int len, int is_write)
1772{
1773 int l, flags;
1774 target_ulong page;
53a5960a 1775 void * p;
1776
1777 while (len > 0) {
1778 page = addr & TARGET_PAGE_MASK;
1779 l = (page + TARGET_PAGE_SIZE) - addr;
1780 if (l > len)
1781 l = len;
1782 flags = page_get_flags(page);
1783 if (!(flags & PAGE_VALID))
a68fe89c 1784 return -1;
1785 if (is_write) {
1786 if (!(flags & PAGE_WRITE))
a68fe89c 1787 return -1;
579a97f7 1788 /* XXX: this code should not depend on lock_user */
72fb7daa 1789 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1790 return -1;
1791 memcpy(p, buf, l);
1792 unlock_user(p, addr, l);
1793 } else {
1794 if (!(flags & PAGE_READ))
a68fe89c 1795 return -1;
579a97f7 1796 /* XXX: this code should not depend on lock_user */
72fb7daa 1797 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1798 return -1;
72fb7daa 1799 memcpy(buf, p, l);
5b257578 1800 unlock_user(p, addr, 0);
1801 }
1802 len -= l;
1803 buf += l;
1804 addr += l;
1805 }
a68fe89c 1806 return 0;
13eb76e0 1807}
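/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a gdbstub-style caller could use the user-mode cpu_memory_rw_debug() above
 * to peek at guest memory through the page flags.  "env" and "pc" below are
 * assumed to be supplied by the caller.
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) == 0) {
 *         // insn now holds the bytes mapped at guest address "pc"
 *     }
 */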
8df1cd07 1808
13eb76e0 1809#else
51d7a9eb 1810
1811static void invalidate_and_set_dirty(hwaddr addr,
1812 hwaddr length)
1813{
1814 if (!cpu_physical_memory_is_dirty(addr)) {
1815 /* invalidate code */
1816 tb_invalidate_phys_page_range(addr, addr + length, 0);
1817 /* set dirty bit */
1818 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1819 }
e226939d 1820 xen_modified_memory(addr, length);
1821}
1822
a8170e5e 1823void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1824 int len, bool is_write)
13eb76e0 1825{
ac1970fb 1826 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1827 int l;
1828 uint8_t *ptr;
1829 uint32_t val;
a8170e5e 1830 hwaddr page;
f3705d53 1831 MemoryRegionSection *section;
3b46e624 1832
1833 while (len > 0) {
1834 page = addr & TARGET_PAGE_MASK;
1835 l = (page + TARGET_PAGE_SIZE) - addr;
1836 if (l > len)
1837 l = len;
ac1970fb 1838 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1839
13eb76e0 1840 if (is_write) {
f3705d53 1841 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1842 hwaddr addr1;
cc5bea60 1843 addr1 = memory_region_section_addr(section, addr);
1844 /* XXX: could force cpu_single_env to NULL to avoid
1845 potential bugs */
6c2934db 1846 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1847 /* 32 bit write access */
c27004ec 1848 val = ldl_p(buf);
37ec01d4 1849 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1850 l = 4;
6c2934db 1851 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1852 /* 16 bit write access */
c27004ec 1853 val = lduw_p(buf);
37ec01d4 1854 io_mem_write(section->mr, addr1, val, 2);
1855 l = 2;
1856 } else {
1c213d19 1857 /* 8 bit write access */
c27004ec 1858 val = ldub_p(buf);
37ec01d4 1859 io_mem_write(section->mr, addr1, val, 1);
1860 l = 1;
1861 }
f3705d53 1862 } else if (!section->readonly) {
8ca5692d 1863 ram_addr_t addr1;
f3705d53 1864 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1865 + memory_region_section_addr(section, addr);
13eb76e0 1866 /* RAM case */
5579c7f3 1867 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1868 memcpy(ptr, buf, l);
51d7a9eb 1869 invalidate_and_set_dirty(addr1, l);
050a0ddf 1870 qemu_put_ram_ptr(ptr);
1871 }
1872 } else {
1873 if (!(memory_region_is_ram(section->mr) ||
1874 memory_region_is_romd(section->mr))) {
a8170e5e 1875 hwaddr addr1;
13eb76e0 1876 /* I/O case */
cc5bea60 1877 addr1 = memory_region_section_addr(section, addr);
6c2934db 1878 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1879 /* 32 bit read access */
37ec01d4 1880 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1881 stl_p(buf, val);
13eb76e0 1882 l = 4;
6c2934db 1883 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1884 /* 16 bit read access */
37ec01d4 1885 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1886 stw_p(buf, val);
1887 l = 2;
1888 } else {
1c213d19 1889 /* 8 bit read access */
37ec01d4 1890 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1891 stb_p(buf, val);
1892 l = 1;
1893 }
1894 } else {
1895 /* RAM case */
0a1b357f 1896 ptr = qemu_get_ram_ptr(section->mr->ram_addr
1897 + memory_region_section_addr(section,
1898 addr));
f3705d53 1899 memcpy(buf, ptr, l);
050a0ddf 1900 qemu_put_ram_ptr(ptr);
1901 }
1902 }
1903 len -= l;
1904 buf += l;
1905 addr += l;
1906 }
1907}
8df1cd07 1908
a8170e5e 1909void address_space_write(AddressSpace *as, hwaddr addr,
1910 const uint8_t *buf, int len)
1911{
1912 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1913}
1914
1915/**
1916 * address_space_read: read from an address space.
1917 *
1918 * @as: #AddressSpace to be accessed
1919 * @addr: address within that address space
 1920 * @buf: buffer into which @len bytes of data are read
1921 */
a8170e5e 1922void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
1923{
1924 address_space_rw(as, addr, buf, len, false);
1925}
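/*
 * Illustrative sketch (editor's addition): a device model holding an
 * AddressSpace can use these wrappers for DMA-style transfers.  The address
 * and payload below are arbitrary.
 *
 *     uint8_t payload[16] = { 0 };
 *     address_space_write(&address_space_memory, 0x80000000, payload,
 *                         sizeof(payload));
 *     address_space_read(&address_space_memory, 0x80000000, payload,
 *                        sizeof(payload));
 */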
1926
1927
a8170e5e 1928void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
1929 int len, int is_write)
1930{
 1931 address_space_rw(&address_space_memory, addr, buf, len, is_write);
1932}
1933
d0ecd2aa 1934/* used for ROM loading : can write in RAM and ROM */
a8170e5e 1935void cpu_physical_memory_write_rom(hwaddr addr,
1936 const uint8_t *buf, int len)
1937{
ac1970fb 1938 AddressSpaceDispatch *d = address_space_memory.dispatch;
1939 int l;
1940 uint8_t *ptr;
a8170e5e 1941 hwaddr page;
f3705d53 1942 MemoryRegionSection *section;
3b46e624 1943
1944 while (len > 0) {
1945 page = addr & TARGET_PAGE_MASK;
1946 l = (page + TARGET_PAGE_SIZE) - addr;
1947 if (l > len)
1948 l = len;
ac1970fb 1949 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1950
1951 if (!(memory_region_is_ram(section->mr) ||
1952 memory_region_is_romd(section->mr))) {
1953 /* do nothing */
1954 } else {
1955 unsigned long addr1;
f3705d53 1956 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1957 + memory_region_section_addr(section, addr);
d0ecd2aa 1958 /* ROM/RAM case */
5579c7f3 1959 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 1960 memcpy(ptr, buf, l);
51d7a9eb 1961 invalidate_and_set_dirty(addr1, l);
050a0ddf 1962 qemu_put_ram_ptr(ptr);
1963 }
1964 len -= l;
1965 buf += l;
1966 addr += l;
1967 }
1968}
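/*
 * Illustrative sketch (editor's addition): firmware loaders use this helper
 * because it bypasses the read-only check on ROM-backed regions.  The blob
 * name, load address, and size below are hypothetical.
 *
 *     extern const uint8_t firmware_blob[];   // hypothetical image
 *     cpu_physical_memory_write_rom(0xfffc0000, firmware_blob, 0x40000);
 */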
1969
1970typedef struct {
1971 void *buffer;
1972 hwaddr addr;
1973 hwaddr len;
1974} BounceBuffer;
1975
1976static BounceBuffer bounce;
1977
1978typedef struct MapClient {
1979 void *opaque;
1980 void (*callback)(void *opaque);
72cf2d4f 1981 QLIST_ENTRY(MapClient) link;
1982} MapClient;
1983
1984static QLIST_HEAD(map_client_list, MapClient) map_client_list
1985 = QLIST_HEAD_INITIALIZER(map_client_list);
1986
1987void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
1988{
7267c094 1989 MapClient *client = g_malloc(sizeof(*client));
1990
1991 client->opaque = opaque;
1992 client->callback = callback;
72cf2d4f 1993 QLIST_INSERT_HEAD(&map_client_list, client, link);
1994 return client;
1995}
1996
8b9c99d9 1997static void cpu_unregister_map_client(void *_client)
1998{
1999 MapClient *client = (MapClient *)_client;
2000
72cf2d4f 2001 QLIST_REMOVE(client, link);
7267c094 2002 g_free(client);
2003}
2004
2005static void cpu_notify_map_clients(void)
2006{
2007 MapClient *client;
2008
2009 while (!QLIST_EMPTY(&map_client_list)) {
2010 client = QLIST_FIRST(&map_client_list);
ba223c29 2011 client->callback(client->opaque);
34d5e948 2012 cpu_unregister_map_client(client);
2013 }
2014}
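/*
 * Illustrative sketch (editor's addition): a caller whose address_space_map()
 * attempt failed because the single bounce buffer was busy can register a
 * callback and retry once cpu_notify_map_clients() runs.  "retry_dma",
 * "MyDevice" and "my_device_kick" are hypothetical names.
 *
 *     static void retry_dma(void *opaque)
 *     {
 *         MyDevice *dev = opaque;   // hypothetical device state
 *         my_device_kick(dev);      // try address_space_map() again
 *     }
 *
 *     cpu_register_map_client(dev, retry_dma);
 */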
2015
2016/* Map a physical memory region into a host virtual address.
2017 * May map a subset of the requested range, given by and returned in *plen.
2018 * May return NULL if resources needed to perform the mapping are exhausted.
2019 * Use only for reads OR writes - not for read-modify-write operations.
2020 * Use cpu_register_map_client() to know when retrying the map operation is
2021 * likely to succeed.
6d16c2f8 2022 */
ac1970fb 2023void *address_space_map(AddressSpace *as,
2024 hwaddr addr,
2025 hwaddr *plen,
ac1970fb 2026 bool is_write)
6d16c2f8 2027{
ac1970fb 2028 AddressSpaceDispatch *d = as->dispatch;
2029 hwaddr len = *plen;
2030 hwaddr todo = 0;
6d16c2f8 2031 int l;
a8170e5e 2032 hwaddr page;
f3705d53 2033 MemoryRegionSection *section;
f15fbc4b 2034 ram_addr_t raddr = RAM_ADDR_MAX;
2035 ram_addr_t rlen;
2036 void *ret;
2037
2038 while (len > 0) {
2039 page = addr & TARGET_PAGE_MASK;
2040 l = (page + TARGET_PAGE_SIZE) - addr;
2041 if (l > len)
2042 l = len;
ac1970fb 2043 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2044
f3705d53 2045 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2046 if (todo || bounce.buffer) {
2047 break;
2048 }
2049 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2050 bounce.addr = addr;
2051 bounce.len = l;
2052 if (!is_write) {
ac1970fb 2053 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2054 }
2055
2056 *plen = l;
2057 return bounce.buffer;
6d16c2f8 2058 }
8ab934f9 2059 if (!todo) {
f3705d53 2060 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2061 + memory_region_section_addr(section, addr);
8ab934f9 2062 }
2063
2064 len -= l;
2065 addr += l;
38bee5dc 2066 todo += l;
6d16c2f8 2067 }
2068 rlen = todo;
2069 ret = qemu_ram_ptr_length(raddr, &rlen);
2070 *plen = rlen;
2071 return ret;
2072}
2073
ac1970fb 2074/* Unmaps a memory region previously mapped by address_space_map().
2075 * Will also mark the memory as dirty if is_write == 1. access_len gives
2076 * the amount of memory that was actually read or written by the caller.
2077 */
2078void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2079 int is_write, hwaddr access_len)
2080{
2081 if (buffer != bounce.buffer) {
2082 if (is_write) {
e890261f 2083 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
2084 while (access_len) {
2085 unsigned l;
2086 l = TARGET_PAGE_SIZE;
2087 if (l > access_len)
2088 l = access_len;
51d7a9eb 2089 invalidate_and_set_dirty(addr1, l);
2090 addr1 += l;
2091 access_len -= l;
2092 }
2093 }
868bb33f 2094 if (xen_enabled()) {
e41d7c69 2095 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2096 }
2097 return;
2098 }
2099 if (is_write) {
ac1970fb 2100 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2101 }
f8a83245 2102 qemu_vfree(bounce.buffer);
6d16c2f8 2103 bounce.buffer = NULL;
ba223c29 2104 cpu_notify_map_clients();
6d16c2f8 2105}
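/*
 * Illustrative sketch (editor's addition): the usual pattern is map, touch
 * the returned host pointer directly, then unmap with the number of bytes
 * actually accessed.  A NULL return or a shortened *plen must be handled by
 * the caller (for example by falling back to address_space_rw()).  "as",
 * "gpa" and "size" are assumed to come from the caller.
 *
 *     hwaddr maplen = size;
 *     void *host = address_space_map(as, gpa, &maplen, true);
 *     if (host) {
 *         memset(host, 0, maplen);                      // direct access
 *         address_space_unmap(as, host, maplen, true, maplen);
 *     }
 */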
d0ecd2aa 2106
2107void *cpu_physical_memory_map(hwaddr addr,
2108 hwaddr *plen,
2109 int is_write)
2110{
2111 return address_space_map(&address_space_memory, addr, plen, is_write);
2112}
2113
2114void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2115 int is_write, hwaddr access_len)
2116{
 2117 address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2118}
2119
8df1cd07 2120/* warning: addr must be aligned */
a8170e5e 2121static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2122 enum device_endian endian)
8df1cd07 2123{
2124 uint8_t *ptr;
2125 uint32_t val;
f3705d53 2126 MemoryRegionSection *section;
8df1cd07 2127
ac1970fb 2128 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2129
2130 if (!(memory_region_is_ram(section->mr) ||
2131 memory_region_is_romd(section->mr))) {
8df1cd07 2132 /* I/O case */
cc5bea60 2133 addr = memory_region_section_addr(section, addr);
37ec01d4 2134 val = io_mem_read(section->mr, addr, 4);
2135#if defined(TARGET_WORDS_BIGENDIAN)
2136 if (endian == DEVICE_LITTLE_ENDIAN) {
2137 val = bswap32(val);
2138 }
2139#else
2140 if (endian == DEVICE_BIG_ENDIAN) {
2141 val = bswap32(val);
2142 }
2143#endif
2144 } else {
2145 /* RAM case */
f3705d53 2146 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2147 & TARGET_PAGE_MASK)
cc5bea60 2148 + memory_region_section_addr(section, addr));
2149 switch (endian) {
2150 case DEVICE_LITTLE_ENDIAN:
2151 val = ldl_le_p(ptr);
2152 break;
2153 case DEVICE_BIG_ENDIAN:
2154 val = ldl_be_p(ptr);
2155 break;
2156 default:
2157 val = ldl_p(ptr);
2158 break;
2159 }
2160 }
2161 return val;
2162}
2163
a8170e5e 2164uint32_t ldl_phys(hwaddr addr)
2165{
2166 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2167}
2168
a8170e5e 2169uint32_t ldl_le_phys(hwaddr addr)
2170{
2171 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2172}
2173
a8170e5e 2174uint32_t ldl_be_phys(hwaddr addr)
2175{
2176 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2177}
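/*
 * Illustrative sketch (editor's addition): device and board code reads
 * guest-physical words through these helpers; the _le/_be variants fix the
 * byte order regardless of the target's native endianness.  The address is
 * arbitrary.
 *
 *     uint32_t native = ldl_phys(0x1000);      // target-native order
 *     uint32_t le     = ldl_le_phys(0x1000);   // always little endian
 *     uint32_t be     = ldl_be_phys(0x1000);   // always big endian
 */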
2178
84b7b8e7 2179/* warning: addr must be aligned */
a8170e5e 2180static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2181 enum device_endian endian)
84b7b8e7 2182{
2183 uint8_t *ptr;
2184 uint64_t val;
f3705d53 2185 MemoryRegionSection *section;
84b7b8e7 2186
ac1970fb 2187 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2188
2189 if (!(memory_region_is_ram(section->mr) ||
2190 memory_region_is_romd(section->mr))) {
84b7b8e7 2191 /* I/O case */
cc5bea60 2192 addr = memory_region_section_addr(section, addr);
2193
2194 /* XXX This is broken when device endian != cpu endian.
2195 Fix and add "endian" variable check */
84b7b8e7 2196#ifdef TARGET_WORDS_BIGENDIAN
2197 val = io_mem_read(section->mr, addr, 4) << 32;
2198 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2199#else
2200 val = io_mem_read(section->mr, addr, 4);
2201 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
2202#endif
2203 } else {
2204 /* RAM case */
f3705d53 2205 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2206 & TARGET_PAGE_MASK)
cc5bea60 2207 + memory_region_section_addr(section, addr));
2208 switch (endian) {
2209 case DEVICE_LITTLE_ENDIAN:
2210 val = ldq_le_p(ptr);
2211 break;
2212 case DEVICE_BIG_ENDIAN:
2213 val = ldq_be_p(ptr);
2214 break;
2215 default:
2216 val = ldq_p(ptr);
2217 break;
2218 }
2219 }
2220 return val;
2221}
2222
a8170e5e 2223uint64_t ldq_phys(hwaddr addr)
2224{
2225 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2226}
2227
a8170e5e 2228uint64_t ldq_le_phys(hwaddr addr)
2229{
2230 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2231}
2232
a8170e5e 2233uint64_t ldq_be_phys(hwaddr addr)
2234{
2235 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2236}
2237
aab33094 2238/* XXX: optimize */
a8170e5e 2239uint32_t ldub_phys(hwaddr addr)
2240{
2241 uint8_t val;
2242 cpu_physical_memory_read(addr, &val, 1);
2243 return val;
2244}
2245
733f0b02 2246/* warning: addr must be aligned */
a8170e5e 2247static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2248 enum device_endian endian)
aab33094 2249{
2250 uint8_t *ptr;
2251 uint64_t val;
f3705d53 2252 MemoryRegionSection *section;
733f0b02 2253
ac1970fb 2254 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2255
2256 if (!(memory_region_is_ram(section->mr) ||
2257 memory_region_is_romd(section->mr))) {
733f0b02 2258 /* I/O case */
cc5bea60 2259 addr = memory_region_section_addr(section, addr);
37ec01d4 2260 val = io_mem_read(section->mr, addr, 2);
2261#if defined(TARGET_WORDS_BIGENDIAN)
2262 if (endian == DEVICE_LITTLE_ENDIAN) {
2263 val = bswap16(val);
2264 }
2265#else
2266 if (endian == DEVICE_BIG_ENDIAN) {
2267 val = bswap16(val);
2268 }
2269#endif
2270 } else {
2271 /* RAM case */
f3705d53 2272 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2273 & TARGET_PAGE_MASK)
cc5bea60 2274 + memory_region_section_addr(section, addr));
2275 switch (endian) {
2276 case DEVICE_LITTLE_ENDIAN:
2277 val = lduw_le_p(ptr);
2278 break;
2279 case DEVICE_BIG_ENDIAN:
2280 val = lduw_be_p(ptr);
2281 break;
2282 default:
2283 val = lduw_p(ptr);
2284 break;
2285 }
2286 }
2287 return val;
2288}
2289
a8170e5e 2290uint32_t lduw_phys(hwaddr addr)
2291{
2292 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2293}
2294
a8170e5e 2295uint32_t lduw_le_phys(hwaddr addr)
2296{
2297 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2298}
2299
a8170e5e 2300uint32_t lduw_be_phys(hwaddr addr)
2301{
2302 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2303}
2304
2305/* warning: addr must be aligned. The ram page is not marked as dirty
2306 and the code inside is not invalidated. It is useful if the dirty
2307 bits are used to track modified PTEs */
a8170e5e 2308void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2309{
8df1cd07 2310 uint8_t *ptr;
f3705d53 2311 MemoryRegionSection *section;
8df1cd07 2312
ac1970fb 2313 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2314
f3705d53 2315 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2316 addr = memory_region_section_addr(section, addr);
f3705d53 2317 if (memory_region_is_ram(section->mr)) {
37ec01d4 2318 section = &phys_sections[phys_section_rom];
06ef3525 2319 }
37ec01d4 2320 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2321 } else {
f3705d53 2322 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2323 & TARGET_PAGE_MASK)
cc5bea60 2324 + memory_region_section_addr(section, addr);
5579c7f3 2325 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2326 stl_p(ptr, val);
2327
2328 if (unlikely(in_migration)) {
2329 if (!cpu_physical_memory_is_dirty(addr1)) {
2330 /* invalidate code */
2331 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2332 /* set dirty bit */
2333 cpu_physical_memory_set_dirty_flags(
2334 addr1, (0xff & ~CODE_DIRTY_FLAG));
2335 }
2336 }
2337 }
2338}
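/*
 * Illustrative sketch (editor's addition): softmmu page-table walkers use the
 * _notdirty store so that updating accessed/dirty bits in a guest PTE does
 * not flag the page for TB invalidation.  "pte_addr" and the bit mask below
 * are hypothetical.
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     stl_phys_notdirty(pte_addr, pte | 0x20);   // e.g. set an accessed bit
 */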
2339
a8170e5e 2340void stq_phys_notdirty(hwaddr addr, uint64_t val)
bc98a7ef 2341{
bc98a7ef 2342 uint8_t *ptr;
f3705d53 2343 MemoryRegionSection *section;
bc98a7ef 2344
ac1970fb 2345 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2346
f3705d53 2347 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2348 addr = memory_region_section_addr(section, addr);
f3705d53 2349 if (memory_region_is_ram(section->mr)) {
37ec01d4 2350 section = &phys_sections[phys_section_rom];
06ef3525 2351 }
bc98a7ef 2352#ifdef TARGET_WORDS_BIGENDIAN
2353 io_mem_write(section->mr, addr, val >> 32, 4);
2354 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 2355#else
2356 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2357 io_mem_write(section->mr, addr + 4, val >> 32, 4);
2358#endif
2359 } else {
f3705d53 2360 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2361 & TARGET_PAGE_MASK)
cc5bea60 2362 + memory_region_section_addr(section, addr));
2363 stq_p(ptr, val);
2364 }
2365}
2366
8df1cd07 2367/* warning: addr must be aligned */
a8170e5e 2368static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2369 enum device_endian endian)
8df1cd07 2370{
8df1cd07 2371 uint8_t *ptr;
f3705d53 2372 MemoryRegionSection *section;
8df1cd07 2373
ac1970fb 2374 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2375
f3705d53 2376 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2377 addr = memory_region_section_addr(section, addr);
f3705d53 2378 if (memory_region_is_ram(section->mr)) {
37ec01d4 2379 section = &phys_sections[phys_section_rom];
06ef3525 2380 }
2381#if defined(TARGET_WORDS_BIGENDIAN)
2382 if (endian == DEVICE_LITTLE_ENDIAN) {
2383 val = bswap32(val);
2384 }
2385#else
2386 if (endian == DEVICE_BIG_ENDIAN) {
2387 val = bswap32(val);
2388 }
2389#endif
37ec01d4 2390 io_mem_write(section->mr, addr, val, 4);
2391 } else {
2392 unsigned long addr1;
f3705d53 2393 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2394 + memory_region_section_addr(section, addr);
8df1cd07 2395 /* RAM case */
5579c7f3 2396 ptr = qemu_get_ram_ptr(addr1);
2397 switch (endian) {
2398 case DEVICE_LITTLE_ENDIAN:
2399 stl_le_p(ptr, val);
2400 break;
2401 case DEVICE_BIG_ENDIAN:
2402 stl_be_p(ptr, val);
2403 break;
2404 default:
2405 stl_p(ptr, val);
2406 break;
2407 }
51d7a9eb 2408 invalidate_and_set_dirty(addr1, 4);
2409 }
2410}
2411
a8170e5e 2412void stl_phys(hwaddr addr, uint32_t val)
2413{
2414 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2415}
2416
a8170e5e 2417void stl_le_phys(hwaddr addr, uint32_t val)
2418{
2419 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2420}
2421
a8170e5e 2422void stl_be_phys(hwaddr addr, uint32_t val)
2423{
2424 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2425}
2426
aab33094 2427/* XXX: optimize */
a8170e5e 2428void stb_phys(hwaddr addr, uint32_t val)
2429{
2430 uint8_t v = val;
2431 cpu_physical_memory_write(addr, &v, 1);
2432}
2433
733f0b02 2434/* warning: addr must be aligned */
a8170e5e 2435static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2436 enum device_endian endian)
aab33094 2437{
733f0b02 2438 uint8_t *ptr;
f3705d53 2439 MemoryRegionSection *section;
733f0b02 2440
ac1970fb 2441 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2442
f3705d53 2443 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2444 addr = memory_region_section_addr(section, addr);
f3705d53 2445 if (memory_region_is_ram(section->mr)) {
37ec01d4 2446 section = &phys_sections[phys_section_rom];
06ef3525 2447 }
2448#if defined(TARGET_WORDS_BIGENDIAN)
2449 if (endian == DEVICE_LITTLE_ENDIAN) {
2450 val = bswap16(val);
2451 }
2452#else
2453 if (endian == DEVICE_BIG_ENDIAN) {
2454 val = bswap16(val);
2455 }
2456#endif
37ec01d4 2457 io_mem_write(section->mr, addr, val, 2);
2458 } else {
2459 unsigned long addr1;
f3705d53 2460 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2461 + memory_region_section_addr(section, addr);
2462 /* RAM case */
2463 ptr = qemu_get_ram_ptr(addr1);
2464 switch (endian) {
2465 case DEVICE_LITTLE_ENDIAN:
2466 stw_le_p(ptr, val);
2467 break;
2468 case DEVICE_BIG_ENDIAN:
2469 stw_be_p(ptr, val);
2470 break;
2471 default:
2472 stw_p(ptr, val);
2473 break;
2474 }
51d7a9eb 2475 invalidate_and_set_dirty(addr1, 2);
733f0b02 2476 }
2477}
2478
a8170e5e 2479void stw_phys(hwaddr addr, uint32_t val)
2480{
2481 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2482}
2483
a8170e5e 2484void stw_le_phys(hwaddr addr, uint32_t val)
2485{
2486 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2487}
2488
a8170e5e 2489void stw_be_phys(hwaddr addr, uint32_t val)
2490{
2491 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2492}
2493
aab33094 2494/* XXX: optimize */
a8170e5e 2495void stq_phys(hwaddr addr, uint64_t val)
2496{
2497 val = tswap64(val);
71d2b725 2498 cpu_physical_memory_write(addr, &val, 8);
2499}
2500
a8170e5e 2501void stq_le_phys(hwaddr addr, uint64_t val)
2502{
2503 val = cpu_to_le64(val);
2504 cpu_physical_memory_write(addr, &val, 8);
2505}
2506
a8170e5e 2507void stq_be_phys(hwaddr addr, uint64_t val)
2508{
2509 val = cpu_to_be64(val);
2510 cpu_physical_memory_write(addr, &val, 8);
2511}
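/*
 * Illustrative sketch (editor's addition): the store helpers mirror the loads
 * above; _le/_be variants byte-swap as needed before the data reaches RAM or
 * an I/O region.  Addresses and values are arbitrary.
 *
 *     stb_phys(0x2000, 0xab);
 *     stw_le_phys(0x2002, 0x1234);
 *     stl_be_phys(0x2004, 0xdeadbeef);
 *     stq_phys(0x2008, UINT64_C(0x0123456789abcdef));
 */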
2512
5e2972fd 2513/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2514int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2515 uint8_t *buf, int len, int is_write)
2516{
2517 int l;
a8170e5e 2518 hwaddr phys_addr;
9b3c35e0 2519 target_ulong page;
2520
2521 while (len > 0) {
2522 page = addr & TARGET_PAGE_MASK;
2523 phys_addr = cpu_get_phys_page_debug(env, page);
2524 /* if no physical page mapped, return an error */
2525 if (phys_addr == -1)
2526 return -1;
2527 l = (page + TARGET_PAGE_SIZE) - addr;
2528 if (l > len)
2529 l = len;
5e2972fd 2530 phys_addr += (addr & ~TARGET_PAGE_MASK);
2531 if (is_write)
2532 cpu_physical_memory_write_rom(phys_addr, buf, l);
2533 else
5e2972fd 2534 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
2535 len -= l;
2536 buf += l;
2537 addr += l;
2538 }
2539 return 0;
2540}
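/*
 * Illustrative sketch (editor's addition): because debug writes go through
 * cpu_physical_memory_write_rom(), a debugger stub can patch a breakpoint
 * byte even into a ROM-backed page.  "env", "bp_addr" and the opcode are
 * hypothetical.
 *
 *     static const uint8_t int3 = 0xcc;   // x86 breakpoint opcode
 *     cpu_memory_rw_debug(env, bp_addr, (uint8_t *)&int3, 1, 1);
 */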
a68fe89c 2541#endif
13eb76e0 2542
2543#if !defined(CONFIG_USER_ONLY)
2544
2545/*
2546 * A helper function for the _utterly broken_ virtio device model to find out if
2547 * it's running on a big endian machine. Don't do this at home kids!
2548 */
2549bool virtio_is_big_endian(void);
2550bool virtio_is_big_endian(void)
2551{
2552#if defined(TARGET_WORDS_BIGENDIAN)
2553 return true;
2554#else
2555 return false;
2556#endif
2557}
2558
61382a50 2559#endif
2560
2561#ifndef CONFIG_USER_ONLY
a8170e5e 2562bool cpu_physical_memory_is_io(hwaddr phys_addr)
2563{
2564 MemoryRegionSection *section;
2565
2566 section = phys_page_find(address_space_memory.dispatch,
2567 phys_addr >> TARGET_PAGE_BITS);
2568
2569 return !(memory_region_is_ram(section->mr) ||
2570 memory_region_is_romd(section->mr));
2571}
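/*
 * Illustrative sketch (editor's addition): callers can use this predicate to
 * make sure a guest-physical address is plain RAM/ROM before treating it as
 * memory.  "paddr" is hypothetical.
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         // paddr is backed by RAM or ROM rather than an I/O region
 *     }
 */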
2572#endif