/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

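/* The physical page map is a multi-level table: every node has L2_SIZE
 * entries, inner entries point at further nodes and leaf entries hold an
 * index into phys_sections.  phys_page_set_level() below walks (and lazily
 * allocates) that tree when a range of pages is bound to a section. */
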
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

d24981d3
JQ
564static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
565 uintptr_t length)
566{
567 uintptr_t start1;
568
569 /* we modify the TLB cache so that the dirty bit will be set again
570 when accessing the range */
571 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
572 /* Check that we don't span multiple blocks - this breaks the
573 address comparisons below. */
574 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
575 != (end - 1) - start) {
576 abort();
577 }
578 cpu_tlb_reset_dirty_all(start1, length);
579
580}
581
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

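/* mem_add() splits an incoming MemoryRegionSection into an unaligned head,
 * a run of whole target pages and an unaligned tail: head and tail go
 * through register_subpage(), the page-aligned middle through
 * register_multipage(). */
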
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

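/* Pick an offset for a new RAM block: scan the existing blocks and reuse the
 * smallest gap that is still large enough, which keeps the ram_addr_t space
 * reasonably compact. */
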
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

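/* Allocate (or adopt, when host != NULL) guest RAM: reserve a ram_addr_t
 * range with find_ram_offset(), back it with -mem-path, Xen, KVM or plain
 * host memory, insert the block into ram_list and grow the dirty bitmap. */
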
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

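/* Writes to pages that still contain translated code are routed here so the
 * affected TBs can be invalidated before the store is performed and the
 * dirty flags updated; once the page is fully dirty the fast path is
 * restored via tlb_set_dirty(). */
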
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

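/* Subpage accesses: the offset within the page selects a sub_section entry,
 * and the access is forwarded to that section's MemoryRegion after the
 * address has been rebased into the target region. */
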
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

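/* Memory listener callbacks: mem_begin/mem_add rebuild an address space's
 * dispatch table on every topology change, core_begin re-creates the
 * special sections, and tcg_commit flushes the CPU TLBs afterwards. */
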
static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

62152b8a
AK
1758static void memory_map_init(void)
1759{
7267c094 1760 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1761 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1762 address_space_init(&address_space_memory, system_memory);
1763 address_space_memory.name = "memory";
309cb471 1764
7267c094 1765 system_io = g_malloc(sizeof(*system_io));
309cb471 1766 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1767 address_space_init(&address_space_io, system_io);
1768 address_space_io.name = "I/O";
93632747 1769
f6790af6
AK
1770 memory_listener_register(&core_memory_listener, &address_space_memory);
1771 memory_listener_register(&io_memory_listener, &address_space_io);
1772 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1773
1774 dma_context_init(&dma_context_memory, &address_space_memory,
1775 NULL, NULL, NULL);
62152b8a
AK
1776}
1777
1778MemoryRegion *get_system_memory(void)
1779{
1780 return system_memory;
1781}
1782
309cb471
AK
1783MemoryRegion *get_system_io(void)
1784{
1785 return system_io;
1786}
1787
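
/*
 * Usage sketch (illustrative only, not part of this file): board code
 * typically obtains the root region via get_system_memory() and maps its
 * RAM into it. The region name and 'ram_size' below are placeholders.
 *
 *     MemoryRegion *sysmem = get_system_memory();
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *
 *     memory_region_init_ram(ram, "board.ram", ram_size);
 *     vmstate_register_ram_global(ram);
 *     memory_region_add_subregion(sysmem, 0, ram);
 */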

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    int l;
    uint8_t *ptr;
    uint32_t val;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                hwaddr addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                hwaddr addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: number of bytes to transfer
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    return address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

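/*
 * Usage sketch (illustrative only): reading and writing guest-physical
 * memory through the convenience wrappers built on address_space_rw().
 * 'gpa' is a placeholder guest-physical address.
 *
 *     uint8_t data[4];
 *
 *     cpu_physical_memory_read(gpa, data, sizeof(data));
 *     data[0] |= 1;
 *     cpu_physical_memory_write(gpa, data, sizeof(data));
 */
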
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
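
/*
 * Usage sketch (illustrative only): when address_space_map() cannot hand
 * out a direct mapping and the single bounce buffer is already in use, it
 * returns NULL; a caller may register a map client so it is notified (via
 * cpu_notify_map_clients()) when retrying is likely to succeed. The
 * MyDeviceState type and retry_dma()/start_dma() helpers are hypothetical.
 *
 *     static void retry_dma(void *opaque)
 *     {
 *         MyDeviceState *s = opaque;
 *         start_dma(s);
 *     }
 *
 *     if (!host) {
 *         cpu_register_map_client(s, retry_dma);
 *     }
 */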

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    hwaddr len = *plen;
    hwaddr todo = 0;
    int l;
    hwaddr page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
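
/*
 * Usage sketch (illustrative only): zero-copy access to a range of guest
 * RAM via map/unmap. The call may return the bounce buffer and shrink
 * *plen if the range is not directly mappable, so callers must loop or
 * fall back to cpu_physical_memory_rw(). 'gpa' and 'size' are placeholders.
 *
 *     hwaddr plen = size;
 *     void *host = cpu_physical_memory_map(gpa, &plen, 1);
 *
 *     if (host) {
 *         memset(host, 0, plen);
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     }
 */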

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
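
/*
 * Usage sketch (illustrative only): the endian-specific variants let a
 * device model read a field with a fixed byte order regardless of the
 * target's native endianness, e.g. a little-endian descriptor in guest
 * RAM. 'desc_gpa' is a placeholder address.
 *
 *     uint32_t flags = ldl_le_phys(desc_gpa);
 *     uint64_t buf_addr = ldq_le_phys(desc_gpa + 8);
 */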

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
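
/*
 * Usage sketch (illustrative only): target MMU emulation can use
 * stl_phys_notdirty() when it sets accessed/dirty bits in a guest page
 * table entry, so the write neither marks the page dirty nor invalidates
 * translated code. 'pte_addr' and the bit mask are placeholders.
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
 */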

void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
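
/*
 * Usage sketch (illustrative only): a debugger front end such as the gdb
 * stub reads the guest-virtual memory of a stopped CPU through this
 * helper; each page is translated with cpu_get_phys_page_debug() and then
 * accessed physically. 'env' and 'vaddr' are placeholders.
 *
 *     uint8_t buf[16];
 *
 *     if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
 *         report that vaddr has no valid mapping
 *     }
 */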
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif