/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "qemu-config.h"
#include "memory.h"
#include "dma.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"
#include "translate-all.h"

#include "memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

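/* The physical page map is a multi-level table: each PhysPageEntry either
 * points at another L2_SIZE-entry node or, when is_leaf is set, holds the
 * index of a MemoryRegionSection in phys_sections.  Nodes come from the
 * simple grow-only allocator below and are recycled wholesale by
 * phys_map_nodes_reset().
 */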
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

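/* Build the value stored in the iotlb: for RAM it is the page's ram_addr
 * OR'ed with the notdirty or rom section index, so stores get routed through
 * the dirty-tracking handlers; for MMIO it is the section index plus the
 * offset within the region.  Pages with watchpoints are forced through the
 * watch section instead (see below).
 */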
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

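/* register_subpage() handles mappings that do not cover a whole target page:
 * the page is backed by a subpage_t whose per-byte table forwards each access
 * to the owning section.  register_multipage() below maps runs of whole pages
 * directly to a section index.
 */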
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

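/* Listener callback: split an incoming MemoryRegionSection into an unaligned
 * head, a run of whole pages and an unaligned tail, and register each piece
 * with the dispatch tree.
 */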
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

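/* Back a RAMBlock with a file on a hugetlbfs mount (-mem-path): create and
 * immediately unlink a temporary file there, size it with ftruncate() and
 * mmap() it so the guest RAM uses huge pages.
 */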
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

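/* Pick an offset for a new RAMBlock: scan the existing blocks and reuse the
 * smallest gap in the ram_addr_t space that is large enough to hold it.
 */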
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

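/* Write handler used for RAM pages whose dirty bits are still clean:
 * invalidate any TBs generated from the page, perform the store on the host
 * buffer and set the dirty flags, dropping this slow path once the page is
 * fully dirty.
 */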
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

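/* Called from the watch_mem_* handlers below with the offset of the access
 * inside the page; matches it against the CPU's watchpoint list and either
 * raises EXCP_DEBUG or regenerates the TB so execution can restart cleanly.
 */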
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

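/* MMIO side of the sub-page machinery: translate the offset within the
 * subpage back into the registered section's address space and forward the
 * access to that section's MemoryRegion.
 */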
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

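/* Create the global "system" and "io" address spaces and wire up the core,
 * I/O-port and TCG memory listeners that keep the dispatch structures and
 * the TLBs in sync with the memory API.
 */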
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

a8170e5e 1824void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1825 int len, bool is_write)
13eb76e0 1826{
ac1970fb 1827 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1828 int l;
13eb76e0
FB
1829 uint8_t *ptr;
1830 uint32_t val;
a8170e5e 1831 hwaddr page;
f3705d53 1832 MemoryRegionSection *section;
3b46e624 1833
13eb76e0
FB
1834 while (len > 0) {
1835 page = addr & TARGET_PAGE_MASK;
1836 l = (page + TARGET_PAGE_SIZE) - addr;
1837 if (l > len)
1838 l = len;
ac1970fb 1839 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1840
13eb76e0 1841 if (is_write) {
f3705d53 1842 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1843 hwaddr addr1;
cc5bea60 1844 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
1845 /* XXX: could force cpu_single_env to NULL to avoid
1846 potential bugs */
6c2934db 1847 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1848 /* 32 bit write access */
c27004ec 1849 val = ldl_p(buf);
37ec01d4 1850 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1851 l = 4;
6c2934db 1852 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1853 /* 16 bit write access */
c27004ec 1854 val = lduw_p(buf);
37ec01d4 1855 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
1856 l = 2;
1857 } else {
1c213d19 1858 /* 8 bit write access */
c27004ec 1859 val = ldub_p(buf);
37ec01d4 1860 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
1861 l = 1;
1862 }
f3705d53 1863 } else if (!section->readonly) {
8ca5692d 1864 ram_addr_t addr1;
f3705d53 1865 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1866 + memory_region_section_addr(section, addr);
13eb76e0 1867 /* RAM case */
5579c7f3 1868 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1869 memcpy(ptr, buf, l);
51d7a9eb 1870 invalidate_and_set_dirty(addr1, l);
050a0ddf 1871 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1872 }
1873 } else {
cc5bea60
BS
1874 if (!(memory_region_is_ram(section->mr) ||
1875 memory_region_is_romd(section->mr))) {
a8170e5e 1876 hwaddr addr1;
13eb76e0 1877 /* I/O case */
cc5bea60 1878 addr1 = memory_region_section_addr(section, addr);
6c2934db 1879 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1880 /* 32 bit read access */
37ec01d4 1881 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1882 stl_p(buf, val);
13eb76e0 1883 l = 4;
6c2934db 1884 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1885 /* 16 bit read access */
37ec01d4 1886 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1887 stw_p(buf, val);
13eb76e0
FB
1888 l = 2;
1889 } else {
1c213d19 1890 /* 8 bit read access */
37ec01d4 1891 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1892 stb_p(buf, val);
13eb76e0
FB
1893 l = 1;
1894 }
1895 } else {
1896 /* RAM case */
0a1b357f 1897 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
1898 + memory_region_section_addr(section,
1899 addr));
f3705d53 1900 memcpy(buf, ptr, l);
050a0ddf 1901 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1902 }
1903 }
1904 len -= l;
1905 buf += l;
1906 addr += l;
1907 }
1908}
8df1cd07 1909
a8170e5e 1910void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1911 const uint8_t *buf, int len)
1912{
1913 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1914}
1915
1916/**
1917 * address_space_read: read from an address space.
1918 *
1919 * @as: #AddressSpace to be accessed
1920 * @addr: address within that address space
 1921 * @buf: buffer into which the data is read
 * @len: number of bytes to read
1922 */
a8170e5e 1923void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
1924{
1925 address_space_rw(as, addr, buf, len, false);
1926}
1927
1928
a8170e5e 1929void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1930 int len, int is_write)
1931{
1932 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1933}
1934
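/*
 * Illustrative sketch (not part of the original file): copying a small
 * buffer to guest physical memory and reading it back through the wrapper
 * above.  The guest-physical address used here is purely hypothetical.
 */
static void example_phys_rw(void)
{
    uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];
    hwaddr gpa = 0x100000;          /* hypothetical guest-physical address */

    cpu_physical_memory_rw(gpa, out, sizeof(out), 1);   /* is_write = 1 */
    cpu_physical_memory_rw(gpa, in, sizeof(in), 0);     /* is_write = 0 */
}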
d0ecd2aa 1935/* used for ROM loading: can write to both RAM and ROM */
a8170e5e 1936void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1937 const uint8_t *buf, int len)
1938{
ac1970fb 1939 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
1940 int l;
1941 uint8_t *ptr;
a8170e5e 1942 hwaddr page;
f3705d53 1943 MemoryRegionSection *section;
3b46e624 1944
d0ecd2aa
FB
1945 while (len > 0) {
1946 page = addr & TARGET_PAGE_MASK;
1947 l = (page + TARGET_PAGE_SIZE) - addr;
1948 if (l > len)
1949 l = len;
ac1970fb 1950 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1951
cc5bea60
BS
1952 if (!(memory_region_is_ram(section->mr) ||
1953 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
1954 /* do nothing */
1955 } else {
1956 unsigned long addr1;
f3705d53 1957 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1958 + memory_region_section_addr(section, addr);
d0ecd2aa 1959 /* ROM/RAM case */
5579c7f3 1960 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 1961 memcpy(ptr, buf, l);
51d7a9eb 1962 invalidate_and_set_dirty(addr1, l);
050a0ddf 1963 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
1964 }
1965 len -= l;
1966 buf += l;
1967 addr += l;
1968 }
1969}
1970
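/*
 * Illustrative sketch (not part of the original file): a firmware loader
 * would go through cpu_physical_memory_write_rom() so the bytes reach the
 * backing RAM even when the region is mapped read-only.  The base address
 * and blob contents are hypothetical.
 */
static void example_load_firmware_blob(void)
{
    static const uint8_t blob[] = { 0xea, 0xf0, 0xff, 0x00, 0xf0 };
    hwaddr rom_base = 0xfffffff0;   /* hypothetical reset-vector location */

    cpu_physical_memory_write_rom(rom_base, blob, sizeof(blob));
}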
6d16c2f8
AL
1971typedef struct {
1972 void *buffer;
a8170e5e
AK
1973 hwaddr addr;
1974 hwaddr len;
6d16c2f8
AL
1975} BounceBuffer;
1976
1977static BounceBuffer bounce;
1978
ba223c29
AL
1979typedef struct MapClient {
1980 void *opaque;
1981 void (*callback)(void *opaque);
72cf2d4f 1982 QLIST_ENTRY(MapClient) link;
ba223c29
AL
1983} MapClient;
1984
72cf2d4f
BS
1985static QLIST_HEAD(map_client_list, MapClient) map_client_list
1986 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
1987
1988void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
1989{
7267c094 1990 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
1991
1992 client->opaque = opaque;
1993 client->callback = callback;
72cf2d4f 1994 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
1995 return client;
1996}
1997
8b9c99d9 1998static void cpu_unregister_map_client(void *_client)
ba223c29
AL
1999{
2000 MapClient *client = (MapClient *)_client;
2001
72cf2d4f 2002 QLIST_REMOVE(client, link);
7267c094 2003 g_free(client);
ba223c29
AL
2004}
2005
2006static void cpu_notify_map_clients(void)
2007{
2008 MapClient *client;
2009
72cf2d4f
BS
2010 while (!QLIST_EMPTY(&map_client_list)) {
2011 client = QLIST_FIRST(&map_client_list);
ba223c29 2012 client->callback(client->opaque);
34d5e948 2013 cpu_unregister_map_client(client);
ba223c29
AL
2014 }
2015}
2016
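/*
 * Illustrative sketch (not part of the original file): a device that could
 * not map guest memory (for instance because the single bounce buffer was
 * already in use) registers a map client; the callback runs once the
 * bounce buffer is released, which is the cue to retry the mapping.  The
 * retry routine named in the comment is hypothetical.
 */
static void example_on_bounce_buffer_free(void *opaque)
{
    /* opaque is the state passed to cpu_register_map_client() below;
     * a real device would kick its retry path here, e.g.
     * example_retry_dma(opaque); */
}

static void example_wait_for_bounce_buffer(void *device_state)
{
    cpu_register_map_client(device_state, example_on_bounce_buffer_free);
}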
6d16c2f8
AL
2017/* Map a physical memory region into a host virtual address.
2018 * May map a subset of the requested range, given by and returned in *plen.
2019 * May return NULL if resources needed to perform the mapping are exhausted.
2020 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2021 * Use cpu_register_map_client() to know when retrying the map operation is
2022 * likely to succeed.
6d16c2f8 2023 */
ac1970fb 2024void *address_space_map(AddressSpace *as,
a8170e5e
AK
2025 hwaddr addr,
2026 hwaddr *plen,
ac1970fb 2027 bool is_write)
6d16c2f8 2028{
ac1970fb 2029 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
2030 hwaddr len = *plen;
2031 hwaddr todo = 0;
6d16c2f8 2032 int l;
a8170e5e 2033 hwaddr page;
f3705d53 2034 MemoryRegionSection *section;
f15fbc4b 2035 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2036 ram_addr_t rlen;
2037 void *ret;
6d16c2f8
AL
2038
2039 while (len > 0) {
2040 page = addr & TARGET_PAGE_MASK;
2041 l = (page + TARGET_PAGE_SIZE) - addr;
2042 if (l > len)
2043 l = len;
ac1970fb 2044 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2045
f3705d53 2046 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2047 if (todo || bounce.buffer) {
6d16c2f8
AL
2048 break;
2049 }
2050 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2051 bounce.addr = addr;
2052 bounce.len = l;
2053 if (!is_write) {
ac1970fb 2054 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2055 }
38bee5dc
SS
2056
2057 *plen = l;
2058 return bounce.buffer;
6d16c2f8 2059 }
8ab934f9 2060 if (!todo) {
f3705d53 2061 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2062 + memory_region_section_addr(section, addr);
8ab934f9 2063 }
6d16c2f8
AL
2064
2065 len -= l;
2066 addr += l;
38bee5dc 2067 todo += l;
6d16c2f8 2068 }
8ab934f9
SS
2069 rlen = todo;
2070 ret = qemu_ram_ptr_length(raddr, &rlen);
2071 *plen = rlen;
2072 return ret;
6d16c2f8
AL
2073}
2074
ac1970fb 2075/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2076 * Will also mark the memory as dirty if is_write == 1. access_len gives
2077 * the amount of memory that was actually read or written by the caller.
2078 */
a8170e5e
AK
2079void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2080 int is_write, hwaddr access_len)
6d16c2f8
AL
2081{
2082 if (buffer != bounce.buffer) {
2083 if (is_write) {
e890261f 2084 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2085 while (access_len) {
2086 unsigned l;
2087 l = TARGET_PAGE_SIZE;
2088 if (l > access_len)
2089 l = access_len;
51d7a9eb 2090 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2091 addr1 += l;
2092 access_len -= l;
2093 }
2094 }
868bb33f 2095 if (xen_enabled()) {
e41d7c69 2096 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2097 }
6d16c2f8
AL
2098 return;
2099 }
2100 if (is_write) {
ac1970fb 2101 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2102 }
f8a83245 2103 qemu_vfree(bounce.buffer);
6d16c2f8 2104 bounce.buffer = NULL;
ba223c29 2105 cpu_notify_map_clients();
6d16c2f8 2106}
d0ecd2aa 2107
a8170e5e
AK
2108void *cpu_physical_memory_map(hwaddr addr,
2109 hwaddr *plen,
ac1970fb
AK
2110 int is_write)
2111{
2112 return address_space_map(&address_space_memory, addr, plen, is_write);
2113}
2114
a8170e5e
AK
2115void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2116 int is_write, hwaddr access_len)
ac1970fb
AK
2117{
2118 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2119}
2120
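/*
 * Illustrative sketch (not part of the original file): the usual zero-copy
 * pattern of map, access, unmap.  If less than the requested length is
 * mapped (for example because the bounce buffer is busy), the caller backs
 * off and retries later, typically via cpu_register_map_client() above.
 */
static bool example_dma_fill(hwaddr gpa, hwaddr len, uint8_t pattern)
{
    hwaddr mapped = len;
    void *host = cpu_physical_memory_map(gpa, &mapped, 1 /* is_write */);

    if (!host) {
        return false;                       /* retry once notified */
    }
    if (mapped < len) {
        cpu_physical_memory_unmap(host, mapped, 1, 0);  /* wrote nothing */
        return false;
    }
    memset(host, pattern, mapped);
    cpu_physical_memory_unmap(host, mapped, 1, mapped); /* mark dirty */
    return true;
}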
8df1cd07 2121/* warning: addr must be aligned */
a8170e5e 2122static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2123 enum device_endian endian)
8df1cd07 2124{
8df1cd07
FB
2125 uint8_t *ptr;
2126 uint32_t val;
f3705d53 2127 MemoryRegionSection *section;
8df1cd07 2128
ac1970fb 2129 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2130
cc5bea60
BS
2131 if (!(memory_region_is_ram(section->mr) ||
2132 memory_region_is_romd(section->mr))) {
8df1cd07 2133 /* I/O case */
cc5bea60 2134 addr = memory_region_section_addr(section, addr);
37ec01d4 2135 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
2136#if defined(TARGET_WORDS_BIGENDIAN)
2137 if (endian == DEVICE_LITTLE_ENDIAN) {
2138 val = bswap32(val);
2139 }
2140#else
2141 if (endian == DEVICE_BIG_ENDIAN) {
2142 val = bswap32(val);
2143 }
2144#endif
8df1cd07
FB
2145 } else {
2146 /* RAM case */
f3705d53 2147 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2148 & TARGET_PAGE_MASK)
cc5bea60 2149 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2150 switch (endian) {
2151 case DEVICE_LITTLE_ENDIAN:
2152 val = ldl_le_p(ptr);
2153 break;
2154 case DEVICE_BIG_ENDIAN:
2155 val = ldl_be_p(ptr);
2156 break;
2157 default:
2158 val = ldl_p(ptr);
2159 break;
2160 }
8df1cd07
FB
2161 }
2162 return val;
2163}
2164
a8170e5e 2165uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2166{
2167 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2168}
2169
a8170e5e 2170uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2171{
2172 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2173}
2174
a8170e5e 2175uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2176{
2177 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2178}
2179
84b7b8e7 2180/* warning: addr must be aligned */
a8170e5e 2181static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2182 enum device_endian endian)
84b7b8e7 2183{
84b7b8e7
FB
2184 uint8_t *ptr;
2185 uint64_t val;
f3705d53 2186 MemoryRegionSection *section;
84b7b8e7 2187
ac1970fb 2188 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2189
cc5bea60
BS
2190 if (!(memory_region_is_ram(section->mr) ||
2191 memory_region_is_romd(section->mr))) {
84b7b8e7 2192 /* I/O case */
cc5bea60 2193 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
2194
2195 /* XXX This is broken when device endian != cpu endian.
2196 Fix and add "endian" variable check */
84b7b8e7 2197#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2198 val = io_mem_read(section->mr, addr, 4) << 32;
2199 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2200#else
37ec01d4
AK
2201 val = io_mem_read(section->mr, addr, 4);
2202 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
2203#endif
2204 } else {
2205 /* RAM case */
f3705d53 2206 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2207 & TARGET_PAGE_MASK)
cc5bea60 2208 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2209 switch (endian) {
2210 case DEVICE_LITTLE_ENDIAN:
2211 val = ldq_le_p(ptr);
2212 break;
2213 case DEVICE_BIG_ENDIAN:
2214 val = ldq_be_p(ptr);
2215 break;
2216 default:
2217 val = ldq_p(ptr);
2218 break;
2219 }
84b7b8e7
FB
2220 }
2221 return val;
2222}
2223
a8170e5e 2224uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2225{
2226 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2227}
2228
a8170e5e 2229uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2230{
2231 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2232}
2233
a8170e5e 2234uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2235{
2236 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2237}
2238
aab33094 2239/* XXX: optimize */
a8170e5e 2240uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2241{
2242 uint8_t val;
2243 cpu_physical_memory_read(addr, &val, 1);
2244 return val;
2245}
2246
733f0b02 2247/* warning: addr must be aligned */
a8170e5e 2248static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2249 enum device_endian endian)
aab33094 2250{
733f0b02
MT
2251 uint8_t *ptr;
2252 uint64_t val;
f3705d53 2253 MemoryRegionSection *section;
733f0b02 2254
ac1970fb 2255 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2256
cc5bea60
BS
2257 if (!(memory_region_is_ram(section->mr) ||
2258 memory_region_is_romd(section->mr))) {
733f0b02 2259 /* I/O case */
cc5bea60 2260 addr = memory_region_section_addr(section, addr);
37ec01d4 2261 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
2262#if defined(TARGET_WORDS_BIGENDIAN)
2263 if (endian == DEVICE_LITTLE_ENDIAN) {
2264 val = bswap16(val);
2265 }
2266#else
2267 if (endian == DEVICE_BIG_ENDIAN) {
2268 val = bswap16(val);
2269 }
2270#endif
733f0b02
MT
2271 } else {
2272 /* RAM case */
f3705d53 2273 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2274 & TARGET_PAGE_MASK)
cc5bea60 2275 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2276 switch (endian) {
2277 case DEVICE_LITTLE_ENDIAN:
2278 val = lduw_le_p(ptr);
2279 break;
2280 case DEVICE_BIG_ENDIAN:
2281 val = lduw_be_p(ptr);
2282 break;
2283 default:
2284 val = lduw_p(ptr);
2285 break;
2286 }
733f0b02
MT
2287 }
2288 return val;
aab33094
FB
2289}
2290
a8170e5e 2291uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2292{
2293 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2294}
2295
a8170e5e 2296uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2297{
2298 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2299}
2300
a8170e5e 2301uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2302{
2303 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2304}
2305
8df1cd07
FB
 2306/* warning: addr must be aligned. The RAM page is not marked as dirty
 2307 and the code inside is not invalidated. It is useful if the dirty
 2308 bits are used to track modified PTEs */
a8170e5e 2309void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2310{
8df1cd07 2311 uint8_t *ptr;
f3705d53 2312 MemoryRegionSection *section;
8df1cd07 2313
ac1970fb 2314 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2315
f3705d53 2316 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2317 addr = memory_region_section_addr(section, addr);
f3705d53 2318 if (memory_region_is_ram(section->mr)) {
37ec01d4 2319 section = &phys_sections[phys_section_rom];
06ef3525 2320 }
37ec01d4 2321 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2322 } else {
f3705d53 2323 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2324 & TARGET_PAGE_MASK)
cc5bea60 2325 + memory_region_section_addr(section, addr);
5579c7f3 2326 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2327 stl_p(ptr, val);
74576198
AL
2328
2329 if (unlikely(in_migration)) {
2330 if (!cpu_physical_memory_is_dirty(addr1)) {
2331 /* invalidate code */
2332 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2333 /* set dirty bit */
f7c11b53
YT
2334 cpu_physical_memory_set_dirty_flags(
2335 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2336 }
2337 }
8df1cd07
FB
2338 }
2339}
2340
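/*
 * Illustrative sketch (not part of the original file): target code that
 * sets an "accessed" bit in a guest page-table entry uses
 * stl_phys_notdirty() so that the write neither marks the page dirty nor
 * invalidates translated code.  EXAMPLE_PTE_ACCESSED is a hypothetical
 * flag, not a real architectural constant.
 */
#define EXAMPLE_PTE_ACCESSED 0x20
static void example_mark_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
    }
}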
a8170e5e 2341void stq_phys_notdirty(hwaddr addr, uint64_t val)
bc98a7ef 2342{
bc98a7ef 2343 uint8_t *ptr;
f3705d53 2344 MemoryRegionSection *section;
bc98a7ef 2345
ac1970fb 2346 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2347
f3705d53 2348 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2349 addr = memory_region_section_addr(section, addr);
f3705d53 2350 if (memory_region_is_ram(section->mr)) {
37ec01d4 2351 section = &phys_sections[phys_section_rom];
06ef3525 2352 }
bc98a7ef 2353#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2354 io_mem_write(section->mr, addr, val >> 32, 4);
2355 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 2356#else
37ec01d4
AK
2357 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2358 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
2359#endif
2360 } else {
f3705d53 2361 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2362 & TARGET_PAGE_MASK)
cc5bea60 2363 + memory_region_section_addr(section, addr));
bc98a7ef
JM
2364 stq_p(ptr, val);
2365 }
2366}
2367
8df1cd07 2368/* warning: addr must be aligned */
a8170e5e 2369static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2370 enum device_endian endian)
8df1cd07 2371{
8df1cd07 2372 uint8_t *ptr;
f3705d53 2373 MemoryRegionSection *section;
8df1cd07 2374
ac1970fb 2375 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2376
f3705d53 2377 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2378 addr = memory_region_section_addr(section, addr);
f3705d53 2379 if (memory_region_is_ram(section->mr)) {
37ec01d4 2380 section = &phys_sections[phys_section_rom];
06ef3525 2381 }
1e78bcc1
AG
2382#if defined(TARGET_WORDS_BIGENDIAN)
2383 if (endian == DEVICE_LITTLE_ENDIAN) {
2384 val = bswap32(val);
2385 }
2386#else
2387 if (endian == DEVICE_BIG_ENDIAN) {
2388 val = bswap32(val);
2389 }
2390#endif
37ec01d4 2391 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
2392 } else {
2393 unsigned long addr1;
f3705d53 2394 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2395 + memory_region_section_addr(section, addr);
8df1cd07 2396 /* RAM case */
5579c7f3 2397 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2398 switch (endian) {
2399 case DEVICE_LITTLE_ENDIAN:
2400 stl_le_p(ptr, val);
2401 break;
2402 case DEVICE_BIG_ENDIAN:
2403 stl_be_p(ptr, val);
2404 break;
2405 default:
2406 stl_p(ptr, val);
2407 break;
2408 }
51d7a9eb 2409 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2410 }
2411}
2412
a8170e5e 2413void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2414{
2415 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2416}
2417
a8170e5e 2418void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2419{
2420 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2421}
2422
a8170e5e 2423void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2424{
2425 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2426}
2427
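/*
 * Illustrative sketch (not part of the original file): device emulation
 * with a fixed little-endian wire format uses the explicit *_le_phys
 * accessors so the layout does not depend on the target's native byte
 * order.  The descriptor layout shown is hypothetical.
 */
static void example_write_le_descriptor(hwaddr desc, uint32_t buf_addr,
                                        uint32_t buf_len)
{
    stl_le_phys(desc + 0, buf_addr);    /* word 0: buffer address, LE */
    stl_le_phys(desc + 4, buf_len);     /* word 1: buffer length, LE */
}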
aab33094 2428/* XXX: optimize */
a8170e5e 2429void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2430{
2431 uint8_t v = val;
2432 cpu_physical_memory_write(addr, &v, 1);
2433}
2434
733f0b02 2435/* warning: addr must be aligned */
a8170e5e 2436static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2437 enum device_endian endian)
aab33094 2438{
733f0b02 2439 uint8_t *ptr;
f3705d53 2440 MemoryRegionSection *section;
733f0b02 2441
ac1970fb 2442 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2443
f3705d53 2444 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2445 addr = memory_region_section_addr(section, addr);
f3705d53 2446 if (memory_region_is_ram(section->mr)) {
37ec01d4 2447 section = &phys_sections[phys_section_rom];
06ef3525 2448 }
1e78bcc1
AG
2449#if defined(TARGET_WORDS_BIGENDIAN)
2450 if (endian == DEVICE_LITTLE_ENDIAN) {
2451 val = bswap16(val);
2452 }
2453#else
2454 if (endian == DEVICE_BIG_ENDIAN) {
2455 val = bswap16(val);
2456 }
2457#endif
37ec01d4 2458 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
2459 } else {
2460 unsigned long addr1;
f3705d53 2461 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2462 + memory_region_section_addr(section, addr);
733f0b02
MT
2463 /* RAM case */
2464 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2465 switch (endian) {
2466 case DEVICE_LITTLE_ENDIAN:
2467 stw_le_p(ptr, val);
2468 break;
2469 case DEVICE_BIG_ENDIAN:
2470 stw_be_p(ptr, val);
2471 break;
2472 default:
2473 stw_p(ptr, val);
2474 break;
2475 }
51d7a9eb 2476 invalidate_and_set_dirty(addr1, 2);
733f0b02 2477 }
aab33094
FB
2478}
2479
a8170e5e 2480void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2481{
2482 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2483}
2484
a8170e5e 2485void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2486{
2487 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2488}
2489
a8170e5e 2490void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2491{
2492 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2493}
2494
aab33094 2495/* XXX: optimize */
a8170e5e 2496void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2497{
2498 val = tswap64(val);
71d2b725 2499 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2500}
2501
a8170e5e 2502void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2503{
2504 val = cpu_to_le64(val);
2505 cpu_physical_memory_write(addr, &val, 8);
2506}
2507
a8170e5e 2508void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2509{
2510 val = cpu_to_be64(val);
2511 cpu_physical_memory_write(addr, &val, 8);
2512}
2513
5e2972fd 2514/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2515int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2516 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2517{
2518 int l;
a8170e5e 2519 hwaddr phys_addr;
9b3c35e0 2520 target_ulong page;
13eb76e0
FB
2521
2522 while (len > 0) {
2523 page = addr & TARGET_PAGE_MASK;
2524 phys_addr = cpu_get_phys_page_debug(env, page);
2525 /* if no physical page mapped, return an error */
2526 if (phys_addr == -1)
2527 return -1;
2528 l = (page + TARGET_PAGE_SIZE) - addr;
2529 if (l > len)
2530 l = len;
5e2972fd 2531 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2532 if (is_write)
2533 cpu_physical_memory_write_rom(phys_addr, buf, l);
2534 else
5e2972fd 2535 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2536 len -= l;
2537 buf += l;
2538 addr += l;
2539 }
2540 return 0;
2541}
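/*
 * Illustrative sketch (not part of the original file): a debugger stub
 * reads guest *virtual* memory through cpu_memory_rw_debug(), which walks
 * the guest page tables itself and so does not rely on the TLB.
 */
static bool example_debug_read_u32(CPUArchState *env, target_ulong vaddr,
                                   uint32_t *value)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return false;               /* address not mapped by the guest */
    }
    *value = ldl_p(buf);            /* interpret in target byte order */
    return true;
}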
a68fe89c 2542#endif
13eb76e0 2543
b3755a91
PB
2544#if !defined(CONFIG_USER_ONLY)
2545
82afa586
BH
2546/*
2547 * A helper function for the _utterly broken_ virtio device model to find out if
2548 * it's running on a big endian machine. Don't do this at home kids!
2549 */
2550bool virtio_is_big_endian(void);
2551bool virtio_is_big_endian(void)
2552{
2553#if defined(TARGET_WORDS_BIGENDIAN)
2554 return true;
2555#else
2556 return false;
2557#endif
2558}
2559
61382a50 2560#endif
76f35538
WC
2561
2562#ifndef CONFIG_USER_ONLY
a8170e5e 2563bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2564{
2565 MemoryRegionSection *section;
2566
ac1970fb
AK
2567 section = phys_page_find(address_space_memory.dispatch,
2568 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
2569
2570 return !(memory_region_is_ram(section->mr) ||
2571 memory_region_is_romd(section->mr));
2572}
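/*
 * Illustrative sketch (not part of the original file): a caller scanning a
 * guest-physical range (for example while dumping memory) can use the
 * predicate above to skip pages that are backed by MMIO rather than
 * RAM/ROM.
 */
static void example_scan_ram_pages(hwaddr start, hwaddr end)
{
    hwaddr addr;

    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_io(addr)) {
            continue;               /* MMIO page, nothing to copy */
        }
        /* ... process the RAM/ROM page at addr ... */
    }
}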
2573#endif