git.proxmox.com Git - mirror_qemu.git/blame - exec.c
exec: change RAM list to a TAILQ
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
432d268c 34#include "hw/xen.h"
1de7afc9
PB
35#include "qemu/timer.h"
36#include "qemu/config-file.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
67d95c15 52
67d3b957 53//#define DEBUG_UNASSIGNED
db7b5426 54//#define DEBUG_SUBPAGE
1196be37 55
e2eef170 56#if !defined(CONFIG_USER_ONLY)
9fa3e853 57int phys_ram_fd;
74576198 58static int in_migration;
94a6b54f 59
a3161038 60RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
61
62static MemoryRegion *system_memory;
309cb471 63static MemoryRegion *system_io;
62152b8a 64
f6790af6
AK
65AddressSpace address_space_io;
66AddressSpace address_space_memory;
9e11908f 67DMAContext dma_context_memory;
2673a5da 68
0e0df1e2 69MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
de712f94 70static MemoryRegion io_mem_subpage_ram;
0e0df1e2 71
e2eef170 72#endif
9fa3e853 73
9349b4f9 74CPUArchState *first_cpu;
6a00d601
FB
75/* current CPU in the current thread. It is only valid inside
76 cpu_exec() */
9349b4f9 77DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 78/* 0 = Do not count executed instructions.
bf20dc07 79 1 = Precise instruction counting.
2e70f6ef
PB
80 2 = Adaptive rate instruction counting. */
81int use_icount = 0;
6a00d601 82
e2eef170 83#if !defined(CONFIG_USER_ONLY)
4346ae3e 84
5312bd8b
AK
85static MemoryRegionSection *phys_sections;
86static unsigned phys_sections_nb, phys_sections_nb_alloc;
87static uint16_t phys_section_unassigned;
aa102231
AK
88static uint16_t phys_section_notdirty;
89static uint16_t phys_section_rom;
90static uint16_t phys_section_watch;
5312bd8b 91
d6f2ea22
AK
92/* Simple allocator for PhysPageEntry nodes */
93static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
94static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
95
07f07b31 96#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 97
e2eef170 98static void io_mem_init(void);
62152b8a 99static void memory_map_init(void);
8b9c99d9 100static void *qemu_safe_ram_ptr(ram_addr_t addr);
e2eef170 101
1ec9b909 102static MemoryRegion io_mem_watch;
6658ffb8 103#endif
fd6ce8f6 104
6d9a1304 105#if !defined(CONFIG_USER_ONLY)
d6f2ea22 106
f7bf5461 107static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 108{
f7bf5461 109 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
d6f2ea22
AK
110 typedef PhysPageEntry Node[L2_SIZE];
111 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
f7bf5461
AK
112 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
113 phys_map_nodes_nb + nodes);
d6f2ea22
AK
114 phys_map_nodes = g_renew(Node, phys_map_nodes,
115 phys_map_nodes_nb_alloc);
116 }
f7bf5461
AK
117}
118
119static uint16_t phys_map_node_alloc(void)
120{
121 unsigned i;
122 uint16_t ret;
123
124 ret = phys_map_nodes_nb++;
125 assert(ret != PHYS_MAP_NODE_NIL);
126 assert(ret != phys_map_nodes_nb_alloc);
d6f2ea22 127 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 128 phys_map_nodes[ret][i].is_leaf = 0;
c19e8800 129 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 130 }
f7bf5461 131 return ret;
d6f2ea22
AK
132}
133
134static void phys_map_nodes_reset(void)
135{
136 phys_map_nodes_nb = 0;
137}
138
92e873b9 139
a8170e5e
AK
140static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
141 hwaddr *nb, uint16_t leaf,
2999097b 142 int level)
f7bf5461
AK
143{
144 PhysPageEntry *p;
145 int i;
a8170e5e 146 hwaddr step = (hwaddr)1 << (level * L2_BITS);
108c49b8 147
07f07b31 148 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800
AK
149 lp->ptr = phys_map_node_alloc();
150 p = phys_map_nodes[lp->ptr];
f7bf5461
AK
151 if (level == 0) {
152 for (i = 0; i < L2_SIZE; i++) {
07f07b31 153 p[i].is_leaf = 1;
c19e8800 154 p[i].ptr = phys_section_unassigned;
4346ae3e 155 }
67c4d23c 156 }
f7bf5461 157 } else {
c19e8800 158 p = phys_map_nodes[lp->ptr];
92e873b9 159 }
2999097b 160 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 161
2999097b 162 while (*nb && lp < &p[L2_SIZE]) {
07f07b31
AK
163 if ((*index & (step - 1)) == 0 && *nb >= step) {
164 lp->is_leaf = true;
c19e8800 165 lp->ptr = leaf;
07f07b31
AK
166 *index += step;
167 *nb -= step;
2999097b
AK
168 } else {
169 phys_page_set_level(lp, index, nb, leaf, level - 1);
170 }
171 ++lp;
f7bf5461
AK
172 }
173}
174
ac1970fb 175static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 176 hwaddr index, hwaddr nb,
2999097b 177 uint16_t leaf)
f7bf5461 178{
2999097b 179 /* Wildly overreserve - it doesn't matter much. */
07f07b31 180 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 181
ac1970fb 182 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
183}
184
a8170e5e 185MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
92e873b9 186{
ac1970fb 187 PhysPageEntry lp = d->phys_map;
31ab2b4a
AK
188 PhysPageEntry *p;
189 int i;
31ab2b4a 190 uint16_t s_index = phys_section_unassigned;
f1f6e3b8 191
07f07b31 192 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 193 if (lp.ptr == PHYS_MAP_NODE_NIL) {
31ab2b4a
AK
194 goto not_found;
195 }
c19e8800 196 p = phys_map_nodes[lp.ptr];
31ab2b4a 197 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 198 }
31ab2b4a 199
c19e8800 200 s_index = lp.ptr;
31ab2b4a 201not_found:
f3705d53
AK
202 return &phys_sections[s_index];
203}
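/* Illustrative sketch (editor's addition, not part of exec.c): how the
 * phys map above is typically populated and then queried. "d" is assumed
 * to be an AddressSpaceDispatch and "section" a valid index into
 * phys_sections. */
static void example_phys_map_usage(AddressSpaceDispatch *d, uint16_t section)
{
    MemoryRegionSection *s;

    /* Map 16 consecutive guest-physical pages starting at 1 MiB. */
    phys_page_set(d, 0x100000 >> TARGET_PAGE_BITS, 16, section);

    /* A lookup inside that range returns the registered section; pages
       that were never set resolve to phys_section_unassigned. */
    s = phys_page_find(d, 0x100000 >> TARGET_PAGE_BITS);
    assert(s == &phys_sections[section]);
}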
204
e5548617
BS
205bool memory_region_is_unassigned(MemoryRegion *mr)
206{
207 return mr != &io_mem_ram && mr != &io_mem_rom
208 && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 209 && mr != &io_mem_watch;
fd6ce8f6 210}
5b6dd868 211#endif
fd6ce8f6 212
5b6dd868 213void cpu_exec_init_all(void)
fdbb84d1 214{
5b6dd868
BS
215#if !defined(CONFIG_USER_ONLY)
216 memory_map_init();
217 io_mem_init();
fdbb84d1 218#endif
5b6dd868 219}
fdbb84d1 220
5b6dd868
BS
221#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
222
223static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 224{
5b6dd868 225 CPUArchState *env = opaque;
a513fe19 226
5b6dd868
BS
227 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
228 version_id is increased. */
229 env->interrupt_request &= ~0x01;
230 tlb_flush(env, 1);
231
232 return 0;
a513fe19 233}
7501267e 234
5b6dd868
BS
235static const VMStateDescription vmstate_cpu_common = {
236 .name = "cpu_common",
237 .version_id = 1,
238 .minimum_version_id = 1,
239 .minimum_version_id_old = 1,
240 .post_load = cpu_common_post_load,
241 .fields = (VMStateField []) {
242 VMSTATE_UINT32(halted, CPUArchState),
243 VMSTATE_UINT32(interrupt_request, CPUArchState),
244 VMSTATE_END_OF_LIST()
245 }
246};
247#endif
ea041c0e 248
5b6dd868 249CPUArchState *qemu_get_cpu(int cpu)
ea041c0e 250{
5b6dd868 251 CPUArchState *env = first_cpu;
ea041c0e 252
5b6dd868
BS
253 while (env) {
254 if (env->cpu_index == cpu)
255 break;
256 env = env->next_cpu;
ea041c0e 257 }
5b6dd868
BS
258
259 return env;
ea041c0e
FB
260}
261
5b6dd868 262void cpu_exec_init(CPUArchState *env)
ea041c0e 263{
5b6dd868
BS
264#ifndef CONFIG_USER_ONLY
265 CPUState *cpu = ENV_GET_CPU(env);
266#endif
267 CPUArchState **penv;
268 int cpu_index;
269
270#if defined(CONFIG_USER_ONLY)
271 cpu_list_lock();
272#endif
273 env->next_cpu = NULL;
274 penv = &first_cpu;
275 cpu_index = 0;
276 while (*penv != NULL) {
277 penv = &(*penv)->next_cpu;
278 cpu_index++;
279 }
280 env->cpu_index = cpu_index;
281 env->numa_node = 0;
282 QTAILQ_INIT(&env->breakpoints);
283 QTAILQ_INIT(&env->watchpoints);
284#ifndef CONFIG_USER_ONLY
285 cpu->thread_id = qemu_get_thread_id();
286#endif
287 *penv = env;
288#if defined(CONFIG_USER_ONLY)
289 cpu_list_unlock();
290#endif
291#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
292 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
293 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
294 cpu_save, cpu_load, env);
295#endif
ea041c0e
FB
296}
297
1fddef4b 298#if defined(TARGET_HAS_ICE)
94df27fd 299#if defined(CONFIG_USER_ONLY)
9349b4f9 300static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
94df27fd
PB
301{
302 tb_invalidate_phys_page_range(pc, pc + 1, 0);
303}
304#else
1e7855a5
MF
305static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
306{
9d70c4b7
MF
307 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
308 (pc & ~TARGET_PAGE_MASK));
1e7855a5 309}
c27004ec 310#endif
94df27fd 311#endif /* TARGET_HAS_ICE */
d720b93d 312
c527ee8f 313#if defined(CONFIG_USER_ONLY)
9349b4f9 314void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f
PB
315
316{
317}
318
9349b4f9 319int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f
PB
320 int flags, CPUWatchpoint **watchpoint)
321{
322 return -ENOSYS;
323}
324#else
6658ffb8 325/* Add a watchpoint. */
9349b4f9 326int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 327 int flags, CPUWatchpoint **watchpoint)
6658ffb8 328{
b4051334 329 target_ulong len_mask = ~(len - 1);
c0ce998e 330 CPUWatchpoint *wp;
6658ffb8 331
b4051334 332 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
333 if ((len & (len - 1)) || (addr & ~len_mask) ||
334 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
335 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
336 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
337 return -EINVAL;
338 }
7267c094 339 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
340
341 wp->vaddr = addr;
b4051334 342 wp->len_mask = len_mask;
a1d1bb31
AL
343 wp->flags = flags;
344
2dc9f411 345 /* keep all GDB-injected watchpoints in front */
c0ce998e 346 if (flags & BP_GDB)
72cf2d4f 347 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 348 else
72cf2d4f 349 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 350
6658ffb8 351 tlb_flush_page(env, addr);
a1d1bb31
AL
352
353 if (watchpoint)
354 *watchpoint = wp;
355 return 0;
6658ffb8
PB
356}
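/* Illustrative sketch (editor's addition, not part of exec.c): inserting a
 * 4-byte write watchpoint. Per the checks above, len must be a power of two
 * and addr aligned to it; BP_GDB keeps the entry at the head of the list
 * like a GDB-injected watchpoint. */
static int example_set_write_watchpoint(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    return cpu_watchpoint_insert(env, addr & ~(target_ulong)3, 4,
                                 BP_MEM_WRITE | BP_GDB, &wp);
}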
357
a1d1bb31 358/* Remove a specific watchpoint. */
9349b4f9 359int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 360 int flags)
6658ffb8 361{
b4051334 362 target_ulong len_mask = ~(len - 1);
a1d1bb31 363 CPUWatchpoint *wp;
6658ffb8 364
72cf2d4f 365 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 366 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 367 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 368 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
369 return 0;
370 }
371 }
a1d1bb31 372 return -ENOENT;
6658ffb8
PB
373}
374
a1d1bb31 375/* Remove a specific watchpoint by reference. */
9349b4f9 376void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 377{
72cf2d4f 378 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 379
a1d1bb31
AL
380 tlb_flush_page(env, watchpoint->vaddr);
381
7267c094 382 g_free(watchpoint);
a1d1bb31
AL
383}
384
385/* Remove all matching watchpoints. */
9349b4f9 386void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 387{
c0ce998e 388 CPUWatchpoint *wp, *next;
a1d1bb31 389
72cf2d4f 390 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
391 if (wp->flags & mask)
392 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 393 }
7d03f82f 394}
c527ee8f 395#endif
7d03f82f 396
a1d1bb31 397/* Add a breakpoint. */
9349b4f9 398int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 399 CPUBreakpoint **breakpoint)
4c3a88a2 400{
1fddef4b 401#if defined(TARGET_HAS_ICE)
c0ce998e 402 CPUBreakpoint *bp;
3b46e624 403
7267c094 404 bp = g_malloc(sizeof(*bp));
4c3a88a2 405
a1d1bb31
AL
406 bp->pc = pc;
407 bp->flags = flags;
408
2dc9f411 409 /* keep all GDB-injected breakpoints in front */
c0ce998e 410 if (flags & BP_GDB)
72cf2d4f 411 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 412 else
72cf2d4f 413 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 414
d720b93d 415 breakpoint_invalidate(env, pc);
a1d1bb31
AL
416
417 if (breakpoint)
418 *breakpoint = bp;
4c3a88a2
FB
419 return 0;
420#else
a1d1bb31 421 return -ENOSYS;
4c3a88a2
FB
422#endif
423}
424
a1d1bb31 425/* Remove a specific breakpoint. */
9349b4f9 426int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 427{
7d03f82f 428#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
429 CPUBreakpoint *bp;
430
72cf2d4f 431 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
432 if (bp->pc == pc && bp->flags == flags) {
433 cpu_breakpoint_remove_by_ref(env, bp);
434 return 0;
435 }
7d03f82f 436 }
a1d1bb31
AL
437 return -ENOENT;
438#else
439 return -ENOSYS;
7d03f82f
EI
440#endif
441}
442
a1d1bb31 443/* Remove a specific breakpoint by reference. */
9349b4f9 444void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 445{
1fddef4b 446#if defined(TARGET_HAS_ICE)
72cf2d4f 447 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 448
a1d1bb31
AL
449 breakpoint_invalidate(env, breakpoint->pc);
450
7267c094 451 g_free(breakpoint);
a1d1bb31
AL
452#endif
453}
454
455/* Remove all matching breakpoints. */
9349b4f9 456void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
457{
458#if defined(TARGET_HAS_ICE)
c0ce998e 459 CPUBreakpoint *bp, *next;
a1d1bb31 460
72cf2d4f 461 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
462 if (bp->flags & mask)
463 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 464 }
4c3a88a2
FB
465#endif
466}
467
c33a346e
FB
468/* enable or disable single step mode. EXCP_DEBUG is returned by the
469 CPU loop after each instruction */
9349b4f9 470void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 471{
1fddef4b 472#if defined(TARGET_HAS_ICE)
c33a346e
FB
473 if (env->singlestep_enabled != enabled) {
474 env->singlestep_enabled = enabled;
e22a25c9
AL
475 if (kvm_enabled())
476 kvm_update_guest_debug(env, 0);
477 else {
ccbb4d44 478 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
479 /* XXX: only flush what is necessary */
480 tb_flush(env);
481 }
c33a346e
FB
482 }
483#endif
484}
485
9349b4f9 486void cpu_reset_interrupt(CPUArchState *env, int mask)
b54ad049
FB
487{
488 env->interrupt_request &= ~mask;
489}
490
9349b4f9 491void cpu_exit(CPUArchState *env)
3098dba0
AJ
492{
493 env->exit_request = 1;
494 cpu_unlink_tb(env);
495}
496
9349b4f9 497void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
498{
499 va_list ap;
493ae1f0 500 va_list ap2;
7501267e
FB
501
502 va_start(ap, fmt);
493ae1f0 503 va_copy(ap2, ap);
7501267e
FB
504 fprintf(stderr, "qemu: fatal: ");
505 vfprintf(stderr, fmt, ap);
506 fprintf(stderr, "\n");
6fd2a026 507 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
508 if (qemu_log_enabled()) {
509 qemu_log("qemu: fatal: ");
510 qemu_log_vprintf(fmt, ap2);
511 qemu_log("\n");
6fd2a026 512 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 513 qemu_log_flush();
93fcfe39 514 qemu_log_close();
924edcae 515 }
493ae1f0 516 va_end(ap2);
f9373291 517 va_end(ap);
fd052bf6
RV
518#if defined(CONFIG_USER_ONLY)
519 {
520 struct sigaction act;
521 sigfillset(&act.sa_mask);
522 act.sa_handler = SIG_DFL;
523 sigaction(SIGABRT, &act, NULL);
524 }
525#endif
7501267e
FB
526 abort();
527}
528
9349b4f9 529CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 530{
9349b4f9
AF
531 CPUArchState *new_env = cpu_init(env->cpu_model_str);
532 CPUArchState *next_cpu = new_env->next_cpu;
c5be9f08 533 int cpu_index = new_env->cpu_index;
5a38f081
AL
534#if defined(TARGET_HAS_ICE)
535 CPUBreakpoint *bp;
536 CPUWatchpoint *wp;
537#endif
538
9349b4f9 539 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081
AL
540
541 /* Preserve chaining and index. */
c5be9f08
TS
542 new_env->next_cpu = next_cpu;
543 new_env->cpu_index = cpu_index;
5a38f081
AL
544
545 /* Clone all break/watchpoints.
546 Note: Once we support ptrace with hw-debug register access, make sure
547 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
548 QTAILQ_INIT(&env->breakpoints);
549 QTAILQ_INIT(&env->watchpoints);
5a38f081 550#if defined(TARGET_HAS_ICE)
72cf2d4f 551 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
552 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
553 }
72cf2d4f 554 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
555 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
556 wp->flags, NULL);
557 }
558#endif
559
c5be9f08
TS
560 return new_env;
561}
562
0124311e 563#if !defined(CONFIG_USER_ONLY)
d24981d3
JQ
564static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
565 uintptr_t length)
566{
567 uintptr_t start1;
568
569 /* we modify the TLB cache so that the dirty bit will be set again
570 when accessing the range */
571 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
572 /* Check that we don't span multiple blocks - this breaks the
573 address comparisons below. */
574 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
575 != (end - 1) - start) {
576 abort();
577 }
578 cpu_tlb_reset_dirty_all(start1, length);
579
580}
581
5579c7f3 582/* Note: start and end must be within the same ram block. */
c227f099 583void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 584 int dirty_flags)
1ccde1cb 585{
d24981d3 586 uintptr_t length;
1ccde1cb
FB
587
588 start &= TARGET_PAGE_MASK;
589 end = TARGET_PAGE_ALIGN(end);
590
591 length = end - start;
592 if (length == 0)
593 return;
f7c11b53 594 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 595
d24981d3
JQ
596 if (tcg_enabled()) {
597 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 598 }
1ccde1cb
FB
599}
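/* Illustrative sketch (editor's addition, not part of exec.c): how a caller
 * such as the RAM migration code might clear the migration dirty bit for a
 * single page it has just sent. Assumes "addr" is page aligned and that
 * MIGRATION_DIRTY_FLAG (from the dirty-flag definitions used elsewhere in
 * this file) is the bit being tracked. */
static void example_clear_migration_dirty(ram_addr_t addr)
{
    cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                    MIGRATION_DIRTY_FLAG);
}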
600
8b9c99d9 601static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 602{
f6f3fbca 603 int ret = 0;
74576198 604 in_migration = enable;
f6f3fbca 605 return ret;
74576198
AL
606}
607
a8170e5e 608hwaddr memory_region_section_get_iotlb(CPUArchState *env,
e5548617
BS
609 MemoryRegionSection *section,
610 target_ulong vaddr,
a8170e5e 611 hwaddr paddr,
e5548617
BS
612 int prot,
613 target_ulong *address)
614{
a8170e5e 615 hwaddr iotlb;
e5548617
BS
616 CPUWatchpoint *wp;
617
cc5bea60 618 if (memory_region_is_ram(section->mr)) {
e5548617
BS
619 /* Normal RAM. */
620 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 621 + memory_region_section_addr(section, paddr);
e5548617
BS
622 if (!section->readonly) {
623 iotlb |= phys_section_notdirty;
624 } else {
625 iotlb |= phys_section_rom;
626 }
627 } else {
628 /* IO handlers are currently passed a physical address.
629 It would be nice to pass an offset from the base address
630 of that region. This would avoid having to special case RAM,
631 and avoid full address decoding in every device.
632 We can't use the high bits of pd for this because
633 IO_MEM_ROMD uses these as a ram address. */
634 iotlb = section - phys_sections;
cc5bea60 635 iotlb += memory_region_section_addr(section, paddr);
e5548617
BS
636 }
637
638 /* Make accesses to pages with watchpoints go via the
639 watchpoint trap routines. */
640 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
641 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
642 /* Avoid trapping reads of pages with a write breakpoint. */
643 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
644 iotlb = phys_section_watch + paddr;
645 *address |= TLB_MMIO;
646 break;
647 }
648 }
649 }
650
651 return iotlb;
652}
9fa3e853
FB
653#endif /* defined(CONFIG_USER_ONLY) */
654
e2eef170 655#if !defined(CONFIG_USER_ONLY)
8da3ff18 656
c04b2b78
PB
657#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
658typedef struct subpage_t {
70c68e44 659 MemoryRegion iomem;
a8170e5e 660 hwaddr base;
5312bd8b 661 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
662} subpage_t;
663
c227f099 664static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 665 uint16_t section);
a8170e5e 666static subpage_t *subpage_init(hwaddr base);
5312bd8b 667static void destroy_page_desc(uint16_t section_index)
54688b1e 668{
5312bd8b
AK
669 MemoryRegionSection *section = &phys_sections[section_index];
670 MemoryRegion *mr = section->mr;
54688b1e
AK
671
672 if (mr->subpage) {
673 subpage_t *subpage = container_of(mr, subpage_t, iomem);
674 memory_region_destroy(&subpage->iomem);
675 g_free(subpage);
676 }
677}
678
4346ae3e 679static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
680{
681 unsigned i;
d6f2ea22 682 PhysPageEntry *p;
54688b1e 683
c19e8800 684 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
685 return;
686 }
687
c19e8800 688 p = phys_map_nodes[lp->ptr];
4346ae3e 689 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 690 if (!p[i].is_leaf) {
54688b1e 691 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 692 } else {
c19e8800 693 destroy_page_desc(p[i].ptr);
54688b1e 694 }
54688b1e 695 }
07f07b31 696 lp->is_leaf = 0;
c19e8800 697 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
698}
699
ac1970fb 700static void destroy_all_mappings(AddressSpaceDispatch *d)
54688b1e 701{
ac1970fb 702 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
d6f2ea22 703 phys_map_nodes_reset();
54688b1e
AK
704}
705
5312bd8b
AK
706static uint16_t phys_section_add(MemoryRegionSection *section)
707{
708 if (phys_sections_nb == phys_sections_nb_alloc) {
709 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
710 phys_sections = g_renew(MemoryRegionSection, phys_sections,
711 phys_sections_nb_alloc);
712 }
713 phys_sections[phys_sections_nb] = *section;
714 return phys_sections_nb++;
715}
716
717static void phys_sections_clear(void)
718{
719 phys_sections_nb = 0;
720}
721
ac1970fb 722static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
723{
724 subpage_t *subpage;
a8170e5e 725 hwaddr base = section->offset_within_address_space
0f0cb164 726 & TARGET_PAGE_MASK;
ac1970fb 727 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
0f0cb164
AK
728 MemoryRegionSection subsection = {
729 .offset_within_address_space = base,
730 .size = TARGET_PAGE_SIZE,
731 };
a8170e5e 732 hwaddr start, end;
0f0cb164 733
f3705d53 734 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 735
f3705d53 736 if (!(existing->mr->subpage)) {
0f0cb164
AK
737 subpage = subpage_init(base);
738 subsection.mr = &subpage->iomem;
ac1970fb 739 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 740 phys_section_add(&subsection));
0f0cb164 741 } else {
f3705d53 742 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
743 }
744 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
adb2a9b5 745 end = start + section->size - 1;
0f0cb164
AK
746 subpage_register(subpage, start, end, phys_section_add(section));
747}
748
749
ac1970fb 750static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
33417e70 751{
a8170e5e 752 hwaddr start_addr = section->offset_within_address_space;
dd81124b 753 ram_addr_t size = section->size;
a8170e5e 754 hwaddr addr;
5312bd8b 755 uint16_t section_index = phys_section_add(section);
dd81124b 756
3b8e6a2d 757 assert(size);
f6f3fbca 758
3b8e6a2d 759 addr = start_addr;
ac1970fb 760 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2999097b 761 section_index);
33417e70
FB
762}
763
ac1970fb 764static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 765{
ac1970fb 766 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
0f0cb164
AK
767 MemoryRegionSection now = *section, remain = *section;
768
769 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
770 || (now.size < TARGET_PAGE_SIZE)) {
771 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
772 - now.offset_within_address_space,
773 now.size);
ac1970fb 774 register_subpage(d, &now);
0f0cb164
AK
775 remain.size -= now.size;
776 remain.offset_within_address_space += now.size;
777 remain.offset_within_region += now.size;
778 }
69b67646
TH
779 while (remain.size >= TARGET_PAGE_SIZE) {
780 now = remain;
781 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
782 now.size = TARGET_PAGE_SIZE;
ac1970fb 783 register_subpage(d, &now);
69b67646
TH
784 } else {
785 now.size &= TARGET_PAGE_MASK;
ac1970fb 786 register_multipage(d, &now);
69b67646 787 }
0f0cb164
AK
788 remain.size -= now.size;
789 remain.offset_within_address_space += now.size;
790 remain.offset_within_region += now.size;
791 }
792 now = remain;
793 if (now.size) {
ac1970fb 794 register_subpage(d, &now);
0f0cb164
AK
795 }
796}
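/* Worked example (editor's addition, assuming TARGET_PAGE_SIZE == 0x1000 and
 * an offset_within_region with the same 0x800 misalignment): a section
 * covering guest-physical [0x1800, 0x4800) is split by mem_add() into
 *   [0x1800, 0x2000)  register_subpage()    (partial leading page)
 *   [0x2000, 0x4000)  register_multipage()  (whole pages)
 *   [0x4000, 0x4800)  register_subpage()    (partial trailing page)
 */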
797
62a2744c
SY
798void qemu_flush_coalesced_mmio_buffer(void)
799{
800 if (kvm_enabled())
801 kvm_flush_coalesced_mmio_buffer();
802}
803
c902760f
MT
804#if defined(__linux__) && !defined(TARGET_S390X)
805
806#include <sys/vfs.h>
807
808#define HUGETLBFS_MAGIC 0x958458f6
809
810static long gethugepagesize(const char *path)
811{
812 struct statfs fs;
813 int ret;
814
815 do {
9742bf26 816 ret = statfs(path, &fs);
c902760f
MT
817 } while (ret != 0 && errno == EINTR);
818
819 if (ret != 0) {
9742bf26
YT
820 perror(path);
821 return 0;
c902760f
MT
822 }
823
824 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 825 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
826
827 return fs.f_bsize;
828}
829
04b16653
AW
830static void *file_ram_alloc(RAMBlock *block,
831 ram_addr_t memory,
832 const char *path)
c902760f
MT
833{
834 char *filename;
835 void *area;
836 int fd;
837#ifdef MAP_POPULATE
838 int flags;
839#endif
840 unsigned long hpagesize;
841
842 hpagesize = gethugepagesize(path);
843 if (!hpagesize) {
9742bf26 844 return NULL;
c902760f
MT
845 }
846
847 if (memory < hpagesize) {
848 return NULL;
849 }
850
851 if (kvm_enabled() && !kvm_has_sync_mmu()) {
852 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
853 return NULL;
854 }
855
856 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 857 return NULL;
c902760f
MT
858 }
859
860 fd = mkstemp(filename);
861 if (fd < 0) {
9742bf26
YT
862 perror("unable to create backing store for hugepages");
863 free(filename);
864 return NULL;
c902760f
MT
865 }
866 unlink(filename);
867 free(filename);
868
869 memory = (memory+hpagesize-1) & ~(hpagesize-1);
870
871 /*
872 * ftruncate is not supported by hugetlbfs in older
873 * hosts, so don't bother bailing out on errors.
874 * If anything goes wrong with it under other filesystems,
875 * mmap will fail.
876 */
877 if (ftruncate(fd, memory))
9742bf26 878 perror("ftruncate");
c902760f
MT
879
880#ifdef MAP_POPULATE
881 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
882 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
883 * to sidestep this quirk.
884 */
885 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
886 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
887#else
888 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
889#endif
890 if (area == MAP_FAILED) {
9742bf26
YT
891 perror("file_ram_alloc: can't mmap RAM pages");
892 close(fd);
893 return (NULL);
c902760f 894 }
04b16653 895 block->fd = fd;
c902760f
MT
896 return area;
897}
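/* Illustrative note (editor's addition): this path is reached when guest RAM
 * is backed by a hugetlbfs mount, e.g.
 *
 *   qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages
 *
 * Each RAMBlock then maps an unlinked temporary file created on that mount,
 * so its pages come from the host's huge page pool. */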
898#endif
899
d17b5288 900static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
901{
902 RAMBlock *block, *next_block;
3e837b2c 903 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 904
a3161038 905 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
906 return 0;
907
a3161038 908 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 909 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
910
911 end = block->offset + block->length;
912
a3161038 913 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
914 if (next_block->offset >= end) {
915 next = MIN(next, next_block->offset);
916 }
917 }
918 if (next - end >= size && next - end < mingap) {
3e837b2c 919 offset = end;
04b16653
AW
920 mingap = next - end;
921 }
922 }
3e837b2c
AW
923
924 if (offset == RAM_ADDR_MAX) {
925 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
926 (uint64_t)size);
927 abort();
928 }
929
04b16653
AW
930 return offset;
931}
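/* Worked example (editor's addition): with existing blocks at [0x0, 0x1000)
 * and [0x3000, 0x4000), find_ram_offset(0x1000) returns 0x1000 -- the start
 * of the smallest gap that still fits the request (a best-fit search over
 * the end of every block). */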
932
652d7ec2 933ram_addr_t last_ram_offset(void)
d17b5288
AW
934{
935 RAMBlock *block;
936 ram_addr_t last = 0;
937
a3161038 938 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
939 last = MAX(last, block->offset + block->length);
940
941 return last;
942}
943
ddb97f1d
JB
944static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
945{
946 int ret;
947 QemuOpts *machine_opts;
948
949 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
950 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
951 if (machine_opts &&
952 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
953 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
954 if (ret) {
955 perror("qemu_madvise");
956 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
957 "but dump_guest_core=off specified\n");
958 }
959 }
960}
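/* Illustrative note (editor's addition): this honours the command-line
 * switch, e.g. "-machine pc,dump-guest-core=off", which excludes guest RAM
 * from QEMU core dumps on hosts where madvise(MADV_DONTDUMP) is available. */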
961
c5705a77 962void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
963{
964 RAMBlock *new_block, *block;
965
c5705a77 966 new_block = NULL;
a3161038 967 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77
AK
968 if (block->offset == addr) {
969 new_block = block;
970 break;
971 }
972 }
973 assert(new_block);
974 assert(!new_block->idstr[0]);
84b89d78 975
09e5ab63
AL
976 if (dev) {
977 char *id = qdev_get_dev_path(dev);
84b89d78
CM
978 if (id) {
979 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 980 g_free(id);
84b89d78
CM
981 }
982 }
983 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
984
a3161038 985 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 986 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
987 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
988 new_block->idstr);
989 abort();
990 }
991 }
c5705a77
AK
992}
993
8490fc78
LC
994static int memory_try_enable_merging(void *addr, size_t len)
995{
996 QemuOpts *opts;
997
998 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
999 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1000 /* disabled by the user */
1001 return 0;
1002 }
1003
1004 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1005}
1006
c5705a77
AK
1007ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1008 MemoryRegion *mr)
1009{
1010 RAMBlock *new_block;
1011
1012 size = TARGET_PAGE_ALIGN(size);
1013 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1014
7c637366 1015 new_block->mr = mr;
432d268c 1016 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1017 if (host) {
1018 new_block->host = host;
cd19cfa2 1019 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
1020 } else {
1021 if (mem_path) {
c902760f 1022#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
1023 new_block->host = file_ram_alloc(new_block, size, mem_path);
1024 if (!new_block->host) {
1025 new_block->host = qemu_vmalloc(size);
8490fc78 1026 memory_try_enable_merging(new_block->host, size);
6977dfe6 1027 }
c902760f 1028#else
6977dfe6
YT
1029 fprintf(stderr, "-mem-path option unsupported\n");
1030 exit(1);
c902760f 1031#endif
6977dfe6 1032 } else {
868bb33f 1033 if (xen_enabled()) {
fce537d4 1034 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
1035 } else if (kvm_enabled()) {
1036 /* some s390/kvm configurations have special constraints */
1037 new_block->host = kvm_vmalloc(size);
432d268c
JN
1038 } else {
1039 new_block->host = qemu_vmalloc(size);
1040 }
8490fc78 1041 memory_try_enable_merging(new_block->host, size);
6977dfe6 1042 }
c902760f 1043 }
94a6b54f
PB
1044 new_block->length = size;
1045
a3161038 1046 QTAILQ_INSERT_HEAD(&ram_list.blocks, new_block, next);
0d6d3c87 1047 ram_list.mru_block = NULL;
94a6b54f 1048
7267c094 1049 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1050 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
1051 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1052 0, size >> TARGET_PAGE_BITS);
1720aeee 1053 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1054
ddb97f1d 1055 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1056 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1057
6f0437e8
JK
1058 if (kvm_enabled())
1059 kvm_setup_guest_memory(new_block->host, size);
1060
94a6b54f
PB
1061 return new_block->offset;
1062}
e9a1ab19 1063
c5705a77 1064ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1065{
c5705a77 1066 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1067}
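/* Illustrative sketch (editor's addition, not part of exec.c): a device
 * allocating and clearing 16 MiB of video RAM. memory_region_init_ram() in
 * memory.c ultimately lands in qemu_ram_alloc() above; the returned
 * ram_addr_t indexes ram_list.phys_dirty and can be turned into a host
 * pointer with qemu_get_ram_ptr(). */
static ram_addr_t example_alloc_vram(MemoryRegion *mr)
{
    ram_addr_t offset = qemu_ram_alloc(16 * 1024 * 1024, mr);

    memset(qemu_get_ram_ptr(offset), 0, 16 * 1024 * 1024);
    return offset;
}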
1068
1f2e98b6
AW
1069void qemu_ram_free_from_ptr(ram_addr_t addr)
1070{
1071 RAMBlock *block;
1072
a3161038 1073 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1074 if (addr == block->offset) {
a3161038 1075 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1076 ram_list.mru_block = NULL;
7267c094 1077 g_free(block);
1f2e98b6
AW
1078 return;
1079 }
1080 }
1081}
1082
c227f099 1083void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1084{
04b16653
AW
1085 RAMBlock *block;
1086
a3161038 1087 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1088 if (addr == block->offset) {
a3161038 1089 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1090 ram_list.mru_block = NULL;
cd19cfa2
HY
1091 if (block->flags & RAM_PREALLOC_MASK) {
1092 ;
1093 } else if (mem_path) {
04b16653
AW
1094#if defined (__linux__) && !defined(TARGET_S390X)
1095 if (block->fd) {
1096 munmap(block->host, block->length);
1097 close(block->fd);
1098 } else {
1099 qemu_vfree(block->host);
1100 }
fd28aa13
JK
1101#else
1102 abort();
04b16653
AW
1103#endif
1104 } else {
1105#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1106 munmap(block->host, block->length);
1107#else
868bb33f 1108 if (xen_enabled()) {
e41d7c69 1109 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
1110 } else {
1111 qemu_vfree(block->host);
1112 }
04b16653
AW
1113#endif
1114 }
7267c094 1115 g_free(block);
04b16653
AW
1116 return;
1117 }
1118 }
1119
e9a1ab19
FB
1120}
1121
cd19cfa2
HY
1122#ifndef _WIN32
1123void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1124{
1125 RAMBlock *block;
1126 ram_addr_t offset;
1127 int flags;
1128 void *area, *vaddr;
1129
a3161038 1130 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1131 offset = addr - block->offset;
1132 if (offset < block->length) {
1133 vaddr = block->host + offset;
1134 if (block->flags & RAM_PREALLOC_MASK) {
1135 ;
1136 } else {
1137 flags = MAP_FIXED;
1138 munmap(vaddr, length);
1139 if (mem_path) {
1140#if defined(__linux__) && !defined(TARGET_S390X)
1141 if (block->fd) {
1142#ifdef MAP_POPULATE
1143 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1144 MAP_PRIVATE;
1145#else
1146 flags |= MAP_PRIVATE;
1147#endif
1148 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1149 flags, block->fd, offset);
1150 } else {
1151 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1152 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1153 flags, -1, 0);
1154 }
fd28aa13
JK
1155#else
1156 abort();
cd19cfa2
HY
1157#endif
1158 } else {
1159#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1160 flags |= MAP_SHARED | MAP_ANONYMOUS;
1161 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1162 flags, -1, 0);
1163#else
1164 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1165 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1166 flags, -1, 0);
1167#endif
1168 }
1169 if (area != vaddr) {
f15fbc4b
AP
1170 fprintf(stderr, "Could not remap addr: "
1171 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1172 length, addr);
1173 exit(1);
1174 }
8490fc78 1175 memory_try_enable_merging(vaddr, length);
ddb97f1d 1176 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1177 }
1178 return;
1179 }
1180 }
1181}
1182#endif /* !_WIN32 */
1183
dc828ca1 1184/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
1185 With the exception of the softmmu code in this file, this should
1186 only be used for local memory (e.g. video ram) that the device owns,
1187 and knows it isn't going to access beyond the end of the block.
1188
1189 It should not be used for general purpose DMA.
1190 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1191 */
c227f099 1192void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 1193{
94a6b54f
PB
1194 RAMBlock *block;
1195
0d6d3c87
PB
1196 block = ram_list.mru_block;
1197 if (block && addr - block->offset < block->length) {
1198 goto found;
1199 }
a3161038 1200 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1201 if (addr - block->offset < block->length) {
0d6d3c87 1202 goto found;
f471a17e 1203 }
94a6b54f 1204 }
f471a17e
AW
1205
1206 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1207 abort();
1208
0d6d3c87
PB
1209found:
1210 ram_list.mru_block = block;
1211 if (xen_enabled()) {
1212 /* We need to check if the requested address is in the RAM
1213 * because we don't want to map the entire memory in QEMU.
1214 * In that case just map until the end of the page.
1215 */
1216 if (block->offset == 0) {
1217 return xen_map_cache(addr, 0, 0);
1218 } else if (block->host == NULL) {
1219 block->host =
1220 xen_map_cache(block->offset, block->length, 1);
1221 }
1222 }
1223 return block->host + (addr - block->offset);
dc828ca1
PB
1224}
1225
0d6d3c87
PB
1226/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1227 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1228 *
1229 * ??? Is this still necessary?
b2e0a138 1230 */
8b9c99d9 1231static void *qemu_safe_ram_ptr(ram_addr_t addr)
b2e0a138
MT
1232{
1233 RAMBlock *block;
1234
a3161038 1235 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1236 if (addr - block->offset < block->length) {
868bb33f 1237 if (xen_enabled()) {
432d268c
JN
1238 /* We need to check if the requested address is in the RAM
1239 * because we don't want to map the entire memory in QEMU.
712c2b41 1240 * In that case just map until the end of the page.
432d268c
JN
1241 */
1242 if (block->offset == 0) {
e41d7c69 1243 return xen_map_cache(addr, 0, 0);
432d268c 1244 } else if (block->host == NULL) {
e41d7c69
JK
1245 block->host =
1246 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
1247 }
1248 }
b2e0a138
MT
1249 return block->host + (addr - block->offset);
1250 }
1251 }
1252
1253 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1254 abort();
1255
1256 return NULL;
1257}
1258
38bee5dc
SS
1259/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1260 * but takes a size argument */
8b9c99d9 1261static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 1262{
8ab934f9
SS
1263 if (*size == 0) {
1264 return NULL;
1265 }
868bb33f 1266 if (xen_enabled()) {
e41d7c69 1267 return xen_map_cache(addr, *size, 1);
868bb33f 1268 } else {
38bee5dc
SS
1269 RAMBlock *block;
1270
a3161038 1271 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1272 if (addr - block->offset < block->length) {
1273 if (addr - block->offset + *size > block->length)
1274 *size = block->length - addr + block->offset;
1275 return block->host + (addr - block->offset);
1276 }
1277 }
1278
1279 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1280 abort();
38bee5dc
SS
1281 }
1282}
1283
050a0ddf
AP
1284void qemu_put_ram_ptr(void *addr)
1285{
1286 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
1287}
1288
e890261f 1289int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1290{
94a6b54f
PB
1291 RAMBlock *block;
1292 uint8_t *host = ptr;
1293
868bb33f 1294 if (xen_enabled()) {
e41d7c69 1295 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
1296 return 0;
1297 }
1298
a3161038 1299 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
1300 /* This case happens when the block is not mapped. */
1301 if (block->host == NULL) {
1302 continue;
1303 }
f471a17e 1304 if (host - block->host < block->length) {
e890261f
MT
1305 *ram_addr = block->offset + (host - block->host);
1306 return 0;
f471a17e 1307 }
94a6b54f 1308 }
432d268c 1309
e890261f
MT
1310 return -1;
1311}
f471a17e 1312
e890261f
MT
1313/* Some of the softmmu routines need to translate from a host pointer
1314 (typically a TLB entry) back to a ram offset. */
1315ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1316{
1317 ram_addr_t ram_addr;
f471a17e 1318
e890261f
MT
1319 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1320 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1321 abort();
1322 }
1323 return ram_addr;
5579c7f3
PB
1324}
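/* Illustrative sketch (editor's addition, not part of exec.c): for an
 * address inside a mapped RAMBlock the two translations above are inverses
 * of each other. */
static void example_ram_addr_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);

    assert(qemu_ram_addr_from_host_nofail(host) == addr);
}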
1325
a8170e5e 1326static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1327 unsigned size)
e18231a3
BS
1328{
1329#ifdef DEBUG_UNASSIGNED
1330 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1331#endif
5b450407 1332#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1333 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
1334#endif
1335 return 0;
1336}
1337
a8170e5e 1338static void unassigned_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1339 uint64_t val, unsigned size)
e18231a3
BS
1340{
1341#ifdef DEBUG_UNASSIGNED
0e0df1e2 1342 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 1343#endif
5b450407 1344#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1345 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 1346#endif
33417e70
FB
1347}
1348
0e0df1e2
AK
1349static const MemoryRegionOps unassigned_mem_ops = {
1350 .read = unassigned_mem_read,
1351 .write = unassigned_mem_write,
1352 .endianness = DEVICE_NATIVE_ENDIAN,
1353};
e18231a3 1354
a8170e5e 1355static uint64_t error_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1356 unsigned size)
e18231a3 1357{
0e0df1e2 1358 abort();
e18231a3
BS
1359}
1360
a8170e5e 1361static void error_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1362 uint64_t value, unsigned size)
e18231a3 1363{
0e0df1e2 1364 abort();
33417e70
FB
1365}
1366
0e0df1e2
AK
1367static const MemoryRegionOps error_mem_ops = {
1368 .read = error_mem_read,
1369 .write = error_mem_write,
1370 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
1371};
1372
0e0df1e2
AK
1373static const MemoryRegionOps rom_mem_ops = {
1374 .read = error_mem_read,
1375 .write = unassigned_mem_write,
1376 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
1377};
1378
a8170e5e 1379static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1380 uint64_t val, unsigned size)
9fa3e853 1381{
3a7d929e 1382 int dirty_flags;
f7c11b53 1383 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1384 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1385#if !defined(CONFIG_USER_ONLY)
0e0df1e2 1386 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1387 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 1388#endif
3a7d929e 1389 }
0e0df1e2
AK
1390 switch (size) {
1391 case 1:
1392 stb_p(qemu_get_ram_ptr(ram_addr), val);
1393 break;
1394 case 2:
1395 stw_p(qemu_get_ram_ptr(ram_addr), val);
1396 break;
1397 case 4:
1398 stl_p(qemu_get_ram_ptr(ram_addr), val);
1399 break;
1400 default:
1401 abort();
3a7d929e 1402 }
f23db169 1403 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1404 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1405 /* we remove the notdirty callback only if the code has been
1406 flushed */
1407 if (dirty_flags == 0xff)
2e70f6ef 1408 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
1409}
1410
0e0df1e2
AK
1411static const MemoryRegionOps notdirty_mem_ops = {
1412 .read = error_mem_read,
1413 .write = notdirty_mem_write,
1414 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1415};
1416
0f459d16 1417/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1418static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1419{
9349b4f9 1420 CPUArchState *env = cpu_single_env;
06d55cc1 1421 target_ulong pc, cs_base;
0f459d16 1422 target_ulong vaddr;
a1d1bb31 1423 CPUWatchpoint *wp;
06d55cc1 1424 int cpu_flags;
0f459d16 1425
06d55cc1
AL
1426 if (env->watchpoint_hit) {
1427 /* We re-entered the check after replacing the TB. Now raise
1428 * the debug interrupt so that is will trigger after the
1429 * current instruction. */
1430 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1431 return;
1432 }
2e70f6ef 1433 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1434 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1435 if ((vaddr == (wp->vaddr & len_mask) ||
1436 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1437 wp->flags |= BP_WATCHPOINT_HIT;
1438 if (!env->watchpoint_hit) {
1439 env->watchpoint_hit = wp;
5a316526 1440 tb_check_watchpoint(env);
6e140f28
AL
1441 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1442 env->exception_index = EXCP_DEBUG;
488d6577 1443 cpu_loop_exit(env);
6e140f28
AL
1444 } else {
1445 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1446 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1447 cpu_resume_from_signal(env, NULL);
6e140f28 1448 }
06d55cc1 1449 }
6e140f28
AL
1450 } else {
1451 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1452 }
1453 }
1454}
1455
6658ffb8
PB
1456/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1457 so these check for a hit then pass through to the normal out-of-line
1458 phys routines. */
a8170e5e 1459static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1460 unsigned size)
6658ffb8 1461{
1ec9b909
AK
1462 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1463 switch (size) {
1464 case 1: return ldub_phys(addr);
1465 case 2: return lduw_phys(addr);
1466 case 4: return ldl_phys(addr);
1467 default: abort();
1468 }
6658ffb8
PB
1469}
1470
a8170e5e 1471static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1472 uint64_t val, unsigned size)
6658ffb8 1473{
1ec9b909
AK
1474 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1475 switch (size) {
67364150
MF
1476 case 1:
1477 stb_phys(addr, val);
1478 break;
1479 case 2:
1480 stw_phys(addr, val);
1481 break;
1482 case 4:
1483 stl_phys(addr, val);
1484 break;
1ec9b909
AK
1485 default: abort();
1486 }
6658ffb8
PB
1487}
1488
1ec9b909
AK
1489static const MemoryRegionOps watch_mem_ops = {
1490 .read = watch_mem_read,
1491 .write = watch_mem_write,
1492 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1493};
6658ffb8 1494
a8170e5e 1495static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1496 unsigned len)
db7b5426 1497{
70c68e44 1498 subpage_t *mmio = opaque;
f6405247 1499 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1500 MemoryRegionSection *section;
db7b5426
BS
1501#if defined(DEBUG_SUBPAGE)
1502 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1503 mmio, len, addr, idx);
1504#endif
db7b5426 1505
5312bd8b
AK
1506 section = &phys_sections[mmio->sub_section[idx]];
1507 addr += mmio->base;
1508 addr -= section->offset_within_address_space;
1509 addr += section->offset_within_region;
37ec01d4 1510 return io_mem_read(section->mr, addr, len);
db7b5426
BS
1511}
1512
a8170e5e 1513static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1514 uint64_t value, unsigned len)
db7b5426 1515{
70c68e44 1516 subpage_t *mmio = opaque;
f6405247 1517 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1518 MemoryRegionSection *section;
db7b5426 1519#if defined(DEBUG_SUBPAGE)
70c68e44
AK
1520 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1521 " idx %d value %"PRIx64"\n",
f6405247 1522 __func__, mmio, len, addr, idx, value);
db7b5426 1523#endif
f6405247 1524
5312bd8b
AK
1525 section = &phys_sections[mmio->sub_section[idx]];
1526 addr += mmio->base;
1527 addr -= section->offset_within_address_space;
1528 addr += section->offset_within_region;
37ec01d4 1529 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
1530}
1531
70c68e44
AK
1532static const MemoryRegionOps subpage_ops = {
1533 .read = subpage_read,
1534 .write = subpage_write,
1535 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1536};
1537
a8170e5e 1538static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
de712f94 1539 unsigned size)
56384e8b
AF
1540{
1541 ram_addr_t raddr = addr;
1542 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
1543 switch (size) {
1544 case 1: return ldub_p(ptr);
1545 case 2: return lduw_p(ptr);
1546 case 4: return ldl_p(ptr);
1547 default: abort();
1548 }
56384e8b
AF
1549}
1550
a8170e5e 1551static void subpage_ram_write(void *opaque, hwaddr addr,
de712f94 1552 uint64_t value, unsigned size)
56384e8b
AF
1553{
1554 ram_addr_t raddr = addr;
1555 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
1556 switch (size) {
1557 case 1: return stb_p(ptr, value);
1558 case 2: return stw_p(ptr, value);
1559 case 4: return stl_p(ptr, value);
1560 default: abort();
1561 }
56384e8b
AF
1562}
1563
de712f94
AK
1564static const MemoryRegionOps subpage_ram_ops = {
1565 .read = subpage_ram_read,
1566 .write = subpage_ram_write,
1567 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
1568};
1569
c227f099 1570static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1571 uint16_t section)
db7b5426
BS
1572{
1573 int idx, eidx;
1574
1575 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1576 return -1;
1577 idx = SUBPAGE_IDX(start);
1578 eidx = SUBPAGE_IDX(end);
1579#if defined(DEBUG_SUBPAGE)
0bf9e31a 1580 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
1581 mmio, start, end, idx, eidx, section);
1582#endif
5312bd8b
AK
1583 if (memory_region_is_ram(phys_sections[section].mr)) {
1584 MemoryRegionSection new_section = phys_sections[section];
1585 new_section.mr = &io_mem_subpage_ram;
1586 section = phys_section_add(&new_section);
56384e8b 1587 }
db7b5426 1588 for (; idx <= eidx; idx++) {
5312bd8b 1589 mmio->sub_section[idx] = section;
db7b5426
BS
1590 }
1591
1592 return 0;
1593}
1594
a8170e5e 1595static subpage_t *subpage_init(hwaddr base)
db7b5426 1596{
c227f099 1597 subpage_t *mmio;
db7b5426 1598
7267c094 1599 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
1600
1601 mmio->base = base;
70c68e44
AK
1602 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1603 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1604 mmio->iomem.subpage = true;
db7b5426 1605#if defined(DEBUG_SUBPAGE)
1eec614b
AL
1606 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1607 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1608#endif
0f0cb164 1609 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
1610
1611 return mmio;
1612}
1613
5312bd8b
AK
1614static uint16_t dummy_section(MemoryRegion *mr)
1615{
1616 MemoryRegionSection section = {
1617 .mr = mr,
1618 .offset_within_address_space = 0,
1619 .offset_within_region = 0,
1620 .size = UINT64_MAX,
1621 };
1622
1623 return phys_section_add(&section);
1624}
1625
a8170e5e 1626MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1627{
37ec01d4 1628 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1629}
1630
e9179ce1
AK
1631static void io_mem_init(void)
1632{
0e0df1e2 1633 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
1634 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1635 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1636 "unassigned", UINT64_MAX);
1637 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1638 "notdirty", UINT64_MAX);
de712f94
AK
1639 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1640 "subpage-ram", UINT64_MAX);
1ec9b909
AK
1641 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1642 "watch", UINT64_MAX);
e9179ce1
AK
1643}
1644
ac1970fb
AK
1645static void mem_begin(MemoryListener *listener)
1646{
1647 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1648
1649 destroy_all_mappings(d);
1650 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1651}
1652
50c1e149
AK
1653static void core_begin(MemoryListener *listener)
1654{
5312bd8b
AK
1655 phys_sections_clear();
1656 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
1657 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1658 phys_section_rom = dummy_section(&io_mem_rom);
1659 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
1660}
1661
1d71148e 1662static void tcg_commit(MemoryListener *listener)
50c1e149 1663{
9349b4f9 1664 CPUArchState *env;
117712c3
AK
1665
1666 /* since each CPU stores ram addresses in its TLB cache, we must
1667 reset the modified entries */
1668 /* XXX: slow ! */
1669 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1670 tlb_flush(env, 1);
1671 }
50c1e149
AK
1672}
1673
93632747
AK
1674static void core_log_global_start(MemoryListener *listener)
1675{
1676 cpu_physical_memory_set_dirty_tracking(1);
1677}
1678
1679static void core_log_global_stop(MemoryListener *listener)
1680{
1681 cpu_physical_memory_set_dirty_tracking(0);
1682}
1683
4855d41a
AK
1684static void io_region_add(MemoryListener *listener,
1685 MemoryRegionSection *section)
1686{
a2d33521
AK
1687 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1688
1689 mrio->mr = section->mr;
1690 mrio->offset = section->offset_within_region;
1691 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 1692 section->offset_within_address_space, section->size);
a2d33521 1693 ioport_register(&mrio->iorange);
4855d41a
AK
1694}
1695
1696static void io_region_del(MemoryListener *listener,
1697 MemoryRegionSection *section)
1698{
1699 isa_unassign_ioport(section->offset_within_address_space, section->size);
1700}
1701
93632747 1702static MemoryListener core_memory_listener = {
50c1e149 1703 .begin = core_begin,
93632747
AK
1704 .log_global_start = core_log_global_start,
1705 .log_global_stop = core_log_global_stop,
ac1970fb 1706 .priority = 1,
93632747
AK
1707};
1708
4855d41a
AK
1709static MemoryListener io_memory_listener = {
1710 .region_add = io_region_add,
1711 .region_del = io_region_del,
4855d41a
AK
1712 .priority = 0,
1713};
1714
1d71148e
AK
1715static MemoryListener tcg_memory_listener = {
1716 .commit = tcg_commit,
1717};
1718
ac1970fb
AK
1719void address_space_init_dispatch(AddressSpace *as)
1720{
1721 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1722
1723 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1724 d->listener = (MemoryListener) {
1725 .begin = mem_begin,
1726 .region_add = mem_add,
1727 .region_nop = mem_add,
1728 .priority = 0,
1729 };
1730 as->dispatch = d;
1731 memory_listener_register(&d->listener, as);
1732}
1733
83f3c251
AK
1734void address_space_destroy_dispatch(AddressSpace *as)
1735{
1736 AddressSpaceDispatch *d = as->dispatch;
1737
1738 memory_listener_unregister(&d->listener);
1739 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1740 g_free(d);
1741 as->dispatch = NULL;
1742}
1743
62152b8a
AK
1744static void memory_map_init(void)
1745{
7267c094 1746 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1747 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1748 address_space_init(&address_space_memory, system_memory);
1749 address_space_memory.name = "memory";
309cb471 1750
7267c094 1751 system_io = g_malloc(sizeof(*system_io));
309cb471 1752 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1753 address_space_init(&address_space_io, system_io);
1754 address_space_io.name = "I/O";
93632747 1755
f6790af6
AK
1756 memory_listener_register(&core_memory_listener, &address_space_memory);
1757 memory_listener_register(&io_memory_listener, &address_space_io);
1758 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1759
1760 dma_context_init(&dma_context_memory, &address_space_memory,
1761 NULL, NULL, NULL);
62152b8a
AK
1762}
1763
1764MemoryRegion *get_system_memory(void)
1765{
1766 return system_memory;
1767}
1768
309cb471
AK
1769MemoryRegion *get_system_io(void)
1770{
1771 return system_io;
1772}
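/*
 * Illustrative sketch (not part of the original exec.c): board code would
 * typically allocate a RAM region and attach it to the tree returned by
 * get_system_memory().  The name and size below are hypothetical, and the
 * exact RAM-init helper may differ between QEMU versions of this era.
 */
static void example_init_board_ram(void)
{
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", 128 * 1024 * 1024);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}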
1773
e2eef170
PB
1774#endif /* !defined(CONFIG_USER_ONLY) */
1775
13eb76e0
FB
1776/* physical memory access (slow version, mainly for debug) */
1777#if defined(CONFIG_USER_ONLY)
9349b4f9 1778int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1779 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1780{
1781 int l, flags;
1782 target_ulong page;
53a5960a 1783 void * p;
13eb76e0
FB
1784
1785 while (len > 0) {
1786 page = addr & TARGET_PAGE_MASK;
1787 l = (page + TARGET_PAGE_SIZE) - addr;
1788 if (l > len)
1789 l = len;
1790 flags = page_get_flags(page);
1791 if (!(flags & PAGE_VALID))
a68fe89c 1792 return -1;
13eb76e0
FB
1793 if (is_write) {
1794 if (!(flags & PAGE_WRITE))
a68fe89c 1795 return -1;
579a97f7 1796 /* XXX: this code should not depend on lock_user */
72fb7daa 1797 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1798 return -1;
72fb7daa
AJ
1799 memcpy(p, buf, l);
1800 unlock_user(p, addr, l);
13eb76e0
FB
1801 } else {
1802 if (!(flags & PAGE_READ))
a68fe89c 1803 return -1;
579a97f7 1804 /* XXX: this code should not depend on lock_user */
72fb7daa 1805 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1806 return -1;
72fb7daa 1807 memcpy(buf, p, l);
5b257578 1808 unlock_user(p, addr, 0);
13eb76e0
FB
1809 }
1810 len -= l;
1811 buf += l;
1812 addr += l;
1813 }
a68fe89c 1814 return 0;
13eb76e0 1815}
8df1cd07 1816
13eb76e0 1817#else
51d7a9eb 1818
a8170e5e
AK
1819static void invalidate_and_set_dirty(hwaddr addr,
1820 hwaddr length)
51d7a9eb
AP
1821{
1822 if (!cpu_physical_memory_is_dirty(addr)) {
1823 /* invalidate code */
1824 tb_invalidate_phys_page_range(addr, addr + length, 0);
1825 /* set dirty bit */
1826 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1827 }
e226939d 1828 xen_modified_memory(addr, length);
51d7a9eb
AP
1829}
1830
a8170e5e 1831void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1832 int len, bool is_write)
13eb76e0 1833{
ac1970fb 1834 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1835 int l;
13eb76e0
FB
1836 uint8_t *ptr;
1837 uint32_t val;
a8170e5e 1838 hwaddr page;
f3705d53 1839 MemoryRegionSection *section;
3b46e624 1840
13eb76e0
FB
1841 while (len > 0) {
1842 page = addr & TARGET_PAGE_MASK;
1843 l = (page + TARGET_PAGE_SIZE) - addr;
1844 if (l > len)
1845 l = len;
ac1970fb 1846 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1847
13eb76e0 1848 if (is_write) {
f3705d53 1849 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1850 hwaddr addr1;
cc5bea60 1851 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
1852 /* XXX: could force cpu_single_env to NULL to avoid
1853 potential bugs */
6c2934db 1854 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1855 /* 32 bit write access */
c27004ec 1856 val = ldl_p(buf);
37ec01d4 1857 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1858 l = 4;
6c2934db 1859 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1860 /* 16 bit write access */
c27004ec 1861 val = lduw_p(buf);
37ec01d4 1862 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
1863 l = 2;
1864 } else {
1c213d19 1865 /* 8 bit write access */
c27004ec 1866 val = ldub_p(buf);
37ec01d4 1867 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
1868 l = 1;
1869 }
f3705d53 1870 } else if (!section->readonly) {
8ca5692d 1871 ram_addr_t addr1;
f3705d53 1872 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1873 + memory_region_section_addr(section, addr);
13eb76e0 1874 /* RAM case */
5579c7f3 1875 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1876 memcpy(ptr, buf, l);
51d7a9eb 1877 invalidate_and_set_dirty(addr1, l);
050a0ddf 1878 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1879 }
1880 } else {
cc5bea60
BS
1881 if (!(memory_region_is_ram(section->mr) ||
1882 memory_region_is_romd(section->mr))) {
a8170e5e 1883 hwaddr addr1;
13eb76e0 1884 /* I/O case */
cc5bea60 1885 addr1 = memory_region_section_addr(section, addr);
6c2934db 1886 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1887 /* 32 bit read access */
37ec01d4 1888 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1889 stl_p(buf, val);
13eb76e0 1890 l = 4;
6c2934db 1891 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1892 /* 16 bit read access */
37ec01d4 1893 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1894 stw_p(buf, val);
13eb76e0
FB
1895 l = 2;
1896 } else {
1c213d19 1897 /* 8 bit read access */
37ec01d4 1898 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1899 stb_p(buf, val);
13eb76e0
FB
1900 l = 1;
1901 }
1902 } else {
1903 /* RAM case */
0a1b357f 1904 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
1905 + memory_region_section_addr(section,
1906 addr));
f3705d53 1907 memcpy(buf, ptr, l);
050a0ddf 1908 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1909 }
1910 }
1911 len -= l;
1912 buf += l;
1913 addr += l;
1914 }
1915}
8df1cd07 1916
a8170e5e 1917void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1918 const uint8_t *buf, int len)
1919{
1920 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1921}
1922
1923/**
1924 * address_space_read: read from an address space.
1925 *
1926 * @as: #AddressSpace to be accessed
1927 * @addr: address within that address space
 1928 * @buf: buffer that receives the data read; @len gives its length in bytes
1929 */
a8170e5e 1930void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
1931{
1932 address_space_rw(as, addr, buf, len, false);
1933}
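/*
 * Illustrative sketch (not part of the original exec.c): how a caller might
 * drive the helpers above.  The guest addresses and copy size are
 * hypothetical; real device models usually go through their own DMA context.
 */
static void example_copy_guest_bytes(AddressSpace *as, hwaddr src, hwaddr dst)
{
    uint8_t scratch[64];

    address_space_read(as, src, scratch, sizeof(scratch));
    address_space_write(as, dst, scratch, sizeof(scratch));
}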
1934
1935
a8170e5e 1936void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1937 int len, int is_write)
1938{
1939 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1940}
1941
d0ecd2aa 1942/* used for ROM loading: can write in RAM and ROM */
a8170e5e 1943void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1944 const uint8_t *buf, int len)
1945{
ac1970fb 1946 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
1947 int l;
1948 uint8_t *ptr;
a8170e5e 1949 hwaddr page;
f3705d53 1950 MemoryRegionSection *section;
3b46e624 1951
d0ecd2aa
FB
1952 while (len > 0) {
1953 page = addr & TARGET_PAGE_MASK;
1954 l = (page + TARGET_PAGE_SIZE) - addr;
1955 if (l > len)
1956 l = len;
ac1970fb 1957 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1958
cc5bea60
BS
1959 if (!(memory_region_is_ram(section->mr) ||
1960 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
1961 /* do nothing */
1962 } else {
1963 unsigned long addr1;
f3705d53 1964 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1965 + memory_region_section_addr(section, addr);
d0ecd2aa 1966 /* ROM/RAM case */
5579c7f3 1967 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 1968 memcpy(ptr, buf, l);
51d7a9eb 1969 invalidate_and_set_dirty(addr1, l);
050a0ddf 1970 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
1971 }
1972 len -= l;
1973 buf += l;
1974 addr += l;
1975 }
1976}
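/*
 * Illustrative sketch (not part of the original exec.c): a firmware loader
 * copying an image into a ROM region would use the function above, because a
 * plain address_space_rw() write to a read-only section is silently dropped.
 * The image pointer and base address are hypothetical.
 */
static void example_load_firmware(const uint8_t *image, int size, hwaddr base)
{
    cpu_physical_memory_write_rom(base, image, size);
}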
1977
6d16c2f8
AL
1978typedef struct {
1979 void *buffer;
a8170e5e
AK
1980 hwaddr addr;
1981 hwaddr len;
6d16c2f8
AL
1982} BounceBuffer;
1983
1984static BounceBuffer bounce;
1985
ba223c29
AL
1986typedef struct MapClient {
1987 void *opaque;
1988 void (*callback)(void *opaque);
72cf2d4f 1989 QLIST_ENTRY(MapClient) link;
ba223c29
AL
1990} MapClient;
1991
72cf2d4f
BS
1992static QLIST_HEAD(map_client_list, MapClient) map_client_list
1993 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
1994
1995void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
1996{
7267c094 1997 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
1998
1999 client->opaque = opaque;
2000 client->callback = callback;
72cf2d4f 2001 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2002 return client;
2003}
2004
8b9c99d9 2005static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2006{
2007 MapClient *client = (MapClient *)_client;
2008
72cf2d4f 2009 QLIST_REMOVE(client, link);
7267c094 2010 g_free(client);
ba223c29
AL
2011}
2012
2013static void cpu_notify_map_clients(void)
2014{
2015 MapClient *client;
2016
72cf2d4f
BS
2017 while (!QLIST_EMPTY(&map_client_list)) {
2018 client = QLIST_FIRST(&map_client_list);
ba223c29 2019 client->callback(client->opaque);
34d5e948 2020 cpu_unregister_map_client(client);
ba223c29
AL
2021 }
2022}
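/*
 * Illustrative sketch (not part of the original exec.c): a device model whose
 * address_space_map() attempt failed (for example because the single bounce
 * buffer was busy) can register a callback and retry once
 * cpu_notify_map_clients() fires.  The device state and retry hook here are
 * hypothetical.
 */
static void example_retry_dma(void *opaque)
{
    /* opaque is the device state registered below; restart the transfer. */
}

static void example_schedule_dma_retry(void *device_state)
{
    cpu_register_map_client(device_state, example_retry_dma);
}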
2023
6d16c2f8
AL
2024/* Map a physical memory region into a host virtual address.
2025 * May map a subset of the requested range, given by and returned in *plen.
2026 * May return NULL if resources needed to perform the mapping are exhausted.
2027 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2028 * Use cpu_register_map_client() to know when retrying the map operation is
2029 * likely to succeed.
6d16c2f8 2030 */
ac1970fb 2031void *address_space_map(AddressSpace *as,
a8170e5e
AK
2032 hwaddr addr,
2033 hwaddr *plen,
ac1970fb 2034 bool is_write)
6d16c2f8 2035{
ac1970fb 2036 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
2037 hwaddr len = *plen;
2038 hwaddr todo = 0;
6d16c2f8 2039 int l;
a8170e5e 2040 hwaddr page;
f3705d53 2041 MemoryRegionSection *section;
f15fbc4b 2042 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2043 ram_addr_t rlen;
2044 void *ret;
6d16c2f8
AL
2045
2046 while (len > 0) {
2047 page = addr & TARGET_PAGE_MASK;
2048 l = (page + TARGET_PAGE_SIZE) - addr;
2049 if (l > len)
2050 l = len;
ac1970fb 2051 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2052
f3705d53 2053 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2054 if (todo || bounce.buffer) {
6d16c2f8
AL
2055 break;
2056 }
2057 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2058 bounce.addr = addr;
2059 bounce.len = l;
2060 if (!is_write) {
ac1970fb 2061 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2062 }
38bee5dc
SS
2063
2064 *plen = l;
2065 return bounce.buffer;
6d16c2f8 2066 }
8ab934f9 2067 if (!todo) {
f3705d53 2068 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2069 + memory_region_section_addr(section, addr);
8ab934f9 2070 }
6d16c2f8
AL
2071
2072 len -= l;
2073 addr += l;
38bee5dc 2074 todo += l;
6d16c2f8 2075 }
8ab934f9
SS
2076 rlen = todo;
2077 ret = qemu_ram_ptr_length(raddr, &rlen);
2078 *plen = rlen;
2079 return ret;
6d16c2f8
AL
2080}
2081
ac1970fb 2082/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2083 * Will also mark the memory as dirty if is_write == 1. access_len gives
2084 * the amount of memory that was actually read or written by the caller.
2085 */
a8170e5e
AK
2086void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2087 int is_write, hwaddr access_len)
6d16c2f8
AL
2088{
2089 if (buffer != bounce.buffer) {
2090 if (is_write) {
e890261f 2091 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2092 while (access_len) {
2093 unsigned l;
2094 l = TARGET_PAGE_SIZE;
2095 if (l > access_len)
2096 l = access_len;
51d7a9eb 2097 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2098 addr1 += l;
2099 access_len -= l;
2100 }
2101 }
868bb33f 2102 if (xen_enabled()) {
e41d7c69 2103 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2104 }
6d16c2f8
AL
2105 return;
2106 }
2107 if (is_write) {
ac1970fb 2108 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2109 }
f8a83245 2110 qemu_vfree(bounce.buffer);
6d16c2f8 2111 bounce.buffer = NULL;
ba223c29 2112 cpu_notify_map_clients();
6d16c2f8 2113}
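/*
 * Illustrative sketch (not part of the original exec.c): the usual pattern
 * around address_space_map()/address_space_unmap().  *plen may come back
 * shorter than requested, and NULL means the mapping must be retried later
 * (see cpu_register_map_client() above).  Names are hypothetical.
 */
static bool example_zero_guest_region(AddressSpace *as, hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *host = address_space_map(as, addr, &plen, true);

    if (!host) {
        return false;              /* resources exhausted; retry later */
    }
    memset(host, 0, plen);         /* only plen bytes are actually mapped */
    address_space_unmap(as, host, plen, true, plen);
    return plen == len;            /* caller must loop if the mapping was short */
}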
d0ecd2aa 2114
a8170e5e
AK
2115void *cpu_physical_memory_map(hwaddr addr,
2116 hwaddr *plen,
ac1970fb
AK
2117 int is_write)
2118{
2119 return address_space_map(&address_space_memory, addr, plen, is_write);
2120}
2121
a8170e5e
AK
2122void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2123 int is_write, hwaddr access_len)
ac1970fb
AK
2124{
2125 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2126}
2127
8df1cd07 2128/* warning: addr must be aligned */
a8170e5e 2129static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2130 enum device_endian endian)
8df1cd07 2131{
8df1cd07
FB
2132 uint8_t *ptr;
2133 uint32_t val;
f3705d53 2134 MemoryRegionSection *section;
8df1cd07 2135
ac1970fb 2136 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2137
cc5bea60
BS
2138 if (!(memory_region_is_ram(section->mr) ||
2139 memory_region_is_romd(section->mr))) {
8df1cd07 2140 /* I/O case */
cc5bea60 2141 addr = memory_region_section_addr(section, addr);
37ec01d4 2142 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
2143#if defined(TARGET_WORDS_BIGENDIAN)
2144 if (endian == DEVICE_LITTLE_ENDIAN) {
2145 val = bswap32(val);
2146 }
2147#else
2148 if (endian == DEVICE_BIG_ENDIAN) {
2149 val = bswap32(val);
2150 }
2151#endif
8df1cd07
FB
2152 } else {
2153 /* RAM case */
f3705d53 2154 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2155 & TARGET_PAGE_MASK)
cc5bea60 2156 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2157 switch (endian) {
2158 case DEVICE_LITTLE_ENDIAN:
2159 val = ldl_le_p(ptr);
2160 break;
2161 case DEVICE_BIG_ENDIAN:
2162 val = ldl_be_p(ptr);
2163 break;
2164 default:
2165 val = ldl_p(ptr);
2166 break;
2167 }
8df1cd07
FB
2168 }
2169 return val;
2170}
2171
a8170e5e 2172uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2173{
2174 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2175}
2176
a8170e5e 2177uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2178{
2179 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2180}
2181
a8170e5e 2182uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2183{
2184 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2185}
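/*
 * Illustrative sketch (not part of the original exec.c): a device whose
 * in-guest descriptors are specified as little-endian would use the _le_
 * accessors above so the value is correct on both big- and little-endian
 * targets.  The descriptor layout is hypothetical.
 */
static uint64_t example_read_le_descriptor(hwaddr desc)
{
    uint32_t lo = ldl_le_phys(desc);       /* bytes 0..3 of the descriptor */
    uint32_t hi = ldl_le_phys(desc + 4);   /* bytes 4..7 of the descriptor */

    return ((uint64_t)hi << 32) | lo;
}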
2186
84b7b8e7 2187/* warning: addr must be aligned */
a8170e5e 2188static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2189 enum device_endian endian)
84b7b8e7 2190{
84b7b8e7
FB
2191 uint8_t *ptr;
2192 uint64_t val;
f3705d53 2193 MemoryRegionSection *section;
84b7b8e7 2194
ac1970fb 2195 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2196
cc5bea60
BS
2197 if (!(memory_region_is_ram(section->mr) ||
2198 memory_region_is_romd(section->mr))) {
84b7b8e7 2199 /* I/O case */
cc5bea60 2200 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
2201
2202 /* XXX This is broken when device endian != cpu endian.
2203 Fix and add "endian" variable check */
84b7b8e7 2204#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2205 val = io_mem_read(section->mr, addr, 4) << 32;
2206 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2207#else
37ec01d4
AK
2208 val = io_mem_read(section->mr, addr, 4);
2209 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
2210#endif
2211 } else {
2212 /* RAM case */
f3705d53 2213 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2214 & TARGET_PAGE_MASK)
cc5bea60 2215 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2216 switch (endian) {
2217 case DEVICE_LITTLE_ENDIAN:
2218 val = ldq_le_p(ptr);
2219 break;
2220 case DEVICE_BIG_ENDIAN:
2221 val = ldq_be_p(ptr);
2222 break;
2223 default:
2224 val = ldq_p(ptr);
2225 break;
2226 }
84b7b8e7
FB
2227 }
2228 return val;
2229}
2230
a8170e5e 2231uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2232{
2233 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2234}
2235
a8170e5e 2236uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2237{
2238 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2239}
2240
a8170e5e 2241uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2242{
2243 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2244}
2245
aab33094 2246/* XXX: optimize */
a8170e5e 2247uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2248{
2249 uint8_t val;
2250 cpu_physical_memory_read(addr, &val, 1);
2251 return val;
2252}
2253
733f0b02 2254/* warning: addr must be aligned */
a8170e5e 2255static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2256 enum device_endian endian)
aab33094 2257{
733f0b02
MT
2258 uint8_t *ptr;
2259 uint64_t val;
f3705d53 2260 MemoryRegionSection *section;
733f0b02 2261
ac1970fb 2262 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2263
cc5bea60
BS
2264 if (!(memory_region_is_ram(section->mr) ||
2265 memory_region_is_romd(section->mr))) {
733f0b02 2266 /* I/O case */
cc5bea60 2267 addr = memory_region_section_addr(section, addr);
37ec01d4 2268 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
2269#if defined(TARGET_WORDS_BIGENDIAN)
2270 if (endian == DEVICE_LITTLE_ENDIAN) {
2271 val = bswap16(val);
2272 }
2273#else
2274 if (endian == DEVICE_BIG_ENDIAN) {
2275 val = bswap16(val);
2276 }
2277#endif
733f0b02
MT
2278 } else {
2279 /* RAM case */
f3705d53 2280 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2281 & TARGET_PAGE_MASK)
cc5bea60 2282 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2283 switch (endian) {
2284 case DEVICE_LITTLE_ENDIAN:
2285 val = lduw_le_p(ptr);
2286 break;
2287 case DEVICE_BIG_ENDIAN:
2288 val = lduw_be_p(ptr);
2289 break;
2290 default:
2291 val = lduw_p(ptr);
2292 break;
2293 }
733f0b02
MT
2294 }
2295 return val;
aab33094
FB
2296}
2297
a8170e5e 2298uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2299{
2300 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2301}
2302
a8170e5e 2303uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2304{
2305 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2306}
2307
a8170e5e 2308uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2309{
2310 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2311}
2312
8df1cd07
FB
2313/* warning: addr must be aligned. The ram page is not masked as dirty
2314 and the code inside is not invalidated. It is useful if the dirty
2315 bits are used to track modified PTEs */
a8170e5e 2316void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2317{
8df1cd07 2318 uint8_t *ptr;
f3705d53 2319 MemoryRegionSection *section;
8df1cd07 2320
ac1970fb 2321 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2322
f3705d53 2323 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2324 addr = memory_region_section_addr(section, addr);
f3705d53 2325 if (memory_region_is_ram(section->mr)) {
37ec01d4 2326 section = &phys_sections[phys_section_rom];
06ef3525 2327 }
37ec01d4 2328 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2329 } else {
f3705d53 2330 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2331 & TARGET_PAGE_MASK)
cc5bea60 2332 + memory_region_section_addr(section, addr);
5579c7f3 2333 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2334 stl_p(ptr, val);
74576198
AL
2335
2336 if (unlikely(in_migration)) {
2337 if (!cpu_physical_memory_is_dirty(addr1)) {
2338 /* invalidate code */
2339 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2340 /* set dirty bit */
f7c11b53
YT
2341 cpu_physical_memory_set_dirty_flags(
2342 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2343 }
2344 }
8df1cd07
FB
2345 }
2346}
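/*
 * Illustrative sketch (not part of the original exec.c): a target MMU helper
 * that sets an "accessed" bit in a guest page-table entry would use
 * stl_phys_notdirty() so the update neither dirties the page nor invalidates
 * translated code covering it, as described in the comment above.  The PTE
 * layout and bit position are hypothetical.
 */
static void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | (1u << 5));
}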
2347
a8170e5e 2348void stq_phys_notdirty(hwaddr addr, uint64_t val)
bc98a7ef 2349{
bc98a7ef 2350 uint8_t *ptr;
f3705d53 2351 MemoryRegionSection *section;
bc98a7ef 2352
ac1970fb 2353 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2354
f3705d53 2355 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2356 addr = memory_region_section_addr(section, addr);
f3705d53 2357 if (memory_region_is_ram(section->mr)) {
37ec01d4 2358 section = &phys_sections[phys_section_rom];
06ef3525 2359 }
bc98a7ef 2360#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2361 io_mem_write(section->mr, addr, val >> 32, 4);
2362 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 2363#else
37ec01d4
AK
2364 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2365 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
2366#endif
2367 } else {
f3705d53 2368 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2369 & TARGET_PAGE_MASK)
cc5bea60 2370 + memory_region_section_addr(section, addr));
bc98a7ef
JM
2371 stq_p(ptr, val);
2372 }
2373}
2374
8df1cd07 2375/* warning: addr must be aligned */
a8170e5e 2376static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2377 enum device_endian endian)
8df1cd07 2378{
8df1cd07 2379 uint8_t *ptr;
f3705d53 2380 MemoryRegionSection *section;
8df1cd07 2381
ac1970fb 2382 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2383
f3705d53 2384 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2385 addr = memory_region_section_addr(section, addr);
f3705d53 2386 if (memory_region_is_ram(section->mr)) {
37ec01d4 2387 section = &phys_sections[phys_section_rom];
06ef3525 2388 }
1e78bcc1
AG
2389#if defined(TARGET_WORDS_BIGENDIAN)
2390 if (endian == DEVICE_LITTLE_ENDIAN) {
2391 val = bswap32(val);
2392 }
2393#else
2394 if (endian == DEVICE_BIG_ENDIAN) {
2395 val = bswap32(val);
2396 }
2397#endif
37ec01d4 2398 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
2399 } else {
2400 unsigned long addr1;
f3705d53 2401 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2402 + memory_region_section_addr(section, addr);
8df1cd07 2403 /* RAM case */
5579c7f3 2404 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2405 switch (endian) {
2406 case DEVICE_LITTLE_ENDIAN:
2407 stl_le_p(ptr, val);
2408 break;
2409 case DEVICE_BIG_ENDIAN:
2410 stl_be_p(ptr, val);
2411 break;
2412 default:
2413 stl_p(ptr, val);
2414 break;
2415 }
51d7a9eb 2416 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2417 }
2418}
2419
a8170e5e 2420void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2421{
2422 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2423}
2424
a8170e5e 2425void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2426{
2427 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2428}
2429
a8170e5e 2430void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2431{
2432 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2433}
2434
aab33094 2435/* XXX: optimize */
a8170e5e 2436void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2437{
2438 uint8_t v = val;
2439 cpu_physical_memory_write(addr, &v, 1);
2440}
2441
733f0b02 2442/* warning: addr must be aligned */
a8170e5e 2443static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2444 enum device_endian endian)
aab33094 2445{
733f0b02 2446 uint8_t *ptr;
f3705d53 2447 MemoryRegionSection *section;
733f0b02 2448
ac1970fb 2449 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2450
f3705d53 2451 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2452 addr = memory_region_section_addr(section, addr);
f3705d53 2453 if (memory_region_is_ram(section->mr)) {
37ec01d4 2454 section = &phys_sections[phys_section_rom];
06ef3525 2455 }
1e78bcc1
AG
2456#if defined(TARGET_WORDS_BIGENDIAN)
2457 if (endian == DEVICE_LITTLE_ENDIAN) {
2458 val = bswap16(val);
2459 }
2460#else
2461 if (endian == DEVICE_BIG_ENDIAN) {
2462 val = bswap16(val);
2463 }
2464#endif
37ec01d4 2465 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
2466 } else {
2467 unsigned long addr1;
f3705d53 2468 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2469 + memory_region_section_addr(section, addr);
733f0b02
MT
2470 /* RAM case */
2471 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2472 switch (endian) {
2473 case DEVICE_LITTLE_ENDIAN:
2474 stw_le_p(ptr, val);
2475 break;
2476 case DEVICE_BIG_ENDIAN:
2477 stw_be_p(ptr, val);
2478 break;
2479 default:
2480 stw_p(ptr, val);
2481 break;
2482 }
51d7a9eb 2483 invalidate_and_set_dirty(addr1, 2);
733f0b02 2484 }
aab33094
FB
2485}
2486
a8170e5e 2487void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2488{
2489 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2490}
2491
a8170e5e 2492void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2493{
2494 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2495}
2496
a8170e5e 2497void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2498{
2499 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2500}
2501
aab33094 2502/* XXX: optimize */
a8170e5e 2503void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2504{
2505 val = tswap64(val);
71d2b725 2506 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2507}
2508
a8170e5e 2509void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2510{
2511 val = cpu_to_le64(val);
2512 cpu_physical_memory_write(addr, &val, 8);
2513}
2514
a8170e5e 2515void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2516{
2517 val = cpu_to_be64(val);
2518 cpu_physical_memory_write(addr, &val, 8);
2519}
2520
5e2972fd 2521/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2522int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2523 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2524{
2525 int l;
a8170e5e 2526 hwaddr phys_addr;
9b3c35e0 2527 target_ulong page;
13eb76e0
FB
2528
2529 while (len > 0) {
2530 page = addr & TARGET_PAGE_MASK;
2531 phys_addr = cpu_get_phys_page_debug(env, page);
2532 /* if no physical page mapped, return an error */
2533 if (phys_addr == -1)
2534 return -1;
2535 l = (page + TARGET_PAGE_SIZE) - addr;
2536 if (l > len)
2537 l = len;
5e2972fd 2538 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2539 if (is_write)
2540 cpu_physical_memory_write_rom(phys_addr, buf, l);
2541 else
5e2972fd 2542 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2543 len -= l;
2544 buf += l;
2545 addr += l;
2546 }
2547 return 0;
2548}
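/*
 * Illustrative sketch (not part of the original exec.c): a debugger stub
 * planting a software breakpoint goes through cpu_memory_rw_debug(), which on
 * the system-emulation side writes via cpu_physical_memory_write_rom() and so
 * also works for code sitting in ROM.  The breakpoint byte is hypothetical.
 */
static int example_plant_breakpoint(CPUArchState *env, target_ulong pc)
{
    uint8_t bp_insn = 0xcc;    /* e.g. x86 'int3'; purely illustrative */

    return cpu_memory_rw_debug(env, pc, &bp_insn, sizeof(bp_insn), 1);
}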
a68fe89c 2549#endif
13eb76e0 2550
b3755a91
PB
2551#if !defined(CONFIG_USER_ONLY)
2552
82afa586
BH
2553/*
2554 * A helper function for the _utterly broken_ virtio device model to find out if
2555 * it's running on a big endian machine. Don't do this at home kids!
2556 */
2557bool virtio_is_big_endian(void);
2558bool virtio_is_big_endian(void)
2559{
2560#if defined(TARGET_WORDS_BIGENDIAN)
2561 return true;
2562#else
2563 return false;
2564#endif
2565}
2566
61382a50 2567#endif
76f35538
WC
2568
2569#ifndef CONFIG_USER_ONLY
a8170e5e 2570bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2571{
2572 MemoryRegionSection *section;
2573
ac1970fb
AK
2574 section = phys_page_find(address_space_memory.dispatch,
2575 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
2576
2577 return !(memory_region_is_ram(section->mr) ||
2578 memory_region_is_romd(section->mr));
2579}
2580#endif