git.proxmox.com Git - qemu.git/blame - exec.c
exec: sort the memory from biggest to smallest
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
432d268c 34#include "hw/xen.h"
35#include "qemu/timer.h"
36#include "qemu/config-file.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
67d95c15 52
67d3b957 53//#define DEBUG_UNASSIGNED
db7b5426 54//#define DEBUG_SUBPAGE
1196be37 55
e2eef170 56#if !defined(CONFIG_USER_ONLY)
9fa3e853 57int phys_ram_fd;
74576198 58static int in_migration;
94a6b54f 59
a3161038 60RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
61
62static MemoryRegion *system_memory;
309cb471 63static MemoryRegion *system_io;
62152b8a 64
65AddressSpace address_space_io;
66AddressSpace address_space_memory;
9e11908f 67DMAContext dma_context_memory;
2673a5da 68
0e0df1e2 69MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
de712f94 70static MemoryRegion io_mem_subpage_ram;
0e0df1e2 71
e2eef170 72#endif
9fa3e853 73
9349b4f9 74CPUArchState *first_cpu;
75/* current CPU in the current thread. It is only valid inside
76 cpu_exec() */
9349b4f9 77DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 78/* 0 = Do not count executed instructions.
bf20dc07 79 1 = Precise instruction counting.
80 2 = Adaptive rate instruction counting. */
81int use_icount = 0;
6a00d601 82
e2eef170 83#if !defined(CONFIG_USER_ONLY)
4346ae3e 84
85static MemoryRegionSection *phys_sections;
86static unsigned phys_sections_nb, phys_sections_nb_alloc;
87static uint16_t phys_section_unassigned;
88static uint16_t phys_section_notdirty;
89static uint16_t phys_section_rom;
90static uint16_t phys_section_watch;
5312bd8b 91
92/* Simple allocator for PhysPageEntry nodes */
93static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
94static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
95
07f07b31 96#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 97
e2eef170 98static void io_mem_init(void);
62152b8a 99static void memory_map_init(void);
8b9c99d9 100static void *qemu_safe_ram_ptr(ram_addr_t addr);
e2eef170 101
1ec9b909 102static MemoryRegion io_mem_watch;
6658ffb8 103#endif
fd6ce8f6 104
6d9a1304 105#if !defined(CONFIG_USER_ONLY)
d6f2ea22 106
f7bf5461 107static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 108{
f7bf5461 109 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
110 typedef PhysPageEntry Node[L2_SIZE];
111 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
112 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
113 phys_map_nodes_nb + nodes);
114 phys_map_nodes = g_renew(Node, phys_map_nodes,
115 phys_map_nodes_nb_alloc);
116 }
117}
118
119static uint16_t phys_map_node_alloc(void)
120{
121 unsigned i;
122 uint16_t ret;
123
124 ret = phys_map_nodes_nb++;
125 assert(ret != PHYS_MAP_NODE_NIL);
126 assert(ret != phys_map_nodes_nb_alloc);
d6f2ea22 127 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 128 phys_map_nodes[ret][i].is_leaf = 0;
c19e8800 129 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 130 }
f7bf5461 131 return ret;
132}
133
134static void phys_map_nodes_reset(void)
135{
136 phys_map_nodes_nb = 0;
137}
138
92e873b9 139
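/* Populate [*index, *index + *nb) of the physical page map with section
   number 'leaf', descending one radix level (L2_BITS wide) per recursion
   and allocating intermediate nodes on demand. */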
140static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
141 hwaddr *nb, uint16_t leaf,
2999097b 142 int level)
143{
144 PhysPageEntry *p;
145 int i;
a8170e5e 146 hwaddr step = (hwaddr)1 << (level * L2_BITS);
108c49b8 147
07f07b31 148 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
149 lp->ptr = phys_map_node_alloc();
150 p = phys_map_nodes[lp->ptr];
151 if (level == 0) {
152 for (i = 0; i < L2_SIZE; i++) {
07f07b31 153 p[i].is_leaf = 1;
c19e8800 154 p[i].ptr = phys_section_unassigned;
4346ae3e 155 }
67c4d23c 156 }
f7bf5461 157 } else {
c19e8800 158 p = phys_map_nodes[lp->ptr];
92e873b9 159 }
2999097b 160 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 161
2999097b 162 while (*nb && lp < &p[L2_SIZE]) {
163 if ((*index & (step - 1)) == 0 && *nb >= step) {
164 lp->is_leaf = true;
c19e8800 165 lp->ptr = leaf;
166 *index += step;
167 *nb -= step;
168 } else {
169 phys_page_set_level(lp, index, nb, leaf, level - 1);
170 }
171 ++lp;
172 }
173}
174
ac1970fb 175static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 176 hwaddr index, hwaddr nb,
2999097b 177 uint16_t leaf)
f7bf5461 178{
2999097b 179 /* Wildly overreserve - it doesn't matter much. */
07f07b31 180 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 181
ac1970fb 182 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
183}
184
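/* Walk the radix tree down to a leaf and return the MemoryRegionSection
   covering physical page 'index'; unmapped pages fall back to the
   unassigned section. */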
a8170e5e 185MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
92e873b9 186{
ac1970fb 187 PhysPageEntry lp = d->phys_map;
188 PhysPageEntry *p;
189 int i;
31ab2b4a 190 uint16_t s_index = phys_section_unassigned;
f1f6e3b8 191
07f07b31 192 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 193 if (lp.ptr == PHYS_MAP_NODE_NIL) {
194 goto not_found;
195 }
c19e8800 196 p = phys_map_nodes[lp.ptr];
31ab2b4a 197 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 198 }
31ab2b4a 199
c19e8800 200 s_index = lp.ptr;
31ab2b4a 201not_found:
202 return &phys_sections[s_index];
203}
204
205bool memory_region_is_unassigned(MemoryRegion *mr)
206{
207 return mr != &io_mem_ram && mr != &io_mem_rom
208 && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 209 && mr != &io_mem_watch;
fd6ce8f6 210}
5b6dd868 211#endif
fd6ce8f6 212
5b6dd868 213void cpu_exec_init_all(void)
fdbb84d1 214{
215#if !defined(CONFIG_USER_ONLY)
216 memory_map_init();
217 io_mem_init();
fdbb84d1 218#endif
5b6dd868 219}
fdbb84d1 220
221#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
222
223static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 224{
5b6dd868 225 CPUArchState *env = opaque;
a513fe19 226
227 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
228 version_id is increased. */
229 env->interrupt_request &= ~0x01;
230 tlb_flush(env, 1);
231
232 return 0;
a513fe19 233}
7501267e 234
235static const VMStateDescription vmstate_cpu_common = {
236 .name = "cpu_common",
237 .version_id = 1,
238 .minimum_version_id = 1,
239 .minimum_version_id_old = 1,
240 .post_load = cpu_common_post_load,
241 .fields = (VMStateField []) {
242 VMSTATE_UINT32(halted, CPUArchState),
243 VMSTATE_UINT32(interrupt_request, CPUArchState),
244 VMSTATE_END_OF_LIST()
245 }
246};
247#endif
ea041c0e 248
5b6dd868 249CPUArchState *qemu_get_cpu(int cpu)
ea041c0e 250{
5b6dd868 251 CPUArchState *env = first_cpu;
ea041c0e 252
253 while (env) {
254 if (env->cpu_index == cpu)
255 break;
256 env = env->next_cpu;
ea041c0e 257 }
258
259 return env;
260}
261
5b6dd868 262void cpu_exec_init(CPUArchState *env)
ea041c0e 263{
264#ifndef CONFIG_USER_ONLY
265 CPUState *cpu = ENV_GET_CPU(env);
266#endif
267 CPUArchState **penv;
268 int cpu_index;
269
270#if defined(CONFIG_USER_ONLY)
271 cpu_list_lock();
272#endif
273 env->next_cpu = NULL;
274 penv = &first_cpu;
275 cpu_index = 0;
276 while (*penv != NULL) {
277 penv = &(*penv)->next_cpu;
278 cpu_index++;
279 }
280 env->cpu_index = cpu_index;
281 env->numa_node = 0;
282 QTAILQ_INIT(&env->breakpoints);
283 QTAILQ_INIT(&env->watchpoints);
284#ifndef CONFIG_USER_ONLY
285 cpu->thread_id = qemu_get_thread_id();
286#endif
287 *penv = env;
288#if defined(CONFIG_USER_ONLY)
289 cpu_list_unlock();
290#endif
291#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
292 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
293 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
294 cpu_save, cpu_load, env);
295#endif
296}
297
1fddef4b 298#if defined(TARGET_HAS_ICE)
94df27fd 299#if defined(CONFIG_USER_ONLY)
9349b4f9 300static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
301{
302 tb_invalidate_phys_page_range(pc, pc + 1, 0);
303}
304#else
305static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
306{
307 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
308 (pc & ~TARGET_PAGE_MASK));
1e7855a5 309}
c27004ec 310#endif
94df27fd 311#endif /* TARGET_HAS_ICE */
d720b93d 312
c527ee8f 313#if defined(CONFIG_USER_ONLY)
9349b4f9 314void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
315
316{
317}
318
9349b4f9 319int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
320 int flags, CPUWatchpoint **watchpoint)
321{
322 return -ENOSYS;
323}
324#else
6658ffb8 325/* Add a watchpoint. */
9349b4f9 326int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 327 int flags, CPUWatchpoint **watchpoint)
6658ffb8 328{
b4051334 329 target_ulong len_mask = ~(len - 1);
c0ce998e 330 CPUWatchpoint *wp;
6658ffb8 331
b4051334 332 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
333 if ((len & (len - 1)) || (addr & ~len_mask) ||
334 len == 0 || len > TARGET_PAGE_SIZE) {
335 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
336 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
337 return -EINVAL;
338 }
7267c094 339 wp = g_malloc(sizeof(*wp));
340
341 wp->vaddr = addr;
b4051334 342 wp->len_mask = len_mask;
343 wp->flags = flags;
344
2dc9f411 345 /* keep all GDB-injected watchpoints in front */
c0ce998e 346 if (flags & BP_GDB)
72cf2d4f 347 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 348 else
72cf2d4f 349 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 350
6658ffb8 351 tlb_flush_page(env, addr);
352
353 if (watchpoint)
354 *watchpoint = wp;
355 return 0;
356}
357
a1d1bb31 358/* Remove a specific watchpoint. */
9349b4f9 359int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 360 int flags)
6658ffb8 361{
b4051334 362 target_ulong len_mask = ~(len - 1);
a1d1bb31 363 CPUWatchpoint *wp;
6658ffb8 364
72cf2d4f 365 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 366 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 367 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 368 cpu_watchpoint_remove_by_ref(env, wp);
369 return 0;
370 }
371 }
a1d1bb31 372 return -ENOENT;
373}
374
a1d1bb31 375/* Remove a specific watchpoint by reference. */
9349b4f9 376void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 377{
72cf2d4f 378 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 379
380 tlb_flush_page(env, watchpoint->vaddr);
381
7267c094 382 g_free(watchpoint);
383}
384
385/* Remove all matching watchpoints. */
9349b4f9 386void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 387{
c0ce998e 388 CPUWatchpoint *wp, *next;
a1d1bb31 389
72cf2d4f 390 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
391 if (wp->flags & mask)
392 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 393 }
7d03f82f 394}
c527ee8f 395#endif
7d03f82f 396
a1d1bb31 397/* Add a breakpoint. */
9349b4f9 398int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 399 CPUBreakpoint **breakpoint)
4c3a88a2 400{
1fddef4b 401#if defined(TARGET_HAS_ICE)
c0ce998e 402 CPUBreakpoint *bp;
3b46e624 403
7267c094 404 bp = g_malloc(sizeof(*bp));
4c3a88a2 405
406 bp->pc = pc;
407 bp->flags = flags;
408
2dc9f411 409 /* keep all GDB-injected breakpoints in front */
c0ce998e 410 if (flags & BP_GDB)
72cf2d4f 411 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 412 else
72cf2d4f 413 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 414
d720b93d 415 breakpoint_invalidate(env, pc);
416
417 if (breakpoint)
418 *breakpoint = bp;
419 return 0;
420#else
a1d1bb31 421 return -ENOSYS;
422#endif
423}
424
a1d1bb31 425/* Remove a specific breakpoint. */
9349b4f9 426int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 427{
7d03f82f 428#if defined(TARGET_HAS_ICE)
429 CPUBreakpoint *bp;
430
72cf2d4f 431 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
432 if (bp->pc == pc && bp->flags == flags) {
433 cpu_breakpoint_remove_by_ref(env, bp);
434 return 0;
435 }
7d03f82f 436 }
437 return -ENOENT;
438#else
439 return -ENOSYS;
440#endif
441}
442
a1d1bb31 443/* Remove a specific breakpoint by reference. */
9349b4f9 444void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 445{
1fddef4b 446#if defined(TARGET_HAS_ICE)
72cf2d4f 447 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 448
449 breakpoint_invalidate(env, breakpoint->pc);
450
7267c094 451 g_free(breakpoint);
452#endif
453}
454
455/* Remove all matching breakpoints. */
9349b4f9 456void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
457{
458#if defined(TARGET_HAS_ICE)
c0ce998e 459 CPUBreakpoint *bp, *next;
a1d1bb31 460
72cf2d4f 461 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
462 if (bp->flags & mask)
463 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 464 }
465#endif
466}
467
468/* enable or disable single step mode. EXCP_DEBUG is returned by the
469 CPU loop after each instruction */
9349b4f9 470void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 471{
1fddef4b 472#if defined(TARGET_HAS_ICE)
473 if (env->singlestep_enabled != enabled) {
474 env->singlestep_enabled = enabled;
475 if (kvm_enabled())
476 kvm_update_guest_debug(env, 0);
477 else {
ccbb4d44 478 /* must flush all the translated code to avoid inconsistencies */
479 /* XXX: only flush what is necessary */
480 tb_flush(env);
481 }
482 }
483#endif
484}
485
9349b4f9 486void cpu_reset_interrupt(CPUArchState *env, int mask)
487{
488 env->interrupt_request &= ~mask;
489}
490
9349b4f9 491void cpu_exit(CPUArchState *env)
492{
493 env->exit_request = 1;
494 cpu_unlink_tb(env);
495}
496
9349b4f9 497void cpu_abort(CPUArchState *env, const char *fmt, ...)
498{
499 va_list ap;
493ae1f0 500 va_list ap2;
501
502 va_start(ap, fmt);
493ae1f0 503 va_copy(ap2, ap);
504 fprintf(stderr, "qemu: fatal: ");
505 vfprintf(stderr, fmt, ap);
506 fprintf(stderr, "\n");
6fd2a026 507 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
508 if (qemu_log_enabled()) {
509 qemu_log("qemu: fatal: ");
510 qemu_log_vprintf(fmt, ap2);
511 qemu_log("\n");
6fd2a026 512 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 513 qemu_log_flush();
93fcfe39 514 qemu_log_close();
924edcae 515 }
493ae1f0 516 va_end(ap2);
f9373291 517 va_end(ap);
518#if defined(CONFIG_USER_ONLY)
519 {
520 struct sigaction act;
521 sigfillset(&act.sa_mask);
522 act.sa_handler = SIG_DFL;
523 sigaction(SIGABRT, &act, NULL);
524 }
525#endif
526 abort();
527}
528
9349b4f9 529CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 530{
531 CPUArchState *new_env = cpu_init(env->cpu_model_str);
532 CPUArchState *next_cpu = new_env->next_cpu;
c5be9f08 533 int cpu_index = new_env->cpu_index;
534#if defined(TARGET_HAS_ICE)
535 CPUBreakpoint *bp;
536 CPUWatchpoint *wp;
537#endif
538
9349b4f9 539 memcpy(new_env, env, sizeof(CPUArchState));
540
541 /* Preserve chaining and index. */
542 new_env->next_cpu = next_cpu;
543 new_env->cpu_index = cpu_index;
544
545 /* Clone all break/watchpoints.
546 Note: Once we support ptrace with hw-debug register access, make sure
547 BP_CPU break/watchpoints are handled correctly on clone. */
548 QTAILQ_INIT(&env->breakpoints);
549 QTAILQ_INIT(&env->watchpoints);
5a38f081 550#if defined(TARGET_HAS_ICE)
72cf2d4f 551 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
552 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
553 }
72cf2d4f 554 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
555 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
556 wp->flags, NULL);
557 }
558#endif
559
560 return new_env;
561}
562
0124311e 563#if !defined(CONFIG_USER_ONLY)
564static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
565 uintptr_t length)
566{
567 uintptr_t start1;
568
569 /* we modify the TLB cache so that the dirty bit will be set again
570 when accessing the range */
571 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
572 /* Check that we don't span multiple blocks - this breaks the
573 address comparisons below. */
574 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
575 != (end - 1) - start) {
576 abort();
577 }
578 cpu_tlb_reset_dirty_all(start1, length);
579
580}
581
5579c7f3 582/* Note: start and end must be within the same ram block. */
c227f099 583void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 584 int dirty_flags)
1ccde1cb 585{
d24981d3 586 uintptr_t length;
587
588 start &= TARGET_PAGE_MASK;
589 end = TARGET_PAGE_ALIGN(end);
590
591 length = end - start;
592 if (length == 0)
593 return;
f7c11b53 594 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 595
596 if (tcg_enabled()) {
597 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 598 }
599}
600
8b9c99d9 601static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 602{
f6f3fbca 603 int ret = 0;
74576198 604 in_migration = enable;
f6f3fbca 605 return ret;
606}
607
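/* Build the iotlb value for a TLB entry: RAM pages get their ram_addr
   combined with the notdirty or rom section, MMIO pages get their section
   index, and pages containing a watchpoint are redirected to the watch
   section so accesses trap. */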
a8170e5e 608hwaddr memory_region_section_get_iotlb(CPUArchState *env,
609 MemoryRegionSection *section,
610 target_ulong vaddr,
a8170e5e 611 hwaddr paddr,
612 int prot,
613 target_ulong *address)
614{
a8170e5e 615 hwaddr iotlb;
616 CPUWatchpoint *wp;
617
cc5bea60 618 if (memory_region_is_ram(section->mr)) {
619 /* Normal RAM. */
620 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 621 + memory_region_section_addr(section, paddr);
622 if (!section->readonly) {
623 iotlb |= phys_section_notdirty;
624 } else {
625 iotlb |= phys_section_rom;
626 }
627 } else {
628 /* IO handlers are currently passed a physical address.
629 It would be nice to pass an offset from the base address
630 of that region. This would avoid having to special case RAM,
631 and avoid full address decoding in every device.
632 We can't use the high bits of pd for this because
633 IO_MEM_ROMD uses these as a ram address. */
634 iotlb = section - phys_sections;
cc5bea60 635 iotlb += memory_region_section_addr(section, paddr);
636 }
637
638 /* Make accesses to pages with watchpoints go via the
639 watchpoint trap routines. */
640 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
641 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
642 /* Avoid trapping reads of pages with a write breakpoint. */
643 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
644 iotlb = phys_section_watch + paddr;
645 *address |= TLB_MMIO;
646 break;
647 }
648 }
649 }
650
651 return iotlb;
652}
653#endif /* defined(CONFIG_USER_ONLY) */
654
e2eef170 655#if !defined(CONFIG_USER_ONLY)
8da3ff18 656
657#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
658typedef struct subpage_t {
70c68e44 659 MemoryRegion iomem;
a8170e5e 660 hwaddr base;
5312bd8b 661 uint16_t sub_section[TARGET_PAGE_SIZE];
662} subpage_t;
663
c227f099 664static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 665 uint16_t section);
a8170e5e 666static subpage_t *subpage_init(hwaddr base);
5312bd8b 667static void destroy_page_desc(uint16_t section_index)
54688b1e 668{
669 MemoryRegionSection *section = &phys_sections[section_index];
670 MemoryRegion *mr = section->mr;
671
672 if (mr->subpage) {
673 subpage_t *subpage = container_of(mr, subpage_t, iomem);
674 memory_region_destroy(&subpage->iomem);
675 g_free(subpage);
676 }
677}
678
4346ae3e 679static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
680{
681 unsigned i;
d6f2ea22 682 PhysPageEntry *p;
54688b1e 683
c19e8800 684 if (lp->ptr == PHYS_MAP_NODE_NIL) {
685 return;
686 }
687
c19e8800 688 p = phys_map_nodes[lp->ptr];
4346ae3e 689 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 690 if (!p[i].is_leaf) {
54688b1e 691 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 692 } else {
c19e8800 693 destroy_page_desc(p[i].ptr);
54688b1e 694 }
54688b1e 695 }
07f07b31 696 lp->is_leaf = 0;
c19e8800 697 lp->ptr = PHYS_MAP_NODE_NIL;
698}
699
ac1970fb 700static void destroy_all_mappings(AddressSpaceDispatch *d)
54688b1e 701{
ac1970fb 702 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
d6f2ea22 703 phys_map_nodes_reset();
704}
705
706static uint16_t phys_section_add(MemoryRegionSection *section)
707{
708 if (phys_sections_nb == phys_sections_nb_alloc) {
709 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
710 phys_sections = g_renew(MemoryRegionSection, phys_sections,
711 phys_sections_nb_alloc);
712 }
713 phys_sections[phys_sections_nb] = *section;
714 return phys_sections_nb++;
715}
716
717static void phys_sections_clear(void)
718{
719 phys_sections_nb = 0;
720}
721
ac1970fb 722static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
723{
724 subpage_t *subpage;
a8170e5e 725 hwaddr base = section->offset_within_address_space
0f0cb164 726 & TARGET_PAGE_MASK;
ac1970fb 727 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
728 MemoryRegionSection subsection = {
729 .offset_within_address_space = base,
730 .size = TARGET_PAGE_SIZE,
731 };
a8170e5e 732 hwaddr start, end;
0f0cb164 733
f3705d53 734 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 735
f3705d53 736 if (!(existing->mr->subpage)) {
0f0cb164
AK
737 subpage = subpage_init(base);
738 subsection.mr = &subpage->iomem;
ac1970fb 739 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 740 phys_section_add(&subsection));
0f0cb164 741 } else {
f3705d53 742 subpage = container_of(existing->mr, subpage_t, iomem);
743 }
744 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
adb2a9b5 745 end = start + section->size - 1;
746 subpage_register(subpage, start, end, phys_section_add(section));
747}
748
749
ac1970fb 750static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
33417e70 751{
a8170e5e 752 hwaddr start_addr = section->offset_within_address_space;
dd81124b 753 ram_addr_t size = section->size;
a8170e5e 754 hwaddr addr;
5312bd8b 755 uint16_t section_index = phys_section_add(section);
dd81124b 756
3b8e6a2d 757 assert(size);
f6f3fbca 758
3b8e6a2d 759 addr = start_addr;
ac1970fb 760 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2999097b 761 section_index);
762}
763
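/* Register a section with the dispatch map: unaligned head and tail
   fragments become subpages, the page-aligned middle is registered as
   full pages. */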
ac1970fb 764static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 765{
ac1970fb 766 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
767 MemoryRegionSection now = *section, remain = *section;
768
769 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
770 || (now.size < TARGET_PAGE_SIZE)) {
771 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
772 - now.offset_within_address_space,
773 now.size);
ac1970fb 774 register_subpage(d, &now);
775 remain.size -= now.size;
776 remain.offset_within_address_space += now.size;
777 remain.offset_within_region += now.size;
778 }
779 while (remain.size >= TARGET_PAGE_SIZE) {
780 now = remain;
781 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
782 now.size = TARGET_PAGE_SIZE;
ac1970fb 783 register_subpage(d, &now);
784 } else {
785 now.size &= TARGET_PAGE_MASK;
ac1970fb 786 register_multipage(d, &now);
69b67646 787 }
788 remain.size -= now.size;
789 remain.offset_within_address_space += now.size;
790 remain.offset_within_region += now.size;
791 }
792 now = remain;
793 if (now.size) {
ac1970fb 794 register_subpage(d, &now);
795 }
796}
797
798void qemu_flush_coalesced_mmio_buffer(void)
799{
800 if (kvm_enabled())
801 kvm_flush_coalesced_mmio_buffer();
802}
803
804#if defined(__linux__) && !defined(TARGET_S390X)
805
806#include <sys/vfs.h>
807
808#define HUGETLBFS_MAGIC 0x958458f6
809
810static long gethugepagesize(const char *path)
811{
812 struct statfs fs;
813 int ret;
814
815 do {
9742bf26 816 ret = statfs(path, &fs);
817 } while (ret != 0 && errno == EINTR);
818
819 if (ret != 0) {
820 perror(path);
821 return 0;
822 }
823
824 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 825 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
826
827 return fs.f_bsize;
828}
829
830static void *file_ram_alloc(RAMBlock *block,
831 ram_addr_t memory,
832 const char *path)
833{
834 char *filename;
835 void *area;
836 int fd;
837#ifdef MAP_POPULATE
838 int flags;
839#endif
840 unsigned long hpagesize;
841
842 hpagesize = gethugepagesize(path);
843 if (!hpagesize) {
9742bf26 844 return NULL;
845 }
846
847 if (memory < hpagesize) {
848 return NULL;
849 }
850
851 if (kvm_enabled() && !kvm_has_sync_mmu()) {
852 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
853 return NULL;
854 }
855
856 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 857 return NULL;
858 }
859
860 fd = mkstemp(filename);
861 if (fd < 0) {
862 perror("unable to create backing store for hugepages");
863 free(filename);
864 return NULL;
865 }
866 unlink(filename);
867 free(filename);
868
869 memory = (memory+hpagesize-1) & ~(hpagesize-1);
870
871 /*
872 * ftruncate is not supported by hugetlbfs in older
873 * hosts, so don't bother bailing out on errors.
874 * If anything goes wrong with it under other filesystems,
875 * mmap will fail.
876 */
877 if (ftruncate(fd, memory))
9742bf26 878 perror("ftruncate");
879
880#ifdef MAP_POPULATE
881 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
882 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
883 * to sidestep this quirk.
884 */
885 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
886 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
887#else
888 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
889#endif
890 if (area == MAP_FAILED) {
891 perror("file_ram_alloc: can't mmap RAM pages");
892 close(fd);
893 return (NULL);
c902760f 894 }
04b16653 895 block->fd = fd;
896 return area;
897}
898#endif
899
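/* Best-fit search: return the start of the smallest gap between existing
   RAM blocks that can hold 'size' bytes. */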
d17b5288 900static ram_addr_t find_ram_offset(ram_addr_t size)
901{
902 RAMBlock *block, *next_block;
3e837b2c 903 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 904
a3161038 905 if (QTAILQ_EMPTY(&ram_list.blocks))
906 return 0;
907
a3161038 908 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 909 ram_addr_t end, next = RAM_ADDR_MAX;
910
911 end = block->offset + block->length;
912
a3161038 913 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
914 if (next_block->offset >= end) {
915 next = MIN(next, next_block->offset);
916 }
917 }
918 if (next - end >= size && next - end < mingap) {
3e837b2c 919 offset = end;
920 mingap = next - end;
921 }
922 }
923
924 if (offset == RAM_ADDR_MAX) {
925 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
926 (uint64_t)size);
927 abort();
928 }
929
930 return offset;
931}
932
652d7ec2 933ram_addr_t last_ram_offset(void)
934{
935 RAMBlock *block;
936 ram_addr_t last = 0;
937
a3161038 938 QTAILQ_FOREACH(block, &ram_list.blocks, next)
939 last = MAX(last, block->offset + block->length);
940
941 return last;
942}
943
944static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
945{
946 int ret;
947 QemuOpts *machine_opts;
948
949 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
950 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
951 if (machine_opts &&
952 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
953 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
954 if (ret) {
955 perror("qemu_madvise");
956 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
957 "but dump_guest_core=off specified\n");
958 }
959 }
960}
961
c5705a77 962void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
963{
964 RAMBlock *new_block, *block;
965
c5705a77 966 new_block = NULL;
a3161038 967 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
968 if (block->offset == addr) {
969 new_block = block;
970 break;
971 }
972 }
973 assert(new_block);
974 assert(!new_block->idstr[0]);
84b89d78 975
976 if (dev) {
977 char *id = qdev_get_dev_path(dev);
978 if (id) {
979 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 980 g_free(id);
981 }
982 }
983 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
984
a3161038 985 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 986 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
987 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
988 new_block->idstr);
989 abort();
990 }
991 }
992}
993
994static int memory_try_enable_merging(void *addr, size_t len)
995{
996 QemuOpts *opts;
997
998 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
999 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1000 /* disabled by the user */
1001 return 0;
1002 }
1003
1004 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1005}
1006
1007ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1008 MemoryRegion *mr)
1009{
abb26d63 1010 RAMBlock *block, *new_block;
1011
1012 size = TARGET_PAGE_ALIGN(size);
1013 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1014
7c637366 1015 new_block->mr = mr;
432d268c 1016 new_block->offset = find_ram_offset(size);
1017 if (host) {
1018 new_block->host = host;
cd19cfa2 1019 new_block->flags |= RAM_PREALLOC_MASK;
1020 } else {
1021 if (mem_path) {
c902760f 1022#if defined (__linux__) && !defined(TARGET_S390X)
1023 new_block->host = file_ram_alloc(new_block, size, mem_path);
1024 if (!new_block->host) {
1025 new_block->host = qemu_vmalloc(size);
8490fc78 1026 memory_try_enable_merging(new_block->host, size);
6977dfe6 1027 }
c902760f 1028#else
1029 fprintf(stderr, "-mem-path option unsupported\n");
1030 exit(1);
c902760f 1031#endif
6977dfe6 1032 } else {
868bb33f 1033 if (xen_enabled()) {
fce537d4 1034 xen_ram_alloc(new_block->offset, size, mr);
1035 } else if (kvm_enabled()) {
1036 /* some s390/kvm configurations have special constraints */
1037 new_block->host = kvm_vmalloc(size);
1038 } else {
1039 new_block->host = qemu_vmalloc(size);
1040 }
8490fc78 1041 memory_try_enable_merging(new_block->host, size);
6977dfe6 1042 }
c902760f 1043 }
1044 new_block->length = size;
1045
1046 /* Keep the list sorted from biggest to smallest block. */
1047 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1048 if (block->length < new_block->length) {
1049 break;
1050 }
1051 }
1052 if (block) {
1053 QTAILQ_INSERT_BEFORE(block, new_block, next);
1054 } else {
1055 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1056 }
0d6d3c87 1057 ram_list.mru_block = NULL;
94a6b54f 1058
7267c094 1059 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1060 last_ram_offset() >> TARGET_PAGE_BITS);
1061 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1062 0, size >> TARGET_PAGE_BITS);
1720aeee 1063 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1064
ddb97f1d 1065 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1066 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1067
1068 if (kvm_enabled())
1069 kvm_setup_guest_memory(new_block->host, size);
1070
1071 return new_block->offset;
1072}
e9a1ab19 1073
c5705a77 1074ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1075{
c5705a77 1076 return qemu_ram_alloc_from_ptr(size, NULL, mr);
1077}
1078
1079void qemu_ram_free_from_ptr(ram_addr_t addr)
1080{
1081 RAMBlock *block;
1082
a3161038 1083 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1084 if (addr == block->offset) {
a3161038 1085 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1086 ram_list.mru_block = NULL;
7267c094 1087 g_free(block);
1088 return;
1089 }
1090 }
1091}
1092
c227f099 1093void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1094{
1095 RAMBlock *block;
1096
a3161038 1097 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1098 if (addr == block->offset) {
a3161038 1099 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1100 ram_list.mru_block = NULL;
1101 if (block->flags & RAM_PREALLOC_MASK) {
1102 ;
1103 } else if (mem_path) {
1104#if defined (__linux__) && !defined(TARGET_S390X)
1105 if (block->fd) {
1106 munmap(block->host, block->length);
1107 close(block->fd);
1108 } else {
1109 qemu_vfree(block->host);
1110 }
1111#else
1112 abort();
1113#endif
1114 } else {
1115#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1116 munmap(block->host, block->length);
1117#else
868bb33f 1118 if (xen_enabled()) {
e41d7c69 1119 xen_invalidate_map_cache_entry(block->host);
1120 } else {
1121 qemu_vfree(block->host);
1122 }
1123#endif
1124 }
7267c094 1125 g_free(block);
1126 return;
1127 }
1128 }
1129
1130}
1131
1132#ifndef _WIN32
1133void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1134{
1135 RAMBlock *block;
1136 ram_addr_t offset;
1137 int flags;
1138 void *area, *vaddr;
1139
a3161038 1140 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1141 offset = addr - block->offset;
1142 if (offset < block->length) {
1143 vaddr = block->host + offset;
1144 if (block->flags & RAM_PREALLOC_MASK) {
1145 ;
1146 } else {
1147 flags = MAP_FIXED;
1148 munmap(vaddr, length);
1149 if (mem_path) {
1150#if defined(__linux__) && !defined(TARGET_S390X)
1151 if (block->fd) {
1152#ifdef MAP_POPULATE
1153 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1154 MAP_PRIVATE;
1155#else
1156 flags |= MAP_PRIVATE;
1157#endif
1158 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1159 flags, block->fd, offset);
1160 } else {
1161 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1162 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1163 flags, -1, 0);
1164 }
1165#else
1166 abort();
1167#endif
1168 } else {
1169#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1170 flags |= MAP_SHARED | MAP_ANONYMOUS;
1171 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1172 flags, -1, 0);
1173#else
1174 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1175 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1176 flags, -1, 0);
1177#endif
1178 }
1179 if (area != vaddr) {
1180 fprintf(stderr, "Could not remap addr: "
1181 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1182 length, addr);
1183 exit(1);
1184 }
8490fc78 1185 memory_try_enable_merging(vaddr, length);
ddb97f1d 1186 qemu_ram_setup_dump(vaddr, length);
1187 }
1188 return;
1189 }
1190 }
1191}
1192#endif /* !_WIN32 */
1193
dc828ca1 1194/* Return a host pointer to ram allocated with qemu_ram_alloc.
1195 With the exception of the softmmu code in this file, this should
1196 only be used for local memory (e.g. video ram) that the device owns,
1197 and knows it isn't going to access beyond the end of the block.
1198
1199 It should not be used for general purpose DMA.
1200 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1201 */
c227f099 1202void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 1203{
1204 RAMBlock *block;
1205
1206 block = ram_list.mru_block;
1207 if (block && addr - block->offset < block->length) {
1208 goto found;
1209 }
a3161038 1210 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1211 if (addr - block->offset < block->length) {
0d6d3c87 1212 goto found;
f471a17e 1213 }
94a6b54f 1214 }
1215
1216 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1217 abort();
1218
1219found:
1220 ram_list.mru_block = block;
1221 if (xen_enabled()) {
1222 /* We need to check if the requested address is in the RAM
1223 * because we don't want to map the entire memory in QEMU.
1224 * In that case just map until the end of the page.
1225 */
1226 if (block->offset == 0) {
1227 return xen_map_cache(addr, 0, 0);
1228 } else if (block->host == NULL) {
1229 block->host =
1230 xen_map_cache(block->offset, block->length, 1);
1231 }
1232 }
1233 return block->host + (addr - block->offset);
1234}
1235
1236/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1237 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1238 *
1239 * ??? Is this still necessary?
b2e0a138 1240 */
8b9c99d9 1241static void *qemu_safe_ram_ptr(ram_addr_t addr)
1242{
1243 RAMBlock *block;
1244
a3161038 1245 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1246 if (addr - block->offset < block->length) {
868bb33f 1247 if (xen_enabled()) {
1248 /* We need to check if the requested address is in the RAM
1249 * because we don't want to map the entire memory in QEMU.
712c2b41 1250 * In that case just map until the end of the page.
1251 */
1252 if (block->offset == 0) {
e41d7c69 1253 return xen_map_cache(addr, 0, 0);
432d268c 1254 } else if (block->host == NULL) {
1255 block->host =
1256 xen_map_cache(block->offset, block->length, 1);
1257 }
1258 }
1259 return block->host + (addr - block->offset);
1260 }
1261 }
1262
1263 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1264 abort();
1265
1266 return NULL;
1267}
1268
1269/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1270 * but takes a size argument */
8b9c99d9 1271static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 1272{
1273 if (*size == 0) {
1274 return NULL;
1275 }
868bb33f 1276 if (xen_enabled()) {
e41d7c69 1277 return xen_map_cache(addr, *size, 1);
868bb33f 1278 } else {
1279 RAMBlock *block;
1280
a3161038 1281 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1282 if (addr - block->offset < block->length) {
1283 if (addr - block->offset + *size > block->length)
1284 *size = block->length - addr + block->offset;
1285 return block->host + (addr - block->offset);
1286 }
1287 }
1288
1289 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1290 abort();
1291 }
1292}
1293
1294void qemu_put_ram_ptr(void *addr)
1295{
1296 trace_qemu_put_ram_ptr(addr);
1297}
1298
e890261f 1299int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1300{
1301 RAMBlock *block;
1302 uint8_t *host = ptr;
1303
868bb33f 1304 if (xen_enabled()) {
e41d7c69 1305 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1306 return 0;
1307 }
1308
a3161038 1309 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
 1310 /* This case happens when the block is not mapped. */
1311 if (block->host == NULL) {
1312 continue;
1313 }
f471a17e 1314 if (host - block->host < block->length) {
1315 *ram_addr = block->offset + (host - block->host);
1316 return 0;
f471a17e 1317 }
94a6b54f 1318 }
432d268c 1319
1320 return -1;
1321}
f471a17e 1322
1323/* Some of the softmmu routines need to translate from a host pointer
1324 (typically a TLB entry) back to a ram offset. */
1325ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1326{
1327 ram_addr_t ram_addr;
f471a17e 1328
1329 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1330 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1331 abort();
1332 }
1333 return ram_addr;
1334}
1335
a8170e5e 1336static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1337 unsigned size)
1338{
1339#ifdef DEBUG_UNASSIGNED
1340 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1341#endif
5b450407 1342#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1343 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
1344#endif
1345 return 0;
1346}
1347
a8170e5e 1348static void unassigned_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1349 uint64_t val, unsigned size)
1350{
1351#ifdef DEBUG_UNASSIGNED
0e0df1e2 1352 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 1353#endif
5b450407 1354#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1355 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 1356#endif
1357}
1358
1359static const MemoryRegionOps unassigned_mem_ops = {
1360 .read = unassigned_mem_read,
1361 .write = unassigned_mem_write,
1362 .endianness = DEVICE_NATIVE_ENDIAN,
1363};
e18231a3 1364
a8170e5e 1365static uint64_t error_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1366 unsigned size)
e18231a3 1367{
0e0df1e2 1368 abort();
1369}
1370
a8170e5e 1371static void error_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1372 uint64_t value, unsigned size)
e18231a3 1373{
0e0df1e2 1374 abort();
1375}
1376
1377static const MemoryRegionOps error_mem_ops = {
1378 .read = error_mem_read,
1379 .write = error_mem_write,
1380 .endianness = DEVICE_NATIVE_ENDIAN,
1381};
1382
1383static const MemoryRegionOps rom_mem_ops = {
1384 .read = error_mem_read,
1385 .write = unassigned_mem_write,
1386 .endianness = DEVICE_NATIVE_ENDIAN,
1387};
1388
a8170e5e 1389static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1390 uint64_t val, unsigned size)
9fa3e853 1391{
3a7d929e 1392 int dirty_flags;
f7c11b53 1393 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1394 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1395#if !defined(CONFIG_USER_ONLY)
0e0df1e2 1396 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1397 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 1398#endif
3a7d929e 1399 }
1400 switch (size) {
1401 case 1:
1402 stb_p(qemu_get_ram_ptr(ram_addr), val);
1403 break;
1404 case 2:
1405 stw_p(qemu_get_ram_ptr(ram_addr), val);
1406 break;
1407 case 4:
1408 stl_p(qemu_get_ram_ptr(ram_addr), val);
1409 break;
1410 default:
1411 abort();
3a7d929e 1412 }
f23db169 1413 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1414 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1415 /* we remove the notdirty callback only if the code has been
1416 flushed */
1417 if (dirty_flags == 0xff)
2e70f6ef 1418 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
1419}
1420
1421static const MemoryRegionOps notdirty_mem_ops = {
1422 .read = error_mem_read,
1423 .write = notdirty_mem_write,
1424 .endianness = DEVICE_NATIVE_ENDIAN,
1425};
1426
0f459d16 1427/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1428static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1429{
9349b4f9 1430 CPUArchState *env = cpu_single_env;
06d55cc1 1431 target_ulong pc, cs_base;
0f459d16 1432 target_ulong vaddr;
a1d1bb31 1433 CPUWatchpoint *wp;
06d55cc1 1434 int cpu_flags;
0f459d16 1435
1436 if (env->watchpoint_hit) {
1437 /* We re-entered the check after replacing the TB. Now raise
 1438 * the debug interrupt so that it will trigger after the
1439 * current instruction. */
1440 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1441 return;
1442 }
2e70f6ef 1443 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1444 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1445 if ((vaddr == (wp->vaddr & len_mask) ||
1446 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1447 wp->flags |= BP_WATCHPOINT_HIT;
1448 if (!env->watchpoint_hit) {
1449 env->watchpoint_hit = wp;
5a316526 1450 tb_check_watchpoint(env);
1451 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1452 env->exception_index = EXCP_DEBUG;
488d6577 1453 cpu_loop_exit(env);
1454 } else {
1455 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1456 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1457 cpu_resume_from_signal(env, NULL);
6e140f28 1458 }
06d55cc1 1459 }
1460 } else {
1461 wp->flags &= ~BP_WATCHPOINT_HIT;
1462 }
1463 }
1464}
1465
1466/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1467 so these check for a hit then pass through to the normal out-of-line
1468 phys routines. */
a8170e5e 1469static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1470 unsigned size)
6658ffb8 1471{
1472 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1473 switch (size) {
1474 case 1: return ldub_phys(addr);
1475 case 2: return lduw_phys(addr);
1476 case 4: return ldl_phys(addr);
1477 default: abort();
1478 }
1479}
1480
a8170e5e 1481static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1482 uint64_t val, unsigned size)
6658ffb8 1483{
1484 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1485 switch (size) {
1486 case 1:
1487 stb_phys(addr, val);
1488 break;
1489 case 2:
1490 stw_phys(addr, val);
1491 break;
1492 case 4:
1493 stl_phys(addr, val);
1494 break;
1495 default: abort();
1496 }
1497}
1498
1499static const MemoryRegionOps watch_mem_ops = {
1500 .read = watch_mem_read,
1501 .write = watch_mem_write,
1502 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1503};
6658ffb8 1504
a8170e5e 1505static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1506 unsigned len)
db7b5426 1507{
70c68e44 1508 subpage_t *mmio = opaque;
f6405247 1509 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1510 MemoryRegionSection *section;
1511#if defined(DEBUG_SUBPAGE)
1512 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1513 mmio, len, addr, idx);
1514#endif
db7b5426 1515
1516 section = &phys_sections[mmio->sub_section[idx]];
1517 addr += mmio->base;
1518 addr -= section->offset_within_address_space;
1519 addr += section->offset_within_region;
37ec01d4 1520 return io_mem_read(section->mr, addr, len);
1521}
1522
a8170e5e 1523static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1524 uint64_t value, unsigned len)
db7b5426 1525{
70c68e44 1526 subpage_t *mmio = opaque;
f6405247 1527 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1528 MemoryRegionSection *section;
db7b5426 1529#if defined(DEBUG_SUBPAGE)
1530 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1531 " idx %d value %"PRIx64"\n",
f6405247 1532 __func__, mmio, len, addr, idx, value);
db7b5426 1533#endif
f6405247 1534
1535 section = &phys_sections[mmio->sub_section[idx]];
1536 addr += mmio->base;
1537 addr -= section->offset_within_address_space;
1538 addr += section->offset_within_region;
37ec01d4 1539 io_mem_write(section->mr, addr, value, len);
1540}
1541
1542static const MemoryRegionOps subpage_ops = {
1543 .read = subpage_read,
1544 .write = subpage_write,
1545 .endianness = DEVICE_NATIVE_ENDIAN,
1546};
1547
a8170e5e 1548static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
de712f94 1549 unsigned size)
1550{
1551 ram_addr_t raddr = addr;
1552 void *ptr = qemu_get_ram_ptr(raddr);
1553 switch (size) {
1554 case 1: return ldub_p(ptr);
1555 case 2: return lduw_p(ptr);
1556 case 4: return ldl_p(ptr);
1557 default: abort();
1558 }
1559}
1560
a8170e5e 1561static void subpage_ram_write(void *opaque, hwaddr addr,
de712f94 1562 uint64_t value, unsigned size)
1563{
1564 ram_addr_t raddr = addr;
1565 void *ptr = qemu_get_ram_ptr(raddr);
1566 switch (size) {
1567 case 1: return stb_p(ptr, value);
1568 case 2: return stw_p(ptr, value);
1569 case 4: return stl_p(ptr, value);
1570 default: abort();
1571 }
1572}
1573
1574static const MemoryRegionOps subpage_ram_ops = {
1575 .read = subpage_ram_read,
1576 .write = subpage_ram_write,
1577 .endianness = DEVICE_NATIVE_ENDIAN,
1578};
1579
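/* Map the byte range [start, end] within a page onto 'section'.  RAM-backed
   sections are rerouted through io_mem_subpage_ram so sub-page accesses
   still reach guest memory directly. */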
c227f099 1580static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1581 uint16_t section)
1582{
1583 int idx, eidx;
1584
1585 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1586 return -1;
1587 idx = SUBPAGE_IDX(start);
1588 eidx = SUBPAGE_IDX(end);
1589#if defined(DEBUG_SUBPAGE)
0bf9e31a 1590 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
 1591 mmio, start, end, idx, eidx, section);
1592#endif
1593 if (memory_region_is_ram(phys_sections[section].mr)) {
1594 MemoryRegionSection new_section = phys_sections[section];
1595 new_section.mr = &io_mem_subpage_ram;
1596 section = phys_section_add(&new_section);
56384e8b 1597 }
db7b5426 1598 for (; idx <= eidx; idx++) {
5312bd8b 1599 mmio->sub_section[idx] = section;
1600 }
1601
1602 return 0;
1603}
1604
a8170e5e 1605static subpage_t *subpage_init(hwaddr base)
db7b5426 1606{
c227f099 1607 subpage_t *mmio;
db7b5426 1608
7267c094 1609 mmio = g_malloc0(sizeof(subpage_t));
1610
1611 mmio->base = base;
1612 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1613 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1614 mmio->iomem.subpage = true;
db7b5426 1615#if defined(DEBUG_SUBPAGE)
 1616 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1617 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1618#endif
0f0cb164 1619 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
1620
1621 return mmio;
1622}
1623
1624static uint16_t dummy_section(MemoryRegion *mr)
1625{
1626 MemoryRegionSection section = {
1627 .mr = mr,
1628 .offset_within_address_space = 0,
1629 .offset_within_region = 0,
1630 .size = UINT64_MAX,
1631 };
1632
1633 return phys_section_add(&section);
1634}
1635
a8170e5e 1636MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1637{
37ec01d4 1638 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
1639}
1640
1641static void io_mem_init(void)
1642{
0e0df1e2 1643 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
1644 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1645 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1646 "unassigned", UINT64_MAX);
1647 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1648 "notdirty", UINT64_MAX);
1649 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1650 "subpage-ram", UINT64_MAX);
1651 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1652 "watch", UINT64_MAX);
1653}
1654
1655static void mem_begin(MemoryListener *listener)
1656{
1657 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1658
1659 destroy_all_mappings(d);
1660 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1661}
1662
1663static void core_begin(MemoryListener *listener)
1664{
1665 phys_sections_clear();
1666 phys_section_unassigned = dummy_section(&io_mem_unassigned);
1667 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1668 phys_section_rom = dummy_section(&io_mem_rom);
1669 phys_section_watch = dummy_section(&io_mem_watch);
1670}
1671
1d71148e 1672static void tcg_commit(MemoryListener *listener)
50c1e149 1673{
9349b4f9 1674 CPUArchState *env;
1675
1676 /* since each CPU stores ram addresses in its TLB cache, we must
1677 reset the modified entries */
1678 /* XXX: slow ! */
1679 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1680 tlb_flush(env, 1);
1681 }
1682}
1683
1684static void core_log_global_start(MemoryListener *listener)
1685{
1686 cpu_physical_memory_set_dirty_tracking(1);
1687}
1688
1689static void core_log_global_stop(MemoryListener *listener)
1690{
1691 cpu_physical_memory_set_dirty_tracking(0);
1692}
1693
1694static void io_region_add(MemoryListener *listener,
1695 MemoryRegionSection *section)
1696{
1697 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1698
1699 mrio->mr = section->mr;
1700 mrio->offset = section->offset_within_region;
1701 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 1702 section->offset_within_address_space, section->size);
a2d33521 1703 ioport_register(&mrio->iorange);
1704}
1705
1706static void io_region_del(MemoryListener *listener,
1707 MemoryRegionSection *section)
1708{
1709 isa_unassign_ioport(section->offset_within_address_space, section->size);
1710}
1711
93632747 1712static MemoryListener core_memory_listener = {
50c1e149 1713 .begin = core_begin,
1714 .log_global_start = core_log_global_start,
1715 .log_global_stop = core_log_global_stop,
ac1970fb 1716 .priority = 1,
1717};
1718
1719static MemoryListener io_memory_listener = {
1720 .region_add = io_region_add,
1721 .region_del = io_region_del,
1722 .priority = 0,
1723};
1724
1725static MemoryListener tcg_memory_listener = {
1726 .commit = tcg_commit,
1727};
1728
1729void address_space_init_dispatch(AddressSpace *as)
1730{
1731 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1732
1733 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1734 d->listener = (MemoryListener) {
1735 .begin = mem_begin,
1736 .region_add = mem_add,
1737 .region_nop = mem_add,
1738 .priority = 0,
1739 };
1740 as->dispatch = d;
1741 memory_listener_register(&d->listener, as);
1742}
1743
1744void address_space_destroy_dispatch(AddressSpace *as)
1745{
1746 AddressSpaceDispatch *d = as->dispatch;
1747
1748 memory_listener_unregister(&d->listener);
1749 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1750 g_free(d);
1751 as->dispatch = NULL;
1752}
1753
1754static void memory_map_init(void)
1755{
7267c094 1756 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1757 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1758 address_space_init(&address_space_memory, system_memory);
1759 address_space_memory.name = "memory";
309cb471 1760
7267c094 1761 system_io = g_malloc(sizeof(*system_io));
309cb471 1762 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1763 address_space_init(&address_space_io, system_io);
1764 address_space_io.name = "I/O";
93632747 1765
f6790af6
AK
1766 memory_listener_register(&core_memory_listener, &address_space_memory);
1767 memory_listener_register(&io_memory_listener, &address_space_io);
1768 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1769
1770 dma_context_init(&dma_context_memory, &address_space_memory,
1771 NULL, NULL, NULL);
62152b8a
AK
1772}
1773
1774MemoryRegion *get_system_memory(void)
1775{
1776 return system_memory;
1777}
1778
309cb471
AK
1779MemoryRegion *get_system_io(void)
1780{
1781 return system_io;
1782}
1783
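/*
 * Sketch of how board code typically uses get_system_memory() above: allocate
 * a RAM region and map it into the system address space.  This assumes the
 * 1.x-era signatures of memory_region_init_ram() and
 * memory_region_add_subregion(); "example.ram" and the base address 0 are
 * hypothetical.
 */
static void example_map_board_ram(ram_addr_t size)
{
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", size);
    memory_region_add_subregion(sysmem, 0, ram);
}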
e2eef170
PB
1784#endif /* !defined(CONFIG_USER_ONLY) */
1785
13eb76e0
FB
1786/* physical memory access (slow version, mainly for debug) */
1787#if defined(CONFIG_USER_ONLY)
9349b4f9 1788int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1789 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1790{
1791 int l, flags;
1792 target_ulong page;
53a5960a 1793 void * p;
13eb76e0
FB
1794
1795 while (len > 0) {
1796 page = addr & TARGET_PAGE_MASK;
1797 l = (page + TARGET_PAGE_SIZE) - addr;
1798 if (l > len)
1799 l = len;
1800 flags = page_get_flags(page);
1801 if (!(flags & PAGE_VALID))
a68fe89c 1802 return -1;
13eb76e0
FB
1803 if (is_write) {
1804 if (!(flags & PAGE_WRITE))
a68fe89c 1805 return -1;
579a97f7 1806 /* XXX: this code should not depend on lock_user */
72fb7daa 1807 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1808 return -1;
72fb7daa
AJ
1809 memcpy(p, buf, l);
1810 unlock_user(p, addr, l);
13eb76e0
FB
1811 } else {
1812 if (!(flags & PAGE_READ))
a68fe89c 1813 return -1;
579a97f7 1814 /* XXX: this code should not depend on lock_user */
72fb7daa 1815 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1816 return -1;
72fb7daa 1817 memcpy(buf, p, l);
5b257578 1818 unlock_user(p, addr, 0);
13eb76e0
FB
1819 }
1820 len -= l;
1821 buf += l;
1822 addr += l;
1823 }
a68fe89c 1824 return 0;
13eb76e0 1825}
8df1cd07 1826
13eb76e0 1827#else
51d7a9eb 1828
a8170e5e
AK
1829static void invalidate_and_set_dirty(hwaddr addr,
1830 hwaddr length)
51d7a9eb
AP
1831{
1832 if (!cpu_physical_memory_is_dirty(addr)) {
1833 /* invalidate code */
1834 tb_invalidate_phys_page_range(addr, addr + length, 0);
1835 /* set dirty bit */
1836 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1837 }
e226939d 1838 xen_modified_memory(addr, length);
51d7a9eb
AP
1839}
1840
a8170e5e 1841void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1842 int len, bool is_write)
13eb76e0 1843{
ac1970fb 1844 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1845 int l;
13eb76e0
FB
1846 uint8_t *ptr;
1847 uint32_t val;
a8170e5e 1848 hwaddr page;
f3705d53 1849 MemoryRegionSection *section;
3b46e624 1850
13eb76e0
FB
1851 while (len > 0) {
1852 page = addr & TARGET_PAGE_MASK;
1853 l = (page + TARGET_PAGE_SIZE) - addr;
1854 if (l > len)
1855 l = len;
ac1970fb 1856 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1857
13eb76e0 1858 if (is_write) {
f3705d53 1859 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1860 hwaddr addr1;
cc5bea60 1861 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
1862 /* XXX: could force cpu_single_env to NULL to avoid
1863 potential bugs */
6c2934db 1864 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1865 /* 32 bit write access */
c27004ec 1866 val = ldl_p(buf);
37ec01d4 1867 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1868 l = 4;
6c2934db 1869 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1870 /* 16 bit write access */
c27004ec 1871 val = lduw_p(buf);
37ec01d4 1872 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
1873 l = 2;
1874 } else {
1c213d19 1875 /* 8 bit write access */
c27004ec 1876 val = ldub_p(buf);
37ec01d4 1877 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
1878 l = 1;
1879 }
f3705d53 1880 } else if (!section->readonly) {
8ca5692d 1881 ram_addr_t addr1;
f3705d53 1882 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1883 + memory_region_section_addr(section, addr);
13eb76e0 1884 /* RAM case */
5579c7f3 1885 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1886 memcpy(ptr, buf, l);
51d7a9eb 1887 invalidate_and_set_dirty(addr1, l);
050a0ddf 1888 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1889 }
1890 } else {
cc5bea60
BS
1891 if (!(memory_region_is_ram(section->mr) ||
1892 memory_region_is_romd(section->mr))) {
a8170e5e 1893 hwaddr addr1;
13eb76e0 1894 /* I/O case */
cc5bea60 1895 addr1 = memory_region_section_addr(section, addr);
6c2934db 1896 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1897 /* 32 bit read access */
37ec01d4 1898 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1899 stl_p(buf, val);
13eb76e0 1900 l = 4;
6c2934db 1901 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1902 /* 16 bit read access */
37ec01d4 1903 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1904 stw_p(buf, val);
13eb76e0
FB
1905 l = 2;
1906 } else {
1c213d19 1907 /* 8 bit read access */
37ec01d4 1908 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1909 stb_p(buf, val);
13eb76e0
FB
1910 l = 1;
1911 }
1912 } else {
1913 /* RAM case */
0a1b357f 1914 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
1915 + memory_region_section_addr(section,
1916 addr));
f3705d53 1917 memcpy(buf, ptr, l);
050a0ddf 1918 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1919 }
1920 }
1921 len -= l;
1922 buf += l;
1923 addr += l;
1924 }
1925}
8df1cd07 1926
a8170e5e 1927void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1928 const uint8_t *buf, int len)
1929{
1930 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1931}
1932
1933/**
1934 * address_space_read: read from an address space.
1935 *
1936 * @as: #AddressSpace to be accessed
1937 * @addr: address within that address space
1938 * @buf: buffer with the data transferred
 * @len: length of the data transferred
1939 */
a8170e5e 1940void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
1941{
1942 address_space_rw(as, addr, buf, len, false);
1943}
1944
1945
a8170e5e 1946void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1947 int len, int is_write)
1948{
1949 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1950}
1951
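/*
 * Minimal usage sketch (hypothetical helper): cpu_physical_memory_rw() above
 * does the section lookup and, for MMIO, splits the access into 4/2/1-byte
 * device reads, so the caller only supplies a plain host buffer.
 */
static uint32_t example_peek_guest_word(hwaddr gpa)
{
    uint8_t buf[4];

    cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0);   /* is_write == 0 */
    return ldl_p(buf);                                   /* host byte order */
}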
d0ecd2aa 1952/* used for ROM loading : can write in RAM and ROM */
a8170e5e 1953void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1954 const uint8_t *buf, int len)
1955{
ac1970fb 1956 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
1957 int l;
1958 uint8_t *ptr;
a8170e5e 1959 hwaddr page;
f3705d53 1960 MemoryRegionSection *section;
3b46e624 1961
d0ecd2aa
FB
1962 while (len > 0) {
1963 page = addr & TARGET_PAGE_MASK;
1964 l = (page + TARGET_PAGE_SIZE) - addr;
1965 if (l > len)
1966 l = len;
ac1970fb 1967 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1968
cc5bea60
BS
1969 if (!(memory_region_is_ram(section->mr) ||
1970 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
1971 /* do nothing */
1972 } else {
1973 unsigned long addr1;
f3705d53 1974 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1975 + memory_region_section_addr(section, addr);
d0ecd2aa 1976 /* ROM/RAM case */
5579c7f3 1977 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 1978 memcpy(ptr, buf, l);
51d7a9eb 1979 invalidate_and_set_dirty(addr1, l);
050a0ddf 1980 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
1981 }
1982 len -= l;
1983 buf += l;
1984 addr += l;
1985 }
1986}
1987
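/*
 * Sketch: a firmware/ROM loader would use cpu_physical_memory_write_rom()
 * rather than cpu_physical_memory_write() so the copy also lands in regions
 * that are ROM or romd.  "blob" and "load_addr" are hypothetical.
 */
static void example_load_blob(const uint8_t *blob, int blob_len,
                              hwaddr load_addr)
{
    cpu_physical_memory_write_rom(load_addr, blob, blob_len);
}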
6d16c2f8
AL
1988typedef struct {
1989 void *buffer;
a8170e5e
AK
1990 hwaddr addr;
1991 hwaddr len;
6d16c2f8
AL
1992} BounceBuffer;
1993
1994static BounceBuffer bounce;
1995
ba223c29
AL
1996typedef struct MapClient {
1997 void *opaque;
1998 void (*callback)(void *opaque);
72cf2d4f 1999 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2000} MapClient;
2001
72cf2d4f
BS
2002static QLIST_HEAD(map_client_list, MapClient) map_client_list
2003 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2004
2005void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2006{
7267c094 2007 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2008
2009 client->opaque = opaque;
2010 client->callback = callback;
72cf2d4f 2011 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2012 return client;
2013}
2014
8b9c99d9 2015static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2016{
2017 MapClient *client = (MapClient *)_client;
2018
72cf2d4f 2019 QLIST_REMOVE(client, link);
7267c094 2020 g_free(client);
ba223c29
AL
2021}
2022
2023static void cpu_notify_map_clients(void)
2024{
2025 MapClient *client;
2026
72cf2d4f
BS
2027 while (!QLIST_EMPTY(&map_client_list)) {
2028 client = QLIST_FIRST(&map_client_list);
ba223c29 2029 client->callback(client->opaque);
34d5e948 2030 cpu_unregister_map_client(client);
ba223c29
AL
2031 }
2032}
2033
6d16c2f8
AL
2034/* Map a physical memory region into a host virtual address.
2035 * May map a subset of the requested range, given by and returned in *plen.
2036 * May return NULL if resources needed to perform the mapping are exhausted.
2037 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2038 * Use cpu_register_map_client() to know when retrying the map operation is
2039 * likely to succeed.
6d16c2f8 2040 */
ac1970fb 2041void *address_space_map(AddressSpace *as,
a8170e5e
AK
2042 hwaddr addr,
2043 hwaddr *plen,
ac1970fb 2044 bool is_write)
6d16c2f8 2045{
ac1970fb 2046 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
2047 hwaddr len = *plen;
2048 hwaddr todo = 0;
6d16c2f8 2049 int l;
a8170e5e 2050 hwaddr page;
f3705d53 2051 MemoryRegionSection *section;
f15fbc4b 2052 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2053 ram_addr_t rlen;
2054 void *ret;
6d16c2f8
AL
2055
2056 while (len > 0) {
2057 page = addr & TARGET_PAGE_MASK;
2058 l = (page + TARGET_PAGE_SIZE) - addr;
2059 if (l > len)
2060 l = len;
ac1970fb 2061 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2062
f3705d53 2063 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2064 if (todo || bounce.buffer) {
6d16c2f8
AL
2065 break;
2066 }
2067 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2068 bounce.addr = addr;
2069 bounce.len = l;
2070 if (!is_write) {
ac1970fb 2071 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2072 }
38bee5dc
SS
2073
2074 *plen = l;
2075 return bounce.buffer;
6d16c2f8 2076 }
8ab934f9 2077 if (!todo) {
f3705d53 2078 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2079 + memory_region_section_addr(section, addr);
8ab934f9 2080 }
6d16c2f8
AL
2081
2082 len -= l;
2083 addr += l;
38bee5dc 2084 todo += l;
6d16c2f8 2085 }
8ab934f9
SS
2086 rlen = todo;
2087 ret = qemu_ram_ptr_length(raddr, &rlen);
2088 *plen = rlen;
2089 return ret;
6d16c2f8
AL
2090}
2091
ac1970fb 2092/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2093 * Will also mark the memory as dirty if is_write == 1. access_len gives
2094 * the amount of memory that was actually read or written by the caller.
2095 */
a8170e5e
AK
2096void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2097 int is_write, hwaddr access_len)
6d16c2f8
AL
2098{
2099 if (buffer != bounce.buffer) {
2100 if (is_write) {
e890261f 2101 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2102 while (access_len) {
2103 unsigned l;
2104 l = TARGET_PAGE_SIZE;
2105 if (l > access_len)
2106 l = access_len;
51d7a9eb 2107 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2108 addr1 += l;
2109 access_len -= l;
2110 }
2111 }
868bb33f 2112 if (xen_enabled()) {
e41d7c69 2113 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2114 }
6d16c2f8
AL
2115 return;
2116 }
2117 if (is_write) {
ac1970fb 2118 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2119 }
f8a83245 2120 qemu_vfree(bounce.buffer);
6d16c2f8 2121 bounce.buffer = NULL;
ba223c29 2122 cpu_notify_map_clients();
6d16c2f8 2123}
d0ecd2aa 2124
a8170e5e
AK
2125void *cpu_physical_memory_map(hwaddr addr,
2126 hwaddr *plen,
ac1970fb
AK
2127 int is_write)
2128{
2129 return address_space_map(&address_space_memory, addr, plen, is_write);
2130}
2131
a8170e5e
AK
2132void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2133 int is_write, hwaddr access_len)
ac1970fb
AK
2134{
2135 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2136}
2137
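/*
 * Illustrative zero-copy pattern for the map/unmap API above (hypothetical
 * helper): map, touch the host pointer directly, then unmap with the number
 * of bytes actually written so dirty tracking and TB invalidation happen.
 */
static bool example_fill_guest_buffer(hwaddr gpa, hwaddr size, uint8_t pattern)
{
    hwaddr maplen = size;
    void *host = cpu_physical_memory_map(gpa, &maplen, 1 /* is_write */);

    if (!host) {
        return false;                 /* bounce buffer busy: retry later */
    }
    memset(host, pattern, maplen);    /* maplen may be less than size */
    cpu_physical_memory_unmap(host, maplen, 1, maplen);
    return maplen == size;
}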
8df1cd07 2138/* warning: addr must be aligned */
a8170e5e 2139static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2140 enum device_endian endian)
8df1cd07 2141{
8df1cd07
FB
2142 uint8_t *ptr;
2143 uint32_t val;
f3705d53 2144 MemoryRegionSection *section;
8df1cd07 2145
ac1970fb 2146 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2147
cc5bea60
BS
2148 if (!(memory_region_is_ram(section->mr) ||
2149 memory_region_is_romd(section->mr))) {
8df1cd07 2150 /* I/O case */
cc5bea60 2151 addr = memory_region_section_addr(section, addr);
37ec01d4 2152 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
2153#if defined(TARGET_WORDS_BIGENDIAN)
2154 if (endian == DEVICE_LITTLE_ENDIAN) {
2155 val = bswap32(val);
2156 }
2157#else
2158 if (endian == DEVICE_BIG_ENDIAN) {
2159 val = bswap32(val);
2160 }
2161#endif
8df1cd07
FB
2162 } else {
2163 /* RAM case */
f3705d53 2164 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2165 & TARGET_PAGE_MASK)
cc5bea60 2166 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2167 switch (endian) {
2168 case DEVICE_LITTLE_ENDIAN:
2169 val = ldl_le_p(ptr);
2170 break;
2171 case DEVICE_BIG_ENDIAN:
2172 val = ldl_be_p(ptr);
2173 break;
2174 default:
2175 val = ldl_p(ptr);
2176 break;
2177 }
8df1cd07
FB
2178 }
2179 return val;
2180}
2181
a8170e5e 2182uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2183{
2184 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2185}
2186
a8170e5e 2187uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2188{
2189 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2190}
2191
a8170e5e 2192uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2193{
2194 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2195}
2196
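/*
 * Sketch: device emulation reading a descriptor field that the guest stores
 * little-endian regardless of the target's byte order; ldl_le_phys() does the
 * byte swap on big-endian targets.  "desc_gpa" is a hypothetical address.
 */
static uint32_t example_read_le_field(hwaddr desc_gpa)
{
    return ldl_le_phys(desc_gpa);
}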
84b7b8e7 2197/* warning: addr must be aligned */
a8170e5e 2198static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2199 enum device_endian endian)
84b7b8e7 2200{
84b7b8e7
FB
2201 uint8_t *ptr;
2202 uint64_t val;
f3705d53 2203 MemoryRegionSection *section;
84b7b8e7 2204
ac1970fb 2205 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2206
cc5bea60
BS
2207 if (!(memory_region_is_ram(section->mr) ||
2208 memory_region_is_romd(section->mr))) {
84b7b8e7 2209 /* I/O case */
cc5bea60 2210 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
2211
2212 /* XXX This is broken when device endian != cpu endian.
2213 Fix and add "endian" variable check */
84b7b8e7 2214#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2215 val = io_mem_read(section->mr, addr, 4) << 32;
2216 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2217#else
37ec01d4
AK
2218 val = io_mem_read(section->mr, addr, 4);
2219 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
2220#endif
2221 } else {
2222 /* RAM case */
f3705d53 2223 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2224 & TARGET_PAGE_MASK)
cc5bea60 2225 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2226 switch (endian) {
2227 case DEVICE_LITTLE_ENDIAN:
2228 val = ldq_le_p(ptr);
2229 break;
2230 case DEVICE_BIG_ENDIAN:
2231 val = ldq_be_p(ptr);
2232 break;
2233 default:
2234 val = ldq_p(ptr);
2235 break;
2236 }
84b7b8e7
FB
2237 }
2238 return val;
2239}
2240
a8170e5e 2241uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2242{
2243 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2244}
2245
a8170e5e 2246uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2247{
2248 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2249}
2250
a8170e5e 2251uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2252{
2253 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2254}
2255
aab33094 2256/* XXX: optimize */
a8170e5e 2257uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2258{
2259 uint8_t val;
2260 cpu_physical_memory_read(addr, &val, 1);
2261 return val;
2262}
2263
733f0b02 2264/* warning: addr must be aligned */
a8170e5e 2265static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2266 enum device_endian endian)
aab33094 2267{
733f0b02
MT
2268 uint8_t *ptr;
2269 uint64_t val;
f3705d53 2270 MemoryRegionSection *section;
733f0b02 2271
ac1970fb 2272 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2273
cc5bea60
BS
2274 if (!(memory_region_is_ram(section->mr) ||
2275 memory_region_is_romd(section->mr))) {
733f0b02 2276 /* I/O case */
cc5bea60 2277 addr = memory_region_section_addr(section, addr);
37ec01d4 2278 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
2279#if defined(TARGET_WORDS_BIGENDIAN)
2280 if (endian == DEVICE_LITTLE_ENDIAN) {
2281 val = bswap16(val);
2282 }
2283#else
2284 if (endian == DEVICE_BIG_ENDIAN) {
2285 val = bswap16(val);
2286 }
2287#endif
733f0b02
MT
2288 } else {
2289 /* RAM case */
f3705d53 2290 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2291 & TARGET_PAGE_MASK)
cc5bea60 2292 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2293 switch (endian) {
2294 case DEVICE_LITTLE_ENDIAN:
2295 val = lduw_le_p(ptr);
2296 break;
2297 case DEVICE_BIG_ENDIAN:
2298 val = lduw_be_p(ptr);
2299 break;
2300 default:
2301 val = lduw_p(ptr);
2302 break;
2303 }
733f0b02
MT
2304 }
2305 return val;
aab33094
FB
2306}
2307
a8170e5e 2308uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2309{
2310 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2311}
2312
a8170e5e 2313uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2314{
2315 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2316}
2317
a8170e5e 2318uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2319{
2320 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2321}
2322
8df1cd07
FB
2323/* warning: addr must be aligned. The ram page is not marked as dirty
2324 and the code inside is not invalidated. It is useful if the dirty
2325 bits are used to track modified PTEs */
a8170e5e 2326void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2327{
8df1cd07 2328 uint8_t *ptr;
f3705d53 2329 MemoryRegionSection *section;
8df1cd07 2330
ac1970fb 2331 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2332
f3705d53 2333 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2334 addr = memory_region_section_addr(section, addr);
f3705d53 2335 if (memory_region_is_ram(section->mr)) {
37ec01d4 2336 section = &phys_sections[phys_section_rom];
06ef3525 2337 }
37ec01d4 2338 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2339 } else {
f3705d53 2340 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2341 & TARGET_PAGE_MASK)
cc5bea60 2342 + memory_region_section_addr(section, addr);
5579c7f3 2343 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2344 stl_p(ptr, val);
74576198
AL
2345
2346 if (unlikely(in_migration)) {
2347 if (!cpu_physical_memory_is_dirty(addr1)) {
2348 /* invalidate code */
2349 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2350 /* set dirty bit */
f7c11b53
YT
2351 cpu_physical_memory_set_dirty_flags(
2352 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2353 }
2354 }
8df1cd07
FB
2355 }
2356}
2357
a8170e5e 2358void stq_phys_notdirty(hwaddr addr, uint64_t val)
bc98a7ef 2359{
bc98a7ef 2360 uint8_t *ptr;
f3705d53 2361 MemoryRegionSection *section;
bc98a7ef 2362
ac1970fb 2363 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2364
f3705d53 2365 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2366 addr = memory_region_section_addr(section, addr);
f3705d53 2367 if (memory_region_is_ram(section->mr)) {
37ec01d4 2368 section = &phys_sections[phys_section_rom];
06ef3525 2369 }
bc98a7ef 2370#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2371 io_mem_write(section->mr, addr, val >> 32, 4);
2372 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 2373#else
37ec01d4
AK
2374 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2375 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
2376#endif
2377 } else {
f3705d53 2378 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2379 & TARGET_PAGE_MASK)
cc5bea60 2380 + memory_region_section_addr(section, addr));
bc98a7ef
JM
2381 stq_p(ptr, val);
2382 }
2383}
2384
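/*
 * Sketch of the use case the warning above describes: a target's software
 * page-table walker setting an "accessed" bit in a PTE.  Using
 * stl_phys_notdirty() keeps QEMU's own update from being counted as a guest
 * modification of the PTE page.  Bit layout and address are hypothetical.
 */
static void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 1u << 5;                  /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);
}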
8df1cd07 2385/* warning: addr must be aligned */
a8170e5e 2386static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2387 enum device_endian endian)
8df1cd07 2388{
8df1cd07 2389 uint8_t *ptr;
f3705d53 2390 MemoryRegionSection *section;
8df1cd07 2391
ac1970fb 2392 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2393
f3705d53 2394 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2395 addr = memory_region_section_addr(section, addr);
f3705d53 2396 if (memory_region_is_ram(section->mr)) {
37ec01d4 2397 section = &phys_sections[phys_section_rom];
06ef3525 2398 }
1e78bcc1
AG
2399#if defined(TARGET_WORDS_BIGENDIAN)
2400 if (endian == DEVICE_LITTLE_ENDIAN) {
2401 val = bswap32(val);
2402 }
2403#else
2404 if (endian == DEVICE_BIG_ENDIAN) {
2405 val = bswap32(val);
2406 }
2407#endif
37ec01d4 2408 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
2409 } else {
2410 unsigned long addr1;
f3705d53 2411 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2412 + memory_region_section_addr(section, addr);
8df1cd07 2413 /* RAM case */
5579c7f3 2414 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2415 switch (endian) {
2416 case DEVICE_LITTLE_ENDIAN:
2417 stl_le_p(ptr, val);
2418 break;
2419 case DEVICE_BIG_ENDIAN:
2420 stl_be_p(ptr, val);
2421 break;
2422 default:
2423 stl_p(ptr, val);
2424 break;
2425 }
51d7a9eb 2426 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2427 }
2428}
2429
a8170e5e 2430void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2431{
2432 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2433}
2434
a8170e5e 2435void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2436{
2437 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2438}
2439
a8170e5e 2440void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2441{
2442 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2443}
2444
aab33094 2445/* XXX: optimize */
a8170e5e 2446void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2447{
2448 uint8_t v = val;
2449 cpu_physical_memory_write(addr, &v, 1);
2450}
2451
733f0b02 2452/* warning: addr must be aligned */
a8170e5e 2453static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2454 enum device_endian endian)
aab33094 2455{
733f0b02 2456 uint8_t *ptr;
f3705d53 2457 MemoryRegionSection *section;
733f0b02 2458
ac1970fb 2459 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2460
f3705d53 2461 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2462 addr = memory_region_section_addr(section, addr);
f3705d53 2463 if (memory_region_is_ram(section->mr)) {
37ec01d4 2464 section = &phys_sections[phys_section_rom];
06ef3525 2465 }
1e78bcc1
AG
2466#if defined(TARGET_WORDS_BIGENDIAN)
2467 if (endian == DEVICE_LITTLE_ENDIAN) {
2468 val = bswap16(val);
2469 }
2470#else
2471 if (endian == DEVICE_BIG_ENDIAN) {
2472 val = bswap16(val);
2473 }
2474#endif
37ec01d4 2475 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
2476 } else {
2477 unsigned long addr1;
f3705d53 2478 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2479 + memory_region_section_addr(section, addr);
733f0b02
MT
2480 /* RAM case */
2481 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2482 switch (endian) {
2483 case DEVICE_LITTLE_ENDIAN:
2484 stw_le_p(ptr, val);
2485 break;
2486 case DEVICE_BIG_ENDIAN:
2487 stw_be_p(ptr, val);
2488 break;
2489 default:
2490 stw_p(ptr, val);
2491 break;
2492 }
51d7a9eb 2493 invalidate_and_set_dirty(addr1, 2);
733f0b02 2494 }
aab33094
FB
2495}
2496
a8170e5e 2497void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2498{
2499 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2500}
2501
a8170e5e 2502void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2503{
2504 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2505}
2506
a8170e5e 2507void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2508{
2509 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2510}
2511
aab33094 2512/* XXX: optimize */
a8170e5e 2513void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2514{
2515 val = tswap64(val);
71d2b725 2516 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2517}
2518
a8170e5e 2519void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2520{
2521 val = cpu_to_le64(val);
2522 cpu_physical_memory_write(addr, &val, 8);
2523}
2524
a8170e5e 2525void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2526{
2527 val = cpu_to_be64(val);
2528 cpu_physical_memory_write(addr, &val, 8);
2529}
2530
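/*
 * Sketch: storing a 64-bit little-endian value.  Unlike the stl/stw helpers
 * above, which warn that addr must be aligned, stq_le_phys() goes through
 * cpu_physical_memory_write() and therefore does not carry the alignment
 * warning.  "desc_gpa" is hypothetical.
 */
static void example_write_le_u64(hwaddr desc_gpa, uint64_t val)
{
    stq_le_phys(desc_gpa, val);
}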
5e2972fd 2531/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2532int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2533 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2534{
2535 int l;
a8170e5e 2536 hwaddr phys_addr;
9b3c35e0 2537 target_ulong page;
13eb76e0
FB
2538
2539 while (len > 0) {
2540 page = addr & TARGET_PAGE_MASK;
2541 phys_addr = cpu_get_phys_page_debug(env, page);
2542 /* if no physical page mapped, return an error */
2543 if (phys_addr == -1)
2544 return -1;
2545 l = (page + TARGET_PAGE_SIZE) - addr;
2546 if (l > len)
2547 l = len;
5e2972fd 2548 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2549 if (is_write)
2550 cpu_physical_memory_write_rom(phys_addr, buf, l);
2551 else
5e2972fd 2552 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2553 len -= l;
2554 buf += l;
2555 addr += l;
2556 }
2557 return 0;
2558}
a68fe89c 2559#endif
13eb76e0 2560
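/*
 * Sketch: how a debugger front end (e.g. the gdbstub) might wrap
 * cpu_memory_rw_debug().  The system-emulation variant above resolves each
 * page with cpu_get_phys_page_debug() and, for writes, uses
 * cpu_physical_memory_write_rom() so breakpoints can be planted even in ROM.
 * "example_debug_read" is a hypothetical helper.
 */
static bool example_debug_read(CPUArchState *env, target_ulong vaddr,
                               uint8_t *dest, int size)
{
    return cpu_memory_rw_debug(env, vaddr, dest, size, 0) == 0;
}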
b3755a91
PB
2561#if !defined(CONFIG_USER_ONLY)
2562
82afa586
BH
2563/*
2564 * A helper function for the _utterly broken_ virtio device model to find out if
2565 * it's running on a big endian machine. Don't do this at home kids!
2566 */
2567bool virtio_is_big_endian(void);
2568bool virtio_is_big_endian(void)
2569{
2570#if defined(TARGET_WORDS_BIGENDIAN)
2571 return true;
2572#else
2573 return false;
2574#endif
2575}
2576
61382a50 2577#endif
76f35538
WC
2578
2579#ifndef CONFIG_USER_ONLY
a8170e5e 2580bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2581{
2582 MemoryRegionSection *section;
2583
ac1970fb
AK
2584 section = phys_page_find(address_space_memory.dispatch,
2585 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
2586
2587 return !(memory_region_is_ram(section->mr) ||
2588 memory_region_is_romd(section->mr));
2589}
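/*
 * Sketch: a dump or migration path could use cpu_physical_memory_is_io() to
 * decide whether a guest-physical page is backed by RAM/ROM (copyable) or by
 * an MMIO region that must not be read byte-for-byte.  Hypothetical helper.
 */
static bool example_page_is_copyable(hwaddr gpa)
{
    return !cpu_physical_memory_is_io(gpa);
}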
2590#endif