git.proxmox.com Git - mirror_qemu.git/blame - exec.c
exec: Pass CPUState to cpu_reset_interrupt()
[mirror_qemu.git] / exec.c
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
432d268c 34#include "hw/xen.h"
1de7afc9
PB
35#include "qemu/timer.h"
36#include "qemu/config-file.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
67d95c15 52
67d3b957 53//#define DEBUG_UNASSIGNED
db7b5426 54//#define DEBUG_SUBPAGE
1196be37 55
e2eef170 56#if !defined(CONFIG_USER_ONLY)
9fa3e853 57int phys_ram_fd;
74576198 58static int in_migration;
94a6b54f 59
a3161038 60RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
61
62static MemoryRegion *system_memory;
309cb471 63static MemoryRegion *system_io;
62152b8a 64
f6790af6
AK
65AddressSpace address_space_io;
66AddressSpace address_space_memory;
9e11908f 67DMAContext dma_context_memory;
2673a5da 68
0e0df1e2 69MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
de712f94 70static MemoryRegion io_mem_subpage_ram;
0e0df1e2 71
e2eef170 72#endif
9fa3e853 73
9349b4f9 74CPUArchState *first_cpu;
6a00d601
FB
75/* current CPU in the current thread. It is only valid inside
76 cpu_exec() */
9349b4f9 77DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 78/* 0 = Do not count executed instructions.
bf20dc07 79 1 = Precise instruction counting.
2e70f6ef 80 2 = Adaptive rate instruction counting. */
5708fc66 81int use_icount;
6a00d601 82
e2eef170 83#if !defined(CONFIG_USER_ONLY)
4346ae3e 84
5312bd8b
AK
85static MemoryRegionSection *phys_sections;
86static unsigned phys_sections_nb, phys_sections_nb_alloc;
87static uint16_t phys_section_unassigned;
aa102231
AK
88static uint16_t phys_section_notdirty;
89static uint16_t phys_section_rom;
90static uint16_t phys_section_watch;
5312bd8b 91
d6f2ea22
AK
92/* Simple allocator for PhysPageEntry nodes */
93static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
94static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
95
07f07b31 96#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 97
e2eef170 98static void io_mem_init(void);
62152b8a 99static void memory_map_init(void);
8b9c99d9 100static void *qemu_safe_ram_ptr(ram_addr_t addr);
e2eef170 101
1ec9b909 102static MemoryRegion io_mem_watch;
6658ffb8 103#endif
fd6ce8f6 104
6d9a1304 105#if !defined(CONFIG_USER_ONLY)
d6f2ea22 106
f7bf5461 107static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 108{
f7bf5461 109 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
d6f2ea22
AK
110 typedef PhysPageEntry Node[L2_SIZE];
111 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
f7bf5461
AK
112 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
113 phys_map_nodes_nb + nodes);
d6f2ea22
AK
114 phys_map_nodes = g_renew(Node, phys_map_nodes,
115 phys_map_nodes_nb_alloc);
116 }
f7bf5461
AK
117}
118
119static uint16_t phys_map_node_alloc(void)
120{
121 unsigned i;
122 uint16_t ret;
123
124 ret = phys_map_nodes_nb++;
125 assert(ret != PHYS_MAP_NODE_NIL);
126 assert(ret != phys_map_nodes_nb_alloc);
d6f2ea22 127 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 128 phys_map_nodes[ret][i].is_leaf = 0;
c19e8800 129 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 130 }
f7bf5461 131 return ret;
d6f2ea22
AK
132}
133
134static void phys_map_nodes_reset(void)
135{
136 phys_map_nodes_nb = 0;
137}
138
92e873b9 139
a8170e5e
AK
140static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
141 hwaddr *nb, uint16_t leaf,
2999097b 142 int level)
f7bf5461
AK
143{
144 PhysPageEntry *p;
145 int i;
a8170e5e 146 hwaddr step = (hwaddr)1 << (level * L2_BITS);
108c49b8 147
07f07b31 148 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800
AK
149 lp->ptr = phys_map_node_alloc();
150 p = phys_map_nodes[lp->ptr];
f7bf5461
AK
151 if (level == 0) {
152 for (i = 0; i < L2_SIZE; i++) {
07f07b31 153 p[i].is_leaf = 1;
c19e8800 154 p[i].ptr = phys_section_unassigned;
4346ae3e 155 }
67c4d23c 156 }
f7bf5461 157 } else {
c19e8800 158 p = phys_map_nodes[lp->ptr];
92e873b9 159 }
2999097b 160 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 161
2999097b 162 while (*nb && lp < &p[L2_SIZE]) {
07f07b31
AK
163 if ((*index & (step - 1)) == 0 && *nb >= step) {
164 lp->is_leaf = true;
c19e8800 165 lp->ptr = leaf;
07f07b31
AK
166 *index += step;
167 *nb -= step;
2999097b
AK
168 } else {
169 phys_page_set_level(lp, index, nb, leaf, level - 1);
170 }
171 ++lp;
f7bf5461
AK
172 }
173}
174
ac1970fb 175static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 176 hwaddr index, hwaddr nb,
2999097b 177 uint16_t leaf)
f7bf5461 178{
2999097b 179 /* Wildly overreserve - it doesn't matter much. */
07f07b31 180 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 181
ac1970fb 182 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
183}
184
a8170e5e 185MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
92e873b9 186{
ac1970fb 187 PhysPageEntry lp = d->phys_map;
31ab2b4a
AK
188 PhysPageEntry *p;
189 int i;
31ab2b4a 190 uint16_t s_index = phys_section_unassigned;
f1f6e3b8 191
07f07b31 192 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 193 if (lp.ptr == PHYS_MAP_NODE_NIL) {
31ab2b4a
AK
194 goto not_found;
195 }
c19e8800 196 p = phys_map_nodes[lp.ptr];
31ab2b4a 197 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 198 }
31ab2b4a 199
c19e8800 200 s_index = lp.ptr;
31ab2b4a 201not_found:
f3705d53
AK
202 return &phys_sections[s_index];
203}
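/* Illustrative sketch (not part of exec.c): phys_page_set_level() and
 * phys_page_find() above walk a radix tree in which each level consumes
 * L2_BITS bits of the physical page index, most-significant level first.
 * A hypothetical helper showing the per-level index extraction:
 */
#if 0 /* example only */
static unsigned example_level_index(hwaddr page_index, int level)
{
    return (page_index >> (level * L2_BITS)) & (L2_SIZE - 1);
}
#endif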
204
e5548617
BS
205bool memory_region_is_unassigned(MemoryRegion *mr)
206{
207 return mr != &io_mem_ram && mr != &io_mem_rom
208 && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 209 && mr != &io_mem_watch;
fd6ce8f6 210}
5b6dd868 211#endif
fd6ce8f6 212
5b6dd868 213void cpu_exec_init_all(void)
fdbb84d1 214{
5b6dd868 215#if !defined(CONFIG_USER_ONLY)
b2a8658e 216 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
217 memory_map_init();
218 io_mem_init();
fdbb84d1 219#endif
5b6dd868 220}
fdbb84d1 221
b170fce3 222#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
223
224static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 225{
259186a7 226 CPUState *cpu = opaque;
a513fe19 227
5b6dd868
BS
228 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
229 version_id is increased. */
259186a7
AF
230 cpu->interrupt_request &= ~0x01;
231 tlb_flush(cpu->env_ptr, 1);
5b6dd868
BS
232
233 return 0;
a513fe19 234}
7501267e 235
5b6dd868
BS
236static const VMStateDescription vmstate_cpu_common = {
237 .name = "cpu_common",
238 .version_id = 1,
239 .minimum_version_id = 1,
240 .minimum_version_id_old = 1,
241 .post_load = cpu_common_post_load,
242 .fields = (VMStateField []) {
259186a7
AF
243 VMSTATE_UINT32(halted, CPUState),
244 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868
BS
245 VMSTATE_END_OF_LIST()
246 }
247};
b170fce3
AF
248#else
249#define vmstate_cpu_common vmstate_dummy
5b6dd868 250#endif
ea041c0e 251
38d8f5c8 252CPUState *qemu_get_cpu(int index)
ea041c0e 253{
5b6dd868 254 CPUArchState *env = first_cpu;
38d8f5c8 255 CPUState *cpu = NULL;
ea041c0e 256
5b6dd868 257 while (env) {
55e5c285
AF
258 cpu = ENV_GET_CPU(env);
259 if (cpu->cpu_index == index) {
5b6dd868 260 break;
55e5c285 261 }
5b6dd868 262 env = env->next_cpu;
ea041c0e 263 }
5b6dd868 264
d76fddae 265 return env ? cpu : NULL;
ea041c0e
FB
266}
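/* Illustrative usage sketch (not part of exec.c): qemu_get_cpu() returns
 * NULL when no CPU with the given index exists, so callers should check. */
#if 0
    CPUState *cpu = qemu_get_cpu(0);
    if (cpu == NULL) {
        /* no CPU with index 0 has been created yet */
    }
#endif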
267
5b6dd868 268void cpu_exec_init(CPUArchState *env)
ea041c0e 269{
5b6dd868 270 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 271 CPUClass *cc = CPU_GET_CLASS(cpu);
5b6dd868
BS
272 CPUArchState **penv;
273 int cpu_index;
274
275#if defined(CONFIG_USER_ONLY)
276 cpu_list_lock();
277#endif
278 env->next_cpu = NULL;
279 penv = &first_cpu;
280 cpu_index = 0;
281 while (*penv != NULL) {
282 penv = &(*penv)->next_cpu;
283 cpu_index++;
284 }
55e5c285 285 cpu->cpu_index = cpu_index;
1b1ed8dc 286 cpu->numa_node = 0;
5b6dd868
BS
287 QTAILQ_INIT(&env->breakpoints);
288 QTAILQ_INIT(&env->watchpoints);
289#ifndef CONFIG_USER_ONLY
290 cpu->thread_id = qemu_get_thread_id();
291#endif
292 *penv = env;
293#if defined(CONFIG_USER_ONLY)
294 cpu_list_unlock();
295#endif
259186a7 296 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
b170fce3 297#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
298 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
299 cpu_save, cpu_load, env);
b170fce3 300 assert(cc->vmsd == NULL);
5b6dd868 301#endif
b170fce3
AF
302 if (cc->vmsd != NULL) {
303 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
304 }
ea041c0e
FB
305}
306
1fddef4b 307#if defined(TARGET_HAS_ICE)
94df27fd 308#if defined(CONFIG_USER_ONLY)
9349b4f9 309static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
94df27fd
PB
310{
311 tb_invalidate_phys_page_range(pc, pc + 1, 0);
312}
313#else
1e7855a5
MF
314static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
315{
9d70c4b7
MF
316 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
317 (pc & ~TARGET_PAGE_MASK));
1e7855a5 318}
c27004ec 319#endif
94df27fd 320#endif /* TARGET_HAS_ICE */
d720b93d 321
c527ee8f 322#if defined(CONFIG_USER_ONLY)
9349b4f9 323void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f
PB
324
325{
326}
327
9349b4f9 328int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f
PB
329 int flags, CPUWatchpoint **watchpoint)
330{
331 return -ENOSYS;
332}
333#else
6658ffb8 334/* Add a watchpoint. */
9349b4f9 335int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 336 int flags, CPUWatchpoint **watchpoint)
6658ffb8 337{
b4051334 338 target_ulong len_mask = ~(len - 1);
c0ce998e 339 CPUWatchpoint *wp;
6658ffb8 340
b4051334 341 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
342 if ((len & (len - 1)) || (addr & ~len_mask) ||
343 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
344 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
345 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
346 return -EINVAL;
347 }
7267c094 348 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
349
350 wp->vaddr = addr;
b4051334 351 wp->len_mask = len_mask;
a1d1bb31
AL
352 wp->flags = flags;
353
2dc9f411 354 /* keep all GDB-injected watchpoints in front */
c0ce998e 355 if (flags & BP_GDB)
72cf2d4f 356 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 357 else
72cf2d4f 358 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 359
6658ffb8 360 tlb_flush_page(env, addr);
a1d1bb31
AL
361
362 if (watchpoint)
363 *watchpoint = wp;
364 return 0;
6658ffb8
PB
365}
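/* Illustrative usage sketch (not part of exec.c), assuming a valid 'env':
 * watch 4 bytes of guest virtual memory for writes. As checked above, len
 * must be a power of two no larger than TARGET_PAGE_SIZE and addr must be
 * aligned to len, otherwise -EINVAL is returned. */
#if 0
    CPUWatchpoint *wp;
    if (cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE, &wp) < 0) {
        /* invalid length or alignment */
    }
#endif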
366
a1d1bb31 367/* Remove a specific watchpoint. */
9349b4f9 368int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 369 int flags)
6658ffb8 370{
b4051334 371 target_ulong len_mask = ~(len - 1);
a1d1bb31 372 CPUWatchpoint *wp;
6658ffb8 373
72cf2d4f 374 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 375 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 376 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 377 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
378 return 0;
379 }
380 }
a1d1bb31 381 return -ENOENT;
6658ffb8
PB
382}
383
a1d1bb31 384/* Remove a specific watchpoint by reference. */
9349b4f9 385void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 386{
72cf2d4f 387 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 388
a1d1bb31
AL
389 tlb_flush_page(env, watchpoint->vaddr);
390
7267c094 391 g_free(watchpoint);
a1d1bb31
AL
392}
393
394/* Remove all matching watchpoints. */
9349b4f9 395void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 396{
c0ce998e 397 CPUWatchpoint *wp, *next;
a1d1bb31 398
72cf2d4f 399 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
400 if (wp->flags & mask)
401 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 402 }
7d03f82f 403}
c527ee8f 404#endif
7d03f82f 405
a1d1bb31 406/* Add a breakpoint. */
9349b4f9 407int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 408 CPUBreakpoint **breakpoint)
4c3a88a2 409{
1fddef4b 410#if defined(TARGET_HAS_ICE)
c0ce998e 411 CPUBreakpoint *bp;
3b46e624 412
7267c094 413 bp = g_malloc(sizeof(*bp));
4c3a88a2 414
a1d1bb31
AL
415 bp->pc = pc;
416 bp->flags = flags;
417
2dc9f411 418 /* keep all GDB-injected breakpoints in front */
c0ce998e 419 if (flags & BP_GDB)
72cf2d4f 420 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 421 else
72cf2d4f 422 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 423
d720b93d 424 breakpoint_invalidate(env, pc);
a1d1bb31
AL
425
426 if (breakpoint)
427 *breakpoint = bp;
4c3a88a2
FB
428 return 0;
429#else
a1d1bb31 430 return -ENOSYS;
4c3a88a2
FB
431#endif
432}
433
a1d1bb31 434/* Remove a specific breakpoint. */
9349b4f9 435int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 436{
7d03f82f 437#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
438 CPUBreakpoint *bp;
439
72cf2d4f 440 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
441 if (bp->pc == pc && bp->flags == flags) {
442 cpu_breakpoint_remove_by_ref(env, bp);
443 return 0;
444 }
7d03f82f 445 }
a1d1bb31
AL
446 return -ENOENT;
447#else
448 return -ENOSYS;
7d03f82f
EI
449#endif
450}
451
a1d1bb31 452/* Remove a specific breakpoint by reference. */
9349b4f9 453void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 454{
1fddef4b 455#if defined(TARGET_HAS_ICE)
72cf2d4f 456 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 457
a1d1bb31
AL
458 breakpoint_invalidate(env, breakpoint->pc);
459
7267c094 460 g_free(breakpoint);
a1d1bb31
AL
461#endif
462}
463
464/* Remove all matching breakpoints. */
9349b4f9 465void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
466{
467#if defined(TARGET_HAS_ICE)
c0ce998e 468 CPUBreakpoint *bp, *next;
a1d1bb31 469
72cf2d4f 470 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
471 if (bp->flags & mask)
472 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 473 }
4c3a88a2
FB
474#endif
475}
476
c33a346e
FB
477/* enable or disable single step mode. EXCP_DEBUG is returned by the
478 CPU loop after each instruction */
9349b4f9 479void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 480{
1fddef4b 481#if defined(TARGET_HAS_ICE)
c33a346e
FB
482 if (env->singlestep_enabled != enabled) {
483 env->singlestep_enabled = enabled;
e22a25c9
AL
484 if (kvm_enabled())
485 kvm_update_guest_debug(env, 0);
486 else {
ccbb4d44 487 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
488 /* XXX: only flush what is necessary */
489 tb_flush(env);
490 }
c33a346e
FB
491 }
492#endif
493}
494
9349b4f9 495void cpu_exit(CPUArchState *env)
3098dba0 496{
fcd7d003
AF
497 CPUState *cpu = ENV_GET_CPU(env);
498
499 cpu->exit_request = 1;
378df4b2 500 cpu->tcg_exit_req = 1;
3098dba0
AJ
501}
502
9349b4f9 503void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
504{
505 va_list ap;
493ae1f0 506 va_list ap2;
7501267e
FB
507
508 va_start(ap, fmt);
493ae1f0 509 va_copy(ap2, ap);
7501267e
FB
510 fprintf(stderr, "qemu: fatal: ");
511 vfprintf(stderr, fmt, ap);
512 fprintf(stderr, "\n");
6fd2a026 513 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
514 if (qemu_log_enabled()) {
515 qemu_log("qemu: fatal: ");
516 qemu_log_vprintf(fmt, ap2);
517 qemu_log("\n");
6fd2a026 518 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 519 qemu_log_flush();
93fcfe39 520 qemu_log_close();
924edcae 521 }
493ae1f0 522 va_end(ap2);
f9373291 523 va_end(ap);
fd052bf6
RV
524#if defined(CONFIG_USER_ONLY)
525 {
526 struct sigaction act;
527 sigfillset(&act.sa_mask);
528 act.sa_handler = SIG_DFL;
529 sigaction(SIGABRT, &act, NULL);
530 }
531#endif
7501267e
FB
532 abort();
533}
534
9349b4f9 535CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 536{
9349b4f9
AF
537 CPUArchState *new_env = cpu_init(env->cpu_model_str);
538 CPUArchState *next_cpu = new_env->next_cpu;
5a38f081
AL
539#if defined(TARGET_HAS_ICE)
540 CPUBreakpoint *bp;
541 CPUWatchpoint *wp;
542#endif
543
9349b4f9 544 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081 545
55e5c285 546 /* Preserve chaining. */
c5be9f08 547 new_env->next_cpu = next_cpu;
5a38f081
AL
548
549 /* Clone all break/watchpoints.
550 Note: Once we support ptrace with hw-debug register access, make sure
551 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
552 QTAILQ_INIT(&env->breakpoints);
553 QTAILQ_INIT(&env->watchpoints);
5a38f081 554#if defined(TARGET_HAS_ICE)
72cf2d4f 555 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
556 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
557 }
72cf2d4f 558 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
559 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
560 wp->flags, NULL);
561 }
562#endif
563
c5be9f08
TS
564 return new_env;
565}
566
0124311e 567#if !defined(CONFIG_USER_ONLY)
d24981d3
JQ
568static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
569 uintptr_t length)
570{
571 uintptr_t start1;
572
573 /* we modify the TLB cache so that the dirty bit will be set again
574 when accessing the range */
575 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
576 /* Check that we don't span multiple blocks - this breaks the
577 address comparisons below. */
578 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
579 != (end - 1) - start) {
580 abort();
581 }
582 cpu_tlb_reset_dirty_all(start1, length);
583
584}
585
5579c7f3 586/* Note: start and end must be within the same ram block. */
c227f099 587void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 588 int dirty_flags)
1ccde1cb 589{
d24981d3 590 uintptr_t length;
1ccde1cb
FB
591
592 start &= TARGET_PAGE_MASK;
593 end = TARGET_PAGE_ALIGN(end);
594
595 length = end - start;
596 if (length == 0)
597 return;
f7c11b53 598 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 599
d24981d3
JQ
600 if (tcg_enabled()) {
601 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 602 }
1ccde1cb
FB
603}
604
8b9c99d9 605static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 606{
f6f3fbca 607 int ret = 0;
74576198 608 in_migration = enable;
f6f3fbca 609 return ret;
74576198
AL
610}
611
a8170e5e 612hwaddr memory_region_section_get_iotlb(CPUArchState *env,
e5548617
BS
613 MemoryRegionSection *section,
614 target_ulong vaddr,
a8170e5e 615 hwaddr paddr,
e5548617
BS
616 int prot,
617 target_ulong *address)
618{
a8170e5e 619 hwaddr iotlb;
e5548617
BS
620 CPUWatchpoint *wp;
621
cc5bea60 622 if (memory_region_is_ram(section->mr)) {
e5548617
BS
623 /* Normal RAM. */
624 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 625 + memory_region_section_addr(section, paddr);
e5548617
BS
626 if (!section->readonly) {
627 iotlb |= phys_section_notdirty;
628 } else {
629 iotlb |= phys_section_rom;
630 }
631 } else {
632 /* IO handlers are currently passed a physical address.
633 It would be nice to pass an offset from the base address
634 of that region. This would avoid having to special case RAM,
635 and avoid full address decoding in every device.
636 We can't use the high bits of pd for this because
637 IO_MEM_ROMD uses these as a ram address. */
638 iotlb = section - phys_sections;
cc5bea60 639 iotlb += memory_region_section_addr(section, paddr);
e5548617
BS
640 }
641
642 /* Make accesses to pages with watchpoints go via the
643 watchpoint trap routines. */
644 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
645 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
646 /* Avoid trapping reads of pages with a write breakpoint. */
647 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
648 iotlb = phys_section_watch + paddr;
649 *address |= TLB_MMIO;
650 break;
651 }
652 }
653 }
654
655 return iotlb;
656}
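/* Worked example (illustrative): for a writable RAM page the value returned
 * above is the page's ram address OR'ed with phys_section_notdirty, so the
 * first write through the new TLB entry is routed to notdirty_mem_write()
 * and marks the page dirty; for MMIO it is the phys_sections[] index of the
 * section plus the offset of the address within the region. */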
9fa3e853
FB
657#endif /* defined(CONFIG_USER_ONLY) */
658
e2eef170 659#if !defined(CONFIG_USER_ONLY)
8da3ff18 660
c04b2b78
PB
661#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
662typedef struct subpage_t {
70c68e44 663 MemoryRegion iomem;
a8170e5e 664 hwaddr base;
5312bd8b 665 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
666} subpage_t;
667
c227f099 668static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 669 uint16_t section);
a8170e5e 670static subpage_t *subpage_init(hwaddr base);
5312bd8b 671static void destroy_page_desc(uint16_t section_index)
54688b1e 672{
5312bd8b
AK
673 MemoryRegionSection *section = &phys_sections[section_index];
674 MemoryRegion *mr = section->mr;
54688b1e
AK
675
676 if (mr->subpage) {
677 subpage_t *subpage = container_of(mr, subpage_t, iomem);
678 memory_region_destroy(&subpage->iomem);
679 g_free(subpage);
680 }
681}
682
4346ae3e 683static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
684{
685 unsigned i;
d6f2ea22 686 PhysPageEntry *p;
54688b1e 687
c19e8800 688 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
689 return;
690 }
691
c19e8800 692 p = phys_map_nodes[lp->ptr];
4346ae3e 693 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 694 if (!p[i].is_leaf) {
54688b1e 695 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 696 } else {
c19e8800 697 destroy_page_desc(p[i].ptr);
54688b1e 698 }
54688b1e 699 }
07f07b31 700 lp->is_leaf = 0;
c19e8800 701 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
702}
703
ac1970fb 704static void destroy_all_mappings(AddressSpaceDispatch *d)
54688b1e 705{
ac1970fb 706 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
d6f2ea22 707 phys_map_nodes_reset();
54688b1e
AK
708}
709
5312bd8b
AK
710static uint16_t phys_section_add(MemoryRegionSection *section)
711{
712 if (phys_sections_nb == phys_sections_nb_alloc) {
713 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
714 phys_sections = g_renew(MemoryRegionSection, phys_sections,
715 phys_sections_nb_alloc);
716 }
717 phys_sections[phys_sections_nb] = *section;
718 return phys_sections_nb++;
719}
720
721static void phys_sections_clear(void)
722{
723 phys_sections_nb = 0;
724}
725
ac1970fb 726static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
727{
728 subpage_t *subpage;
a8170e5e 729 hwaddr base = section->offset_within_address_space
0f0cb164 730 & TARGET_PAGE_MASK;
ac1970fb 731 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
0f0cb164
AK
732 MemoryRegionSection subsection = {
733 .offset_within_address_space = base,
734 .size = TARGET_PAGE_SIZE,
735 };
a8170e5e 736 hwaddr start, end;
0f0cb164 737
f3705d53 738 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 739
f3705d53 740 if (!(existing->mr->subpage)) {
0f0cb164
AK
741 subpage = subpage_init(base);
742 subsection.mr = &subpage->iomem;
ac1970fb 743 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 744 phys_section_add(&subsection));
0f0cb164 745 } else {
f3705d53 746 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
747 }
748 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
adb2a9b5 749 end = start + section->size - 1;
0f0cb164
AK
750 subpage_register(subpage, start, end, phys_section_add(section));
751}
752
753
ac1970fb 754static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
33417e70 755{
a8170e5e 756 hwaddr start_addr = section->offset_within_address_space;
dd81124b 757 ram_addr_t size = section->size;
a8170e5e 758 hwaddr addr;
5312bd8b 759 uint16_t section_index = phys_section_add(section);
dd81124b 760
3b8e6a2d 761 assert(size);
f6f3fbca 762
3b8e6a2d 763 addr = start_addr;
ac1970fb 764 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2999097b 765 section_index);
33417e70
FB
766}
767
ac1970fb 768static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 769{
ac1970fb 770 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
0f0cb164
AK
771 MemoryRegionSection now = *section, remain = *section;
772
773 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
774 || (now.size < TARGET_PAGE_SIZE)) {
775 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
776 - now.offset_within_address_space,
777 now.size);
ac1970fb 778 register_subpage(d, &now);
0f0cb164
AK
779 remain.size -= now.size;
780 remain.offset_within_address_space += now.size;
781 remain.offset_within_region += now.size;
782 }
69b67646
TH
783 while (remain.size >= TARGET_PAGE_SIZE) {
784 now = remain;
785 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
786 now.size = TARGET_PAGE_SIZE;
ac1970fb 787 register_subpage(d, &now);
69b67646
TH
788 } else {
789 now.size &= TARGET_PAGE_MASK;
ac1970fb 790 register_multipage(d, &now);
69b67646 791 }
0f0cb164
AK
792 remain.size -= now.size;
793 remain.offset_within_address_space += now.size;
794 remain.offset_within_region += now.size;
795 }
796 now = remain;
797 if (now.size) {
ac1970fb 798 register_subpage(d, &now);
0f0cb164
AK
799 }
800}
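/* Worked example (illustrative): with TARGET_PAGE_SIZE 0x1000 and a section
 * whose offset_within_address_space is 0x1800, size 0x5000, and whose
 * offset_within_region shares the same 0x800 page offset, mem_add() splits
 * it into a subpage head [0x1800, 0x2000), full pages [0x2000, 0x6000)
 * registered as a multipage mapping, and a subpage tail [0x6000, 0x6800). */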
801
62a2744c
SY
802void qemu_flush_coalesced_mmio_buffer(void)
803{
804 if (kvm_enabled())
805 kvm_flush_coalesced_mmio_buffer();
806}
807
b2a8658e
UD
808void qemu_mutex_lock_ramlist(void)
809{
810 qemu_mutex_lock(&ram_list.mutex);
811}
812
813void qemu_mutex_unlock_ramlist(void)
814{
815 qemu_mutex_unlock(&ram_list.mutex);
816}
817
c902760f
MT
818#if defined(__linux__) && !defined(TARGET_S390X)
819
820#include <sys/vfs.h>
821
822#define HUGETLBFS_MAGIC 0x958458f6
823
824static long gethugepagesize(const char *path)
825{
826 struct statfs fs;
827 int ret;
828
829 do {
9742bf26 830 ret = statfs(path, &fs);
c902760f
MT
831 } while (ret != 0 && errno == EINTR);
832
833 if (ret != 0) {
9742bf26
YT
834 perror(path);
835 return 0;
c902760f
MT
836 }
837
838 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 839 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
840
841 return fs.f_bsize;
842}
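/* Illustrative note: for hugetlbfs the filesystem block size returned above
 * is the huge page size (e.g. 2 MiB on x86-64), which file_ram_alloc() below
 * uses to round the allocation up to whole huge pages:
 * (memory + hpagesize - 1) & ~(hpagesize - 1). */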
843
04b16653
AW
844static void *file_ram_alloc(RAMBlock *block,
845 ram_addr_t memory,
846 const char *path)
c902760f
MT
847{
848 char *filename;
849 void *area;
850 int fd;
851#ifdef MAP_POPULATE
852 int flags;
853#endif
854 unsigned long hpagesize;
855
856 hpagesize = gethugepagesize(path);
857 if (!hpagesize) {
9742bf26 858 return NULL;
c902760f
MT
859 }
860
861 if (memory < hpagesize) {
862 return NULL;
863 }
864
865 if (kvm_enabled() && !kvm_has_sync_mmu()) {
866 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
867 return NULL;
868 }
869
e4ada482 870 filename = g_strdup_printf("%s/qemu_back_mem.XXXXXX", path);
c902760f
MT
871
872 fd = mkstemp(filename);
873 if (fd < 0) {
9742bf26 874 perror("unable to create backing store for hugepages");
e4ada482 875 g_free(filename);
9742bf26 876 return NULL;
c902760f
MT
877 }
878 unlink(filename);
e4ada482 879 g_free(filename);
c902760f
MT
880
881 memory = (memory+hpagesize-1) & ~(hpagesize-1);
882
883 /*
884 * ftruncate is not supported by hugetlbfs in older
885 * hosts, so don't bother bailing out on errors.
886 * If anything goes wrong with it under other filesystems,
887 * mmap will fail.
888 */
889 if (ftruncate(fd, memory))
9742bf26 890 perror("ftruncate");
c902760f
MT
891
892#ifdef MAP_POPULATE
893 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
894 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
895 * to sidestep this quirk.
896 */
897 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
898 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
899#else
900 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
901#endif
902 if (area == MAP_FAILED) {
9742bf26
YT
903 perror("file_ram_alloc: can't mmap RAM pages");
904 close(fd);
905 return (NULL);
c902760f 906 }
04b16653 907 block->fd = fd;
c902760f
MT
908 return area;
909}
910#endif
911
d17b5288 912static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
913{
914 RAMBlock *block, *next_block;
3e837b2c 915 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 916
a3161038 917 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
918 return 0;
919
a3161038 920 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 921 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
922
923 end = block->offset + block->length;
924
a3161038 925 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
926 if (next_block->offset >= end) {
927 next = MIN(next, next_block->offset);
928 }
929 }
930 if (next - end >= size && next - end < mingap) {
3e837b2c 931 offset = end;
04b16653
AW
932 mingap = next - end;
933 }
934 }
3e837b2c
AW
935
936 if (offset == RAM_ADDR_MAX) {
937 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
938 (uint64_t)size);
939 abort();
940 }
941
04b16653
AW
942 return offset;
943}
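/* Worked example (illustrative): with existing blocks at [0, 4M) and
 * [8M, 12M), a 2M request returns offset 4M, because the 4M..8M gap is the
 * smallest gap that still fits the requested size. */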
944
652d7ec2 945ram_addr_t last_ram_offset(void)
d17b5288
AW
946{
947 RAMBlock *block;
948 ram_addr_t last = 0;
949
a3161038 950 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
951 last = MAX(last, block->offset + block->length);
952
953 return last;
954}
955
ddb97f1d
JB
956static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
957{
958 int ret;
959 QemuOpts *machine_opts;
960
961 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
962 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
963 if (machine_opts &&
964 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
965 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
966 if (ret) {
967 perror("qemu_madvise");
968 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
969 "but dump_guest_core=off specified\n");
970 }
971 }
972}
973
c5705a77 974void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
975{
976 RAMBlock *new_block, *block;
977
c5705a77 978 new_block = NULL;
a3161038 979 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77
AK
980 if (block->offset == addr) {
981 new_block = block;
982 break;
983 }
984 }
985 assert(new_block);
986 assert(!new_block->idstr[0]);
84b89d78 987
09e5ab63
AL
988 if (dev) {
989 char *id = qdev_get_dev_path(dev);
84b89d78
CM
990 if (id) {
991 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 992 g_free(id);
84b89d78
CM
993 }
994 }
995 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
996
b2a8658e
UD
997 /* This assumes the iothread lock is taken here too. */
998 qemu_mutex_lock_ramlist();
a3161038 999 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1000 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1001 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1002 new_block->idstr);
1003 abort();
1004 }
1005 }
b2a8658e 1006 qemu_mutex_unlock_ramlist();
c5705a77
AK
1007}
1008
8490fc78
LC
1009static int memory_try_enable_merging(void *addr, size_t len)
1010{
1011 QemuOpts *opts;
1012
1013 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1014 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1015 /* disabled by the user */
1016 return 0;
1017 }
1018
1019 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1020}
1021
c5705a77
AK
1022ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1023 MemoryRegion *mr)
1024{
abb26d63 1025 RAMBlock *block, *new_block;
c5705a77
AK
1026
1027 size = TARGET_PAGE_ALIGN(size);
1028 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1029
b2a8658e
UD
1030 /* This assumes the iothread lock is taken here too. */
1031 qemu_mutex_lock_ramlist();
7c637366 1032 new_block->mr = mr;
432d268c 1033 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1034 if (host) {
1035 new_block->host = host;
cd19cfa2 1036 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
1037 } else {
1038 if (mem_path) {
c902760f 1039#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
1040 new_block->host = file_ram_alloc(new_block, size, mem_path);
1041 if (!new_block->host) {
1042 new_block->host = qemu_vmalloc(size);
8490fc78 1043 memory_try_enable_merging(new_block->host, size);
6977dfe6 1044 }
c902760f 1045#else
6977dfe6
YT
1046 fprintf(stderr, "-mem-path option unsupported\n");
1047 exit(1);
c902760f 1048#endif
6977dfe6 1049 } else {
868bb33f 1050 if (xen_enabled()) {
fce537d4 1051 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
1052 } else if (kvm_enabled()) {
1053 /* some s390/kvm configurations have special constraints */
1054 new_block->host = kvm_vmalloc(size);
432d268c
JN
1055 } else {
1056 new_block->host = qemu_vmalloc(size);
1057 }
8490fc78 1058 memory_try_enable_merging(new_block->host, size);
6977dfe6 1059 }
c902760f 1060 }
94a6b54f
PB
1061 new_block->length = size;
1062
abb26d63
PB
1063 /* Keep the list sorted from biggest to smallest block. */
1064 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1065 if (block->length < new_block->length) {
1066 break;
1067 }
1068 }
1069 if (block) {
1070 QTAILQ_INSERT_BEFORE(block, new_block, next);
1071 } else {
1072 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1073 }
0d6d3c87 1074 ram_list.mru_block = NULL;
94a6b54f 1075
f798b07f 1076 ram_list.version++;
b2a8658e 1077 qemu_mutex_unlock_ramlist();
f798b07f 1078
7267c094 1079 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1080 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
1081 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1082 0, size >> TARGET_PAGE_BITS);
1720aeee 1083 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1084
ddb97f1d 1085 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1086 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1087
6f0437e8
JK
1088 if (kvm_enabled())
1089 kvm_setup_guest_memory(new_block->host, size);
1090
94a6b54f
PB
1091 return new_block->offset;
1092}
e9a1ab19 1093
c5705a77 1094ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1095{
c5705a77 1096 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1097}
1098
1f2e98b6
AW
1099void qemu_ram_free_from_ptr(ram_addr_t addr)
1100{
1101 RAMBlock *block;
1102
b2a8658e
UD
1103 /* This assumes the iothread lock is taken here too. */
1104 qemu_mutex_lock_ramlist();
a3161038 1105 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1106 if (addr == block->offset) {
a3161038 1107 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1108 ram_list.mru_block = NULL;
f798b07f 1109 ram_list.version++;
7267c094 1110 g_free(block);
b2a8658e 1111 break;
1f2e98b6
AW
1112 }
1113 }
b2a8658e 1114 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1115}
1116
c227f099 1117void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1118{
04b16653
AW
1119 RAMBlock *block;
1120
b2a8658e
UD
1121 /* This assumes the iothread lock is taken here too. */
1122 qemu_mutex_lock_ramlist();
a3161038 1123 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1124 if (addr == block->offset) {
a3161038 1125 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1126 ram_list.mru_block = NULL;
f798b07f 1127 ram_list.version++;
cd19cfa2
HY
1128 if (block->flags & RAM_PREALLOC_MASK) {
1129 ;
1130 } else if (mem_path) {
04b16653
AW
1131#if defined (__linux__) && !defined(TARGET_S390X)
1132 if (block->fd) {
1133 munmap(block->host, block->length);
1134 close(block->fd);
1135 } else {
1136 qemu_vfree(block->host);
1137 }
fd28aa13
JK
1138#else
1139 abort();
04b16653
AW
1140#endif
1141 } else {
1142#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1143 munmap(block->host, block->length);
1144#else
868bb33f 1145 if (xen_enabled()) {
e41d7c69 1146 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
1147 } else {
1148 qemu_vfree(block->host);
1149 }
04b16653
AW
1150#endif
1151 }
7267c094 1152 g_free(block);
b2a8658e 1153 break;
04b16653
AW
1154 }
1155 }
b2a8658e 1156 qemu_mutex_unlock_ramlist();
04b16653 1157
e9a1ab19
FB
1158}
1159
cd19cfa2
HY
1160#ifndef _WIN32
1161void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1162{
1163 RAMBlock *block;
1164 ram_addr_t offset;
1165 int flags;
1166 void *area, *vaddr;
1167
a3161038 1168 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1169 offset = addr - block->offset;
1170 if (offset < block->length) {
1171 vaddr = block->host + offset;
1172 if (block->flags & RAM_PREALLOC_MASK) {
1173 ;
1174 } else {
1175 flags = MAP_FIXED;
1176 munmap(vaddr, length);
1177 if (mem_path) {
1178#if defined(__linux__) && !defined(TARGET_S390X)
1179 if (block->fd) {
1180#ifdef MAP_POPULATE
1181 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1182 MAP_PRIVATE;
1183#else
1184 flags |= MAP_PRIVATE;
1185#endif
1186 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1187 flags, block->fd, offset);
1188 } else {
1189 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1190 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1191 flags, -1, 0);
1192 }
fd28aa13
JK
1193#else
1194 abort();
cd19cfa2
HY
1195#endif
1196 } else {
1197#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1198 flags |= MAP_SHARED | MAP_ANONYMOUS;
1199 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1200 flags, -1, 0);
1201#else
1202 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1203 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1204 flags, -1, 0);
1205#endif
1206 }
1207 if (area != vaddr) {
f15fbc4b
AP
1208 fprintf(stderr, "Could not remap addr: "
1209 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1210 length, addr);
1211 exit(1);
1212 }
8490fc78 1213 memory_try_enable_merging(vaddr, length);
ddb97f1d 1214 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1215 }
1216 return;
1217 }
1218 }
1219}
1220#endif /* !_WIN32 */
1221
dc828ca1 1222/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
1223 With the exception of the softmmu code in this file, this should
1224 only be used for local memory (e.g. video ram) that the device owns,
1225 and knows it isn't going to access beyond the end of the block.
1226
1227 It should not be used for general purpose DMA.
1228 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1229 */
c227f099 1230void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 1231{
94a6b54f
PB
1232 RAMBlock *block;
1233
b2a8658e 1234 /* The list is protected by the iothread lock here. */
0d6d3c87
PB
1235 block = ram_list.mru_block;
1236 if (block && addr - block->offset < block->length) {
1237 goto found;
1238 }
a3161038 1239 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1240 if (addr - block->offset < block->length) {
0d6d3c87 1241 goto found;
f471a17e 1242 }
94a6b54f 1243 }
f471a17e
AW
1244
1245 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1246 abort();
1247
0d6d3c87
PB
1248found:
1249 ram_list.mru_block = block;
1250 if (xen_enabled()) {
1251 /* We need to check if the requested address is in the RAM
1252 * because we don't want to map the entire memory in QEMU.
1253 * In that case just map until the end of the page.
1254 */
1255 if (block->offset == 0) {
1256 return xen_map_cache(addr, 0, 0);
1257 } else if (block->host == NULL) {
1258 block->host =
1259 xen_map_cache(block->offset, block->length, 1);
1260 }
1261 }
1262 return block->host + (addr - block->offset);
dc828ca1
PB
1263}
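/* Illustrative round trip (not part of exec.c), assuming 'addr' is a valid
 * ram_addr_t from qemu_ram_alloc(): the host pointer is
 * block->host + (addr - block->offset), and qemu_ram_addr_from_host()
 * inverts the mapping. */
#if 0
    void *p = qemu_get_ram_ptr(addr);
    ram_addr_t back;
    assert(qemu_ram_addr_from_host(p, &back) == 0 && back == addr);
#endif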
1264
0d6d3c87
PB
1265/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1266 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1267 *
1268 * ??? Is this still necessary?
b2e0a138 1269 */
8b9c99d9 1270static void *qemu_safe_ram_ptr(ram_addr_t addr)
b2e0a138
MT
1271{
1272 RAMBlock *block;
1273
b2a8658e 1274 /* The list is protected by the iothread lock here. */
a3161038 1275 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1276 if (addr - block->offset < block->length) {
868bb33f 1277 if (xen_enabled()) {
432d268c
JN
1278 /* We need to check if the requested address is in the RAM
1279 * because we don't want to map the entire memory in QEMU.
712c2b41 1280 * In that case just map until the end of the page.
432d268c
JN
1281 */
1282 if (block->offset == 0) {
e41d7c69 1283 return xen_map_cache(addr, 0, 0);
432d268c 1284 } else if (block->host == NULL) {
e41d7c69
JK
1285 block->host =
1286 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
1287 }
1288 }
b2e0a138
MT
1289 return block->host + (addr - block->offset);
1290 }
1291 }
1292
1293 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1294 abort();
1295
1296 return NULL;
1297}
1298
38bee5dc
SS
1299/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1300 * but takes a size argument */
8b9c99d9 1301static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 1302{
8ab934f9
SS
1303 if (*size == 0) {
1304 return NULL;
1305 }
868bb33f 1306 if (xen_enabled()) {
e41d7c69 1307 return xen_map_cache(addr, *size, 1);
868bb33f 1308 } else {
38bee5dc
SS
1309 RAMBlock *block;
1310
a3161038 1311 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1312 if (addr - block->offset < block->length) {
1313 if (addr - block->offset + *size > block->length)
1314 *size = block->length - addr + block->offset;
1315 return block->host + (addr - block->offset);
1316 }
1317 }
1318
1319 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1320 abort();
38bee5dc
SS
1321 }
1322}
1323
050a0ddf
AP
1324void qemu_put_ram_ptr(void *addr)
1325{
1326 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
1327}
1328
e890261f 1329int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1330{
94a6b54f
PB
1331 RAMBlock *block;
1332 uint8_t *host = ptr;
1333
868bb33f 1334 if (xen_enabled()) {
e41d7c69 1335 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
1336 return 0;
1337 }
1338
a3161038 1339 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 1340 /* This case happens when the block is not mapped. */
1341 if (block->host == NULL) {
1342 continue;
1343 }
f471a17e 1344 if (host - block->host < block->length) {
e890261f
MT
1345 *ram_addr = block->offset + (host - block->host);
1346 return 0;
f471a17e 1347 }
94a6b54f 1348 }
432d268c 1349
e890261f
MT
1350 return -1;
1351}
f471a17e 1352
e890261f
MT
1353/* Some of the softmmu routines need to translate from a host pointer
1354 (typically a TLB entry) back to a ram offset. */
1355ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1356{
1357 ram_addr_t ram_addr;
f471a17e 1358
e890261f
MT
1359 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1360 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1361 abort();
1362 }
1363 return ram_addr;
5579c7f3
PB
1364}
1365
a8170e5e 1366static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1367 unsigned size)
e18231a3
BS
1368{
1369#ifdef DEBUG_UNASSIGNED
1370 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1371#endif
5b450407 1372#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1373 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
1374#endif
1375 return 0;
1376}
1377
a8170e5e 1378static void unassigned_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1379 uint64_t val, unsigned size)
e18231a3
BS
1380{
1381#ifdef DEBUG_UNASSIGNED
0e0df1e2 1382 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 1383#endif
5b450407 1384#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1385 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 1386#endif
33417e70
FB
1387}
1388
0e0df1e2
AK
1389static const MemoryRegionOps unassigned_mem_ops = {
1390 .read = unassigned_mem_read,
1391 .write = unassigned_mem_write,
1392 .endianness = DEVICE_NATIVE_ENDIAN,
1393};
e18231a3 1394
a8170e5e 1395static uint64_t error_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1396 unsigned size)
e18231a3 1397{
0e0df1e2 1398 abort();
e18231a3
BS
1399}
1400
a8170e5e 1401static void error_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1402 uint64_t value, unsigned size)
e18231a3 1403{
0e0df1e2 1404 abort();
33417e70
FB
1405}
1406
0e0df1e2
AK
1407static const MemoryRegionOps error_mem_ops = {
1408 .read = error_mem_read,
1409 .write = error_mem_write,
1410 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
1411};
1412
0e0df1e2
AK
1413static const MemoryRegionOps rom_mem_ops = {
1414 .read = error_mem_read,
1415 .write = unassigned_mem_write,
1416 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
1417};
1418
a8170e5e 1419static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1420 uint64_t val, unsigned size)
9fa3e853 1421{
3a7d929e 1422 int dirty_flags;
f7c11b53 1423 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1424 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1425#if !defined(CONFIG_USER_ONLY)
0e0df1e2 1426 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1427 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 1428#endif
3a7d929e 1429 }
0e0df1e2
AK
1430 switch (size) {
1431 case 1:
1432 stb_p(qemu_get_ram_ptr(ram_addr), val);
1433 break;
1434 case 2:
1435 stw_p(qemu_get_ram_ptr(ram_addr), val);
1436 break;
1437 case 4:
1438 stl_p(qemu_get_ram_ptr(ram_addr), val);
1439 break;
1440 default:
1441 abort();
3a7d929e 1442 }
f23db169 1443 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1444 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1445 /* we remove the notdirty callback only if the code has been
1446 flushed */
1447 if (dirty_flags == 0xff)
2e70f6ef 1448 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
1449}
1450
0e0df1e2
AK
1451static const MemoryRegionOps notdirty_mem_ops = {
1452 .read = error_mem_read,
1453 .write = notdirty_mem_write,
1454 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1455};
1456
0f459d16 1457/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1458static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1459{
9349b4f9 1460 CPUArchState *env = cpu_single_env;
06d55cc1 1461 target_ulong pc, cs_base;
0f459d16 1462 target_ulong vaddr;
a1d1bb31 1463 CPUWatchpoint *wp;
06d55cc1 1464 int cpu_flags;
0f459d16 1465
06d55cc1
AL
1466 if (env->watchpoint_hit) {
1467 /* We re-entered the check after replacing the TB. Now raise
 1468 * the debug interrupt so that it will trigger after the
1469 * current instruction. */
1470 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1471 return;
1472 }
2e70f6ef 1473 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1474 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1475 if ((vaddr == (wp->vaddr & len_mask) ||
1476 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1477 wp->flags |= BP_WATCHPOINT_HIT;
1478 if (!env->watchpoint_hit) {
1479 env->watchpoint_hit = wp;
5a316526 1480 tb_check_watchpoint(env);
6e140f28
AL
1481 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1482 env->exception_index = EXCP_DEBUG;
488d6577 1483 cpu_loop_exit(env);
6e140f28
AL
1484 } else {
1485 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1486 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1487 cpu_resume_from_signal(env, NULL);
6e140f28 1488 }
06d55cc1 1489 }
6e140f28
AL
1490 } else {
1491 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1492 }
1493 }
1494}
1495
6658ffb8
PB
1496/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1497 so these check for a hit then pass through to the normal out-of-line
1498 phys routines. */
a8170e5e 1499static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1500 unsigned size)
6658ffb8 1501{
1ec9b909
AK
1502 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1503 switch (size) {
1504 case 1: return ldub_phys(addr);
1505 case 2: return lduw_phys(addr);
1506 case 4: return ldl_phys(addr);
1507 default: abort();
1508 }
6658ffb8
PB
1509}
1510
a8170e5e 1511static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1512 uint64_t val, unsigned size)
6658ffb8 1513{
1ec9b909
AK
1514 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1515 switch (size) {
67364150
MF
1516 case 1:
1517 stb_phys(addr, val);
1518 break;
1519 case 2:
1520 stw_phys(addr, val);
1521 break;
1522 case 4:
1523 stl_phys(addr, val);
1524 break;
1ec9b909
AK
1525 default: abort();
1526 }
6658ffb8
PB
1527}
1528
1ec9b909
AK
1529static const MemoryRegionOps watch_mem_ops = {
1530 .read = watch_mem_read,
1531 .write = watch_mem_write,
1532 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1533};
6658ffb8 1534
a8170e5e 1535static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1536 unsigned len)
db7b5426 1537{
70c68e44 1538 subpage_t *mmio = opaque;
f6405247 1539 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1540 MemoryRegionSection *section;
db7b5426
BS
1541#if defined(DEBUG_SUBPAGE)
1542 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1543 mmio, len, addr, idx);
1544#endif
db7b5426 1545
5312bd8b
AK
1546 section = &phys_sections[mmio->sub_section[idx]];
1547 addr += mmio->base;
1548 addr -= section->offset_within_address_space;
1549 addr += section->offset_within_region;
37ec01d4 1550 return io_mem_read(section->mr, addr, len);
db7b5426
BS
1551}
1552
a8170e5e 1553static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1554 uint64_t value, unsigned len)
db7b5426 1555{
70c68e44 1556 subpage_t *mmio = opaque;
f6405247 1557 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1558 MemoryRegionSection *section;
db7b5426 1559#if defined(DEBUG_SUBPAGE)
70c68e44
AK
1560 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1561 " idx %d value %"PRIx64"\n",
f6405247 1562 __func__, mmio, len, addr, idx, value);
db7b5426 1563#endif
f6405247 1564
5312bd8b
AK
1565 section = &phys_sections[mmio->sub_section[idx]];
1566 addr += mmio->base;
1567 addr -= section->offset_within_address_space;
1568 addr += section->offset_within_region;
37ec01d4 1569 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
1570}
1571
70c68e44
AK
1572static const MemoryRegionOps subpage_ops = {
1573 .read = subpage_read,
1574 .write = subpage_write,
1575 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1576};
1577
a8170e5e 1578static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
de712f94 1579 unsigned size)
56384e8b
AF
1580{
1581 ram_addr_t raddr = addr;
1582 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
1583 switch (size) {
1584 case 1: return ldub_p(ptr);
1585 case 2: return lduw_p(ptr);
1586 case 4: return ldl_p(ptr);
1587 default: abort();
1588 }
56384e8b
AF
1589}
1590
a8170e5e 1591static void subpage_ram_write(void *opaque, hwaddr addr,
de712f94 1592 uint64_t value, unsigned size)
56384e8b
AF
1593{
1594 ram_addr_t raddr = addr;
1595 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
1596 switch (size) {
1597 case 1: return stb_p(ptr, value);
1598 case 2: return stw_p(ptr, value);
1599 case 4: return stl_p(ptr, value);
1600 default: abort();
1601 }
56384e8b
AF
1602}
1603
de712f94
AK
1604static const MemoryRegionOps subpage_ram_ops = {
1605 .read = subpage_ram_read,
1606 .write = subpage_ram_write,
1607 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
1608};
1609
c227f099 1610static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1611 uint16_t section)
db7b5426
BS
1612{
1613 int idx, eidx;
1614
1615 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1616 return -1;
1617 idx = SUBPAGE_IDX(start);
1618 eidx = SUBPAGE_IDX(end);
1619#if defined(DEBUG_SUBPAGE)
0bf9e31a 1620 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
 1621 mmio, start, end, idx, eidx, section);
1622#endif
5312bd8b
AK
1623 if (memory_region_is_ram(phys_sections[section].mr)) {
1624 MemoryRegionSection new_section = phys_sections[section];
1625 new_section.mr = &io_mem_subpage_ram;
1626 section = phys_section_add(&new_section);
56384e8b 1627 }
db7b5426 1628 for (; idx <= eidx; idx++) {
5312bd8b 1629 mmio->sub_section[idx] = section;
db7b5426
BS
1630 }
1631
1632 return 0;
1633}
1634
a8170e5e 1635static subpage_t *subpage_init(hwaddr base)
db7b5426 1636{
c227f099 1637 subpage_t *mmio;
db7b5426 1638
7267c094 1639 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
1640
1641 mmio->base = base;
70c68e44
AK
1642 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1643 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1644 mmio->iomem.subpage = true;
db7b5426 1645#if defined(DEBUG_SUBPAGE)
1eec614b
AL
 1646 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1647 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1648#endif
0f0cb164 1649 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
1650
1651 return mmio;
1652}
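/* Illustrative sketch (not part of exec.c): how register_subpage() above
 * combines these helpers. A section covering only bytes 0x100..0x2ff of a
 * page gets sub_section[] entries for that range, while the rest of the page
 * keeps phys_section_unassigned, installed as the default by subpage_init().
 * 'base' and 'section_idx' below are hypothetical values. */
#if 0
    subpage_t *sp = subpage_init(base);
    subpage_register(sp, 0x100, 0x2ff, section_idx);
#endif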
1653
5312bd8b
AK
1654static uint16_t dummy_section(MemoryRegion *mr)
1655{
1656 MemoryRegionSection section = {
1657 .mr = mr,
1658 .offset_within_address_space = 0,
1659 .offset_within_region = 0,
1660 .size = UINT64_MAX,
1661 };
1662
1663 return phys_section_add(&section);
1664}
1665
a8170e5e 1666MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1667{
37ec01d4 1668 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1669}
1670
e9179ce1
AK
1671static void io_mem_init(void)
1672{
0e0df1e2 1673 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
1674 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1675 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1676 "unassigned", UINT64_MAX);
1677 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1678 "notdirty", UINT64_MAX);
de712f94
AK
1679 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1680 "subpage-ram", UINT64_MAX);
1ec9b909
AK
1681 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1682 "watch", UINT64_MAX);
e9179ce1
AK
1683}
1684
ac1970fb
AK
1685static void mem_begin(MemoryListener *listener)
1686{
1687 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1688
1689 destroy_all_mappings(d);
1690 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1691}
1692
50c1e149
AK
1693static void core_begin(MemoryListener *listener)
1694{
5312bd8b
AK
1695 phys_sections_clear();
1696 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
1697 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1698 phys_section_rom = dummy_section(&io_mem_rom);
1699 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
1700}
1701
1d71148e 1702static void tcg_commit(MemoryListener *listener)
50c1e149 1703{
9349b4f9 1704 CPUArchState *env;
117712c3
AK
1705
1706 /* since each CPU stores ram addresses in its TLB cache, we must
1707 reset the modified entries */
1708 /* XXX: slow ! */
1709 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1710 tlb_flush(env, 1);
1711 }
50c1e149
AK
1712}
1713
93632747
AK
1714static void core_log_global_start(MemoryListener *listener)
1715{
1716 cpu_physical_memory_set_dirty_tracking(1);
1717}
1718
1719static void core_log_global_stop(MemoryListener *listener)
1720{
1721 cpu_physical_memory_set_dirty_tracking(0);
1722}
1723
4855d41a
AK
1724static void io_region_add(MemoryListener *listener,
1725 MemoryRegionSection *section)
1726{
a2d33521
AK
1727 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1728
1729 mrio->mr = section->mr;
1730 mrio->offset = section->offset_within_region;
1731 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 1732 section->offset_within_address_space, section->size);
a2d33521 1733 ioport_register(&mrio->iorange);
4855d41a
AK
1734}
1735
1736static void io_region_del(MemoryListener *listener,
1737 MemoryRegionSection *section)
1738{
1739 isa_unassign_ioport(section->offset_within_address_space, section->size);
1740}
1741
93632747 1742static MemoryListener core_memory_listener = {
50c1e149 1743 .begin = core_begin,
93632747
AK
1744 .log_global_start = core_log_global_start,
1745 .log_global_stop = core_log_global_stop,
ac1970fb 1746 .priority = 1,
93632747
AK
1747};
1748
4855d41a
AK
1749static MemoryListener io_memory_listener = {
1750 .region_add = io_region_add,
1751 .region_del = io_region_del,
4855d41a
AK
1752 .priority = 0,
1753};
1754
1d71148e
AK
1755static MemoryListener tcg_memory_listener = {
1756 .commit = tcg_commit,
1757};
1758
ac1970fb
AK
1759void address_space_init_dispatch(AddressSpace *as)
1760{
1761 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1762
1763 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1764 d->listener = (MemoryListener) {
1765 .begin = mem_begin,
1766 .region_add = mem_add,
1767 .region_nop = mem_add,
1768 .priority = 0,
1769 };
1770 as->dispatch = d;
1771 memory_listener_register(&d->listener, as);
1772}
1773
83f3c251
AK
1774void address_space_destroy_dispatch(AddressSpace *as)
1775{
1776 AddressSpaceDispatch *d = as->dispatch;
1777
1778 memory_listener_unregister(&d->listener);
1779 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1780 g_free(d);
1781 as->dispatch = NULL;
1782}
1783
62152b8a
AK
1784static void memory_map_init(void)
1785{
7267c094 1786 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1787 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1788 address_space_init(&address_space_memory, system_memory);
1789 address_space_memory.name = "memory";
309cb471 1790
7267c094 1791 system_io = g_malloc(sizeof(*system_io));
309cb471 1792 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1793 address_space_init(&address_space_io, system_io);
1794 address_space_io.name = "I/O";
93632747 1795
f6790af6
AK
1796 memory_listener_register(&core_memory_listener, &address_space_memory);
1797 memory_listener_register(&io_memory_listener, &address_space_io);
1798 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1799
1800 dma_context_init(&dma_context_memory, &address_space_memory,
1801 NULL, NULL, NULL);
62152b8a
AK
1802}
1803
1804MemoryRegion *get_system_memory(void)
1805{
1806 return system_memory;
1807}
1808
309cb471
AK
1809MemoryRegion *get_system_io(void)
1810{
1811 return system_io;
1812}
1813
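/*
 * Illustrative sketch only (not part of exec.c): how board code of this QEMU
 * generation typically wires guest RAM into the system address space returned
 * by get_system_memory() above.  The names "machine_ram" and
 * "example_board_init_ram" are hypothetical, and the
 * memory_region_init_ram()/vmstate_register_ram_global() signatures are
 * assumed to be those of this era's memory API.
 */
#if 0
static void example_board_init_ram(ram_addr_t ram_size)
{
    MemoryRegion *machine_ram = g_new(MemoryRegion, 1);

    /* Allocate host memory backing the guest RAM, register it for
       migration, and map it at guest physical address 0. */
    memory_region_init_ram(machine_ram, "example.ram", ram_size);
    vmstate_register_ram_global(machine_ram);
    memory_region_add_subregion(get_system_memory(), 0, machine_ram);
}
#endif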
e2eef170
PB
1814#endif /* !defined(CONFIG_USER_ONLY) */
1815
13eb76e0
FB
1816/* physical memory access (slow version, mainly for debug) */
1817#if defined(CONFIG_USER_ONLY)
9349b4f9 1818int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1819 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1820{
1821 int l, flags;
1822 target_ulong page;
53a5960a 1823 void * p;
13eb76e0
FB
1824
1825 while (len > 0) {
1826 page = addr & TARGET_PAGE_MASK;
1827 l = (page + TARGET_PAGE_SIZE) - addr;
1828 if (l > len)
1829 l = len;
1830 flags = page_get_flags(page);
1831 if (!(flags & PAGE_VALID))
a68fe89c 1832 return -1;
13eb76e0
FB
1833 if (is_write) {
1834 if (!(flags & PAGE_WRITE))
a68fe89c 1835 return -1;
579a97f7 1836 /* XXX: this code should not depend on lock_user */
72fb7daa 1837 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1838 return -1;
72fb7daa
AJ
1839 memcpy(p, buf, l);
1840 unlock_user(p, addr, l);
13eb76e0
FB
1841 } else {
1842 if (!(flags & PAGE_READ))
a68fe89c 1843 return -1;
579a97f7 1844 /* XXX: this code should not depend on lock_user */
72fb7daa 1845 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1846 return -1;
72fb7daa 1847 memcpy(buf, p, l);
5b257578 1848 unlock_user(p, addr, 0);
13eb76e0
FB
1849 }
1850 len -= l;
1851 buf += l;
1852 addr += l;
1853 }
a68fe89c 1854 return 0;
13eb76e0 1855}
8df1cd07 1856
13eb76e0 1857#else
51d7a9eb 1858
a8170e5e
AK
1859static void invalidate_and_set_dirty(hwaddr addr,
1860 hwaddr length)
51d7a9eb
AP
1861{
1862 if (!cpu_physical_memory_is_dirty(addr)) {
1863 /* invalidate code */
1864 tb_invalidate_phys_page_range(addr, addr + length, 0);
1865 /* set dirty bit */
1866 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1867 }
e226939d 1868 xen_modified_memory(addr, length);
51d7a9eb
AP
1869}
1870
a8170e5e 1871void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1872 int len, bool is_write)
13eb76e0 1873{
ac1970fb 1874 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1875 int l;
13eb76e0
FB
1876 uint8_t *ptr;
1877 uint32_t val;
a8170e5e 1878 hwaddr page;
f3705d53 1879 MemoryRegionSection *section;
3b46e624 1880
13eb76e0
FB
1881 while (len > 0) {
1882 page = addr & TARGET_PAGE_MASK;
1883 l = (page + TARGET_PAGE_SIZE) - addr;
1884 if (l > len)
1885 l = len;
ac1970fb 1886 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1887
13eb76e0 1888 if (is_write) {
f3705d53 1889 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1890 hwaddr addr1;
cc5bea60 1891 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
1892 /* XXX: could force cpu_single_env to NULL to avoid
1893 potential bugs */
6c2934db 1894 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1895 /* 32 bit write access */
c27004ec 1896 val = ldl_p(buf);
37ec01d4 1897 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1898 l = 4;
6c2934db 1899 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1900 /* 16 bit write access */
c27004ec 1901 val = lduw_p(buf);
37ec01d4 1902 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
1903 l = 2;
1904 } else {
1c213d19 1905 /* 8 bit write access */
c27004ec 1906 val = ldub_p(buf);
37ec01d4 1907 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
1908 l = 1;
1909 }
f3705d53 1910 } else if (!section->readonly) {
8ca5692d 1911 ram_addr_t addr1;
f3705d53 1912 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1913 + memory_region_section_addr(section, addr);
13eb76e0 1914 /* RAM case */
5579c7f3 1915 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1916 memcpy(ptr, buf, l);
51d7a9eb 1917 invalidate_and_set_dirty(addr1, l);
050a0ddf 1918 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1919 }
1920 } else {
cc5bea60
BS
1921 if (!(memory_region_is_ram(section->mr) ||
1922 memory_region_is_romd(section->mr))) {
a8170e5e 1923 hwaddr addr1;
13eb76e0 1924 /* I/O case */
cc5bea60 1925 addr1 = memory_region_section_addr(section, addr);
6c2934db 1926 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1927 /* 32 bit read access */
37ec01d4 1928 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1929 stl_p(buf, val);
13eb76e0 1930 l = 4;
6c2934db 1931 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1932 /* 16 bit read access */
37ec01d4 1933 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1934 stw_p(buf, val);
13eb76e0
FB
1935 l = 2;
1936 } else {
1c213d19 1937 /* 8 bit read access */
37ec01d4 1938 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1939 stb_p(buf, val);
13eb76e0
FB
1940 l = 1;
1941 }
1942 } else {
1943 /* RAM case */
0a1b357f 1944 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
1945 + memory_region_section_addr(section,
1946 addr));
f3705d53 1947 memcpy(buf, ptr, l);
050a0ddf 1948 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1949 }
1950 }
1951 len -= l;
1952 buf += l;
1953 addr += l;
1954 }
1955}
8df1cd07 1956
a8170e5e 1957void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1958 const uint8_t *buf, int len)
1959{
1960 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1961}
1962
1963/**
1964 * address_space_read: read from an address space.
1965 *
1966 * @as: #AddressSpace to be accessed
1967 * @addr: address within that address space
 1968 * @buf: buffer with the data transferred
 * @len: length of the transfer in bytes
 1969 */
a8170e5e 1970void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
1971{
1972 address_space_rw(as, addr, buf, len, false);
1973}
1974
1975
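/*
 * Illustrative sketch only: a round trip through the slow path above.
 * address_space_rw() splits a transfer at page boundaries and, for MMIO,
 * into naturally aligned 4/2/1-byte accesses; callers normally use the
 * address_space_write()/address_space_read() wrappers.  EXAMPLE_GUEST_ADDR
 * and example_write_then_read() are hypothetical names.
 */
#if 0
#define EXAMPLE_GUEST_ADDR 0x1000

static void example_write_then_read(void)
{
    const uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];

    address_space_write(&address_space_memory, EXAMPLE_GUEST_ADDR, out,
                        sizeof(out));
    address_space_read(&address_space_memory, EXAMPLE_GUEST_ADDR, in,
                       sizeof(in));
    if (memcmp(in, out, sizeof(in)) != 0) {
        /* Only possible if the target page is MMIO rather than RAM. */
    }
}
#endif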
a8170e5e 1976void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1977 int len, int is_write)
1978{
1979 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1980}
1981
d0ecd2aa 1982 /* used for ROM loading: can write to both RAM and ROM */
a8170e5e 1983void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1984 const uint8_t *buf, int len)
1985{
ac1970fb 1986 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
1987 int l;
1988 uint8_t *ptr;
a8170e5e 1989 hwaddr page;
f3705d53 1990 MemoryRegionSection *section;
3b46e624 1991
d0ecd2aa
FB
1992 while (len > 0) {
1993 page = addr & TARGET_PAGE_MASK;
1994 l = (page + TARGET_PAGE_SIZE) - addr;
1995 if (l > len)
1996 l = len;
ac1970fb 1997 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1998
cc5bea60
BS
1999 if (!(memory_region_is_ram(section->mr) ||
2000 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
2001 /* do nothing */
2002 } else {
2003 unsigned long addr1;
f3705d53 2004 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 2005 + memory_region_section_addr(section, addr);
d0ecd2aa 2006 /* ROM/RAM case */
5579c7f3 2007 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2008 memcpy(ptr, buf, l);
51d7a9eb 2009 invalidate_and_set_dirty(addr1, l);
050a0ddf 2010 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
2011 }
2012 len -= l;
2013 buf += l;
2014 addr += l;
2015 }
2016}
2017
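/*
 * Illustrative sketch only: how a ROM loader might use the helper above.
 * Unlike the normal write path, cpu_physical_memory_write_rom() also stores
 * into RAM regions marked readonly (ROM), which is what firmware loading
 * needs.  EXAMPLE_ROM_BASE and example_load_firmware() are hypothetical.
 */
#if 0
#define EXAMPLE_ROM_BASE 0xfffc0000

static void example_load_firmware(const uint8_t *blob, int blob_size)
{
    /* Copies the blob into guest memory, invalidating any translated
       code and dirty-marking the pages, even for readonly regions. */
    cpu_physical_memory_write_rom(EXAMPLE_ROM_BASE, blob, blob_size);
}
#endif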
6d16c2f8
AL
2018typedef struct {
2019 void *buffer;
a8170e5e
AK
2020 hwaddr addr;
2021 hwaddr len;
6d16c2f8
AL
2022} BounceBuffer;
2023
2024static BounceBuffer bounce;
2025
ba223c29
AL
2026typedef struct MapClient {
2027 void *opaque;
2028 void (*callback)(void *opaque);
72cf2d4f 2029 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2030} MapClient;
2031
72cf2d4f
BS
2032static QLIST_HEAD(map_client_list, MapClient) map_client_list
2033 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2034
2035void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2036{
7267c094 2037 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2038
2039 client->opaque = opaque;
2040 client->callback = callback;
72cf2d4f 2041 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2042 return client;
2043}
2044
8b9c99d9 2045static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2046{
2047 MapClient *client = (MapClient *)_client;
2048
72cf2d4f 2049 QLIST_REMOVE(client, link);
7267c094 2050 g_free(client);
ba223c29
AL
2051}
2052
2053static void cpu_notify_map_clients(void)
2054{
2055 MapClient *client;
2056
72cf2d4f
BS
2057 while (!QLIST_EMPTY(&map_client_list)) {
2058 client = QLIST_FIRST(&map_client_list);
ba223c29 2059 client->callback(client->opaque);
34d5e948 2060 cpu_unregister_map_client(client);
ba223c29
AL
2061 }
2062}
2063
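/*
 * Illustrative sketch only: the retry pattern the MapClient list supports.
 * If a mapping attempt fails because the single bounce buffer is in use,
 * a device can register a callback that cpu_notify_map_clients() invokes
 * once the buffer is released, and restart its DMA from there.
 * "ExampleDev" and the example_dev_* functions are hypothetical.
 */
#if 0
typedef struct ExampleDev ExampleDev;
static void example_dev_start_dma(ExampleDev *dev);

static void example_dev_dma_retry(void *opaque)
{
    /* Called once mapping resources are available again. */
    example_dev_start_dma(opaque);
}

static void example_dev_queue_retry(ExampleDev *dev)
{
    cpu_register_map_client(dev, example_dev_dma_retry);
}
#endif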
6d16c2f8
AL
2064/* Map a physical memory region into a host virtual address.
2065 * May map a subset of the requested range, given by and returned in *plen.
2066 * May return NULL if resources needed to perform the mapping are exhausted.
2067 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2068 * Use cpu_register_map_client() to know when retrying the map operation is
2069 * likely to succeed.
6d16c2f8 2070 */
ac1970fb 2071void *address_space_map(AddressSpace *as,
a8170e5e
AK
2072 hwaddr addr,
2073 hwaddr *plen,
ac1970fb 2074 bool is_write)
6d16c2f8 2075{
ac1970fb 2076 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
2077 hwaddr len = *plen;
2078 hwaddr todo = 0;
6d16c2f8 2079 int l;
a8170e5e 2080 hwaddr page;
f3705d53 2081 MemoryRegionSection *section;
f15fbc4b 2082 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2083 ram_addr_t rlen;
2084 void *ret;
6d16c2f8
AL
2085
2086 while (len > 0) {
2087 page = addr & TARGET_PAGE_MASK;
2088 l = (page + TARGET_PAGE_SIZE) - addr;
2089 if (l > len)
2090 l = len;
ac1970fb 2091 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2092
f3705d53 2093 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2094 if (todo || bounce.buffer) {
6d16c2f8
AL
2095 break;
2096 }
2097 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2098 bounce.addr = addr;
2099 bounce.len = l;
2100 if (!is_write) {
ac1970fb 2101 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2102 }
38bee5dc
SS
2103
2104 *plen = l;
2105 return bounce.buffer;
6d16c2f8 2106 }
8ab934f9 2107 if (!todo) {
f3705d53 2108 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2109 + memory_region_section_addr(section, addr);
8ab934f9 2110 }
6d16c2f8
AL
2111
2112 len -= l;
2113 addr += l;
38bee5dc 2114 todo += l;
6d16c2f8 2115 }
8ab934f9
SS
2116 rlen = todo;
2117 ret = qemu_ram_ptr_length(raddr, &rlen);
2118 *plen = rlen;
2119 return ret;
6d16c2f8
AL
2120}
2121
ac1970fb 2122/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2123 * Will also mark the memory as dirty if is_write == 1. access_len gives
2124 * the amount of memory that was actually read or written by the caller.
2125 */
a8170e5e
AK
2126void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2127 int is_write, hwaddr access_len)
6d16c2f8
AL
2128{
2129 if (buffer != bounce.buffer) {
2130 if (is_write) {
e890261f 2131 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2132 while (access_len) {
2133 unsigned l;
2134 l = TARGET_PAGE_SIZE;
2135 if (l > access_len)
2136 l = access_len;
51d7a9eb 2137 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2138 addr1 += l;
2139 access_len -= l;
2140 }
2141 }
868bb33f 2142 if (xen_enabled()) {
e41d7c69 2143 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2144 }
6d16c2f8
AL
2145 return;
2146 }
2147 if (is_write) {
ac1970fb 2148 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2149 }
f8a83245 2150 qemu_vfree(bounce.buffer);
6d16c2f8 2151 bounce.buffer = NULL;
ba223c29 2152 cpu_notify_map_clients();
6d16c2f8 2153}
d0ecd2aa 2154
a8170e5e
AK
2155void *cpu_physical_memory_map(hwaddr addr,
2156 hwaddr *plen,
ac1970fb
AK
2157 int is_write)
2158{
2159 return address_space_map(&address_space_memory, addr, plen, is_write);
2160}
2161
a8170e5e
AK
2162void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2163 int is_write, hwaddr access_len)
ac1970fb
AK
2164{
2165 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2166}
2167
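/*
 * Illustrative sketch only: zero-copy DMA using the map/unmap pair above.
 * The mapping may be shortened (via *plen) or return NULL when it would
 * need the bounce buffer, so callers must honour the returned length and
 * be prepared to retry through cpu_register_map_client().  The function
 * and parameter names are hypothetical.
 */
#if 0
static int example_dma_to_host(hwaddr guest_addr, void *dst, hwaddr size)
{
    hwaddr plen = size;
    void *host = cpu_physical_memory_map(guest_addr, &plen, 0 /* read */);

    if (!host) {
        return -1;               /* resources exhausted: register a MapClient */
    }
    memcpy(dst, host, plen);     /* plen may be smaller than size */
    cpu_physical_memory_unmap(host, plen, 0 /* read */, plen);
    return plen == size ? 0 : 1; /* 1: caller must map the remainder */
}
#endif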
8df1cd07 2168/* warning: addr must be aligned */
a8170e5e 2169static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2170 enum device_endian endian)
8df1cd07 2171{
8df1cd07
FB
2172 uint8_t *ptr;
2173 uint32_t val;
f3705d53 2174 MemoryRegionSection *section;
8df1cd07 2175
ac1970fb 2176 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2177
cc5bea60
BS
2178 if (!(memory_region_is_ram(section->mr) ||
2179 memory_region_is_romd(section->mr))) {
8df1cd07 2180 /* I/O case */
cc5bea60 2181 addr = memory_region_section_addr(section, addr);
37ec01d4 2182 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
2183#if defined(TARGET_WORDS_BIGENDIAN)
2184 if (endian == DEVICE_LITTLE_ENDIAN) {
2185 val = bswap32(val);
2186 }
2187#else
2188 if (endian == DEVICE_BIG_ENDIAN) {
2189 val = bswap32(val);
2190 }
2191#endif
8df1cd07
FB
2192 } else {
2193 /* RAM case */
f3705d53 2194 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2195 & TARGET_PAGE_MASK)
cc5bea60 2196 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2197 switch (endian) {
2198 case DEVICE_LITTLE_ENDIAN:
2199 val = ldl_le_p(ptr);
2200 break;
2201 case DEVICE_BIG_ENDIAN:
2202 val = ldl_be_p(ptr);
2203 break;
2204 default:
2205 val = ldl_p(ptr);
2206 break;
2207 }
8df1cd07
FB
2208 }
2209 return val;
2210}
2211
a8170e5e 2212uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2213{
2214 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2215}
2216
a8170e5e 2217uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2218{
2219 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2220}
2221
a8170e5e 2222uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2223{
2224 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2225}
2226
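/*
 * Illustrative sketch only: the endian-explicit load helpers above let
 * device and firmware code read guest-physical words without worrying
 * about host or target byte order.  EXAMPLE_DESC_ADDR and the function
 * name are hypothetical.
 */
#if 0
#define EXAMPLE_DESC_ADDR 0x2000

static uint32_t example_read_le_descriptor_word(void)
{
    /* A little-endian in-memory structure is read with ldl_le_phys();
       ldl_phys() would instead use the target's native byte order. */
    return ldl_le_phys(EXAMPLE_DESC_ADDR);
}
#endif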
84b7b8e7 2227/* warning: addr must be aligned */
a8170e5e 2228static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2229 enum device_endian endian)
84b7b8e7 2230{
84b7b8e7
FB
2231 uint8_t *ptr;
2232 uint64_t val;
f3705d53 2233 MemoryRegionSection *section;
84b7b8e7 2234
ac1970fb 2235 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2236
cc5bea60
BS
2237 if (!(memory_region_is_ram(section->mr) ||
2238 memory_region_is_romd(section->mr))) {
84b7b8e7 2239 /* I/O case */
cc5bea60 2240 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
2241
2242 /* XXX This is broken when device endian != cpu endian.
2243 Fix and add "endian" variable check */
84b7b8e7 2244#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2245 val = io_mem_read(section->mr, addr, 4) << 32;
2246 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2247#else
37ec01d4
AK
2248 val = io_mem_read(section->mr, addr, 4);
2249 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
2250#endif
2251 } else {
2252 /* RAM case */
f3705d53 2253 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2254 & TARGET_PAGE_MASK)
cc5bea60 2255 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2256 switch (endian) {
2257 case DEVICE_LITTLE_ENDIAN:
2258 val = ldq_le_p(ptr);
2259 break;
2260 case DEVICE_BIG_ENDIAN:
2261 val = ldq_be_p(ptr);
2262 break;
2263 default:
2264 val = ldq_p(ptr);
2265 break;
2266 }
84b7b8e7
FB
2267 }
2268 return val;
2269}
2270
a8170e5e 2271uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2272{
2273 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2274}
2275
a8170e5e 2276uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2277{
2278 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2279}
2280
a8170e5e 2281uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2282{
2283 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2284}
2285
aab33094 2286/* XXX: optimize */
a8170e5e 2287uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2288{
2289 uint8_t val;
2290 cpu_physical_memory_read(addr, &val, 1);
2291 return val;
2292}
2293
733f0b02 2294/* warning: addr must be aligned */
a8170e5e 2295static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2296 enum device_endian endian)
aab33094 2297{
733f0b02
MT
2298 uint8_t *ptr;
2299 uint64_t val;
f3705d53 2300 MemoryRegionSection *section;
733f0b02 2301
ac1970fb 2302 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2303
cc5bea60
BS
2304 if (!(memory_region_is_ram(section->mr) ||
2305 memory_region_is_romd(section->mr))) {
733f0b02 2306 /* I/O case */
cc5bea60 2307 addr = memory_region_section_addr(section, addr);
37ec01d4 2308 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
2309#if defined(TARGET_WORDS_BIGENDIAN)
2310 if (endian == DEVICE_LITTLE_ENDIAN) {
2311 val = bswap16(val);
2312 }
2313#else
2314 if (endian == DEVICE_BIG_ENDIAN) {
2315 val = bswap16(val);
2316 }
2317#endif
733f0b02
MT
2318 } else {
2319 /* RAM case */
f3705d53 2320 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2321 & TARGET_PAGE_MASK)
cc5bea60 2322 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2323 switch (endian) {
2324 case DEVICE_LITTLE_ENDIAN:
2325 val = lduw_le_p(ptr);
2326 break;
2327 case DEVICE_BIG_ENDIAN:
2328 val = lduw_be_p(ptr);
2329 break;
2330 default:
2331 val = lduw_p(ptr);
2332 break;
2333 }
733f0b02
MT
2334 }
2335 return val;
aab33094
FB
2336}
2337
a8170e5e 2338uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2339{
2340 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2341}
2342
a8170e5e 2343uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2344{
2345 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2346}
2347
a8170e5e 2348uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2349{
2350 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2351}
2352
8df1cd07
FB
 2353/* warning: addr must be aligned. The RAM page is not marked as dirty
2354 and the code inside is not invalidated. It is useful if the dirty
2355 bits are used to track modified PTEs */
a8170e5e 2356void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2357{
8df1cd07 2358 uint8_t *ptr;
f3705d53 2359 MemoryRegionSection *section;
8df1cd07 2360
ac1970fb 2361 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2362
f3705d53 2363 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2364 addr = memory_region_section_addr(section, addr);
f3705d53 2365 if (memory_region_is_ram(section->mr)) {
37ec01d4 2366 section = &phys_sections[phys_section_rom];
06ef3525 2367 }
37ec01d4 2368 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2369 } else {
f3705d53 2370 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2371 & TARGET_PAGE_MASK)
cc5bea60 2372 + memory_region_section_addr(section, addr);
5579c7f3 2373 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2374 stl_p(ptr, val);
74576198
AL
2375
2376 if (unlikely(in_migration)) {
2377 if (!cpu_physical_memory_is_dirty(addr1)) {
2378 /* invalidate code */
2379 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2380 /* set dirty bit */
f7c11b53
YT
2381 cpu_physical_memory_set_dirty_flags(
2382 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2383 }
2384 }
8df1cd07
FB
2385 }
2386}
2387
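/*
 * Illustrative sketch only: the typical caller of stl_phys_notdirty() is
 * target MMU emulation updating accessed/dirty flags in a guest page table
 * entry (the x86 helpers do this), where the write should not disturb the
 * dirty tracking described in the comment above.  The flag value and the
 * function name are hypothetical.
 */
#if 0
#define EXAMPLE_PTE_ACCESSED 0x20

static void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
}
#endif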
a8170e5e 2388void stq_phys_notdirty(hwaddr addr, uint64_t val)
bc98a7ef 2389{
bc98a7ef 2390 uint8_t *ptr;
f3705d53 2391 MemoryRegionSection *section;
bc98a7ef 2392
ac1970fb 2393 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2394
f3705d53 2395 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2396 addr = memory_region_section_addr(section, addr);
f3705d53 2397 if (memory_region_is_ram(section->mr)) {
37ec01d4 2398 section = &phys_sections[phys_section_rom];
06ef3525 2399 }
bc98a7ef 2400#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2401 io_mem_write(section->mr, addr, val >> 32, 4);
2402 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 2403#else
37ec01d4
AK
2404 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2405 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
2406#endif
2407 } else {
f3705d53 2408 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2409 & TARGET_PAGE_MASK)
cc5bea60 2410 + memory_region_section_addr(section, addr));
bc98a7ef
JM
2411 stq_p(ptr, val);
2412 }
2413}
2414
8df1cd07 2415/* warning: addr must be aligned */
a8170e5e 2416static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2417 enum device_endian endian)
8df1cd07 2418{
8df1cd07 2419 uint8_t *ptr;
f3705d53 2420 MemoryRegionSection *section;
8df1cd07 2421
ac1970fb 2422 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2423
f3705d53 2424 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2425 addr = memory_region_section_addr(section, addr);
f3705d53 2426 if (memory_region_is_ram(section->mr)) {
37ec01d4 2427 section = &phys_sections[phys_section_rom];
06ef3525 2428 }
1e78bcc1
AG
2429#if defined(TARGET_WORDS_BIGENDIAN)
2430 if (endian == DEVICE_LITTLE_ENDIAN) {
2431 val = bswap32(val);
2432 }
2433#else
2434 if (endian == DEVICE_BIG_ENDIAN) {
2435 val = bswap32(val);
2436 }
2437#endif
37ec01d4 2438 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
2439 } else {
2440 unsigned long addr1;
f3705d53 2441 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2442 + memory_region_section_addr(section, addr);
8df1cd07 2443 /* RAM case */
5579c7f3 2444 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2445 switch (endian) {
2446 case DEVICE_LITTLE_ENDIAN:
2447 stl_le_p(ptr, val);
2448 break;
2449 case DEVICE_BIG_ENDIAN:
2450 stl_be_p(ptr, val);
2451 break;
2452 default:
2453 stl_p(ptr, val);
2454 break;
2455 }
51d7a9eb 2456 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2457 }
2458}
2459
a8170e5e 2460void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2461{
2462 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2463}
2464
a8170e5e 2465void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2466{
2467 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2468}
2469
a8170e5e 2470void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2471{
2472 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2473}
2474
aab33094 2475/* XXX: optimize */
a8170e5e 2476void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2477{
2478 uint8_t v = val;
2479 cpu_physical_memory_write(addr, &v, 1);
2480}
2481
733f0b02 2482/* warning: addr must be aligned */
a8170e5e 2483static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2484 enum device_endian endian)
aab33094 2485{
733f0b02 2486 uint8_t *ptr;
f3705d53 2487 MemoryRegionSection *section;
733f0b02 2488
ac1970fb 2489 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2490
f3705d53 2491 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2492 addr = memory_region_section_addr(section, addr);
f3705d53 2493 if (memory_region_is_ram(section->mr)) {
37ec01d4 2494 section = &phys_sections[phys_section_rom];
06ef3525 2495 }
1e78bcc1
AG
2496#if defined(TARGET_WORDS_BIGENDIAN)
2497 if (endian == DEVICE_LITTLE_ENDIAN) {
2498 val = bswap16(val);
2499 }
2500#else
2501 if (endian == DEVICE_BIG_ENDIAN) {
2502 val = bswap16(val);
2503 }
2504#endif
37ec01d4 2505 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
2506 } else {
2507 unsigned long addr1;
f3705d53 2508 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2509 + memory_region_section_addr(section, addr);
733f0b02
MT
2510 /* RAM case */
2511 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2512 switch (endian) {
2513 case DEVICE_LITTLE_ENDIAN:
2514 stw_le_p(ptr, val);
2515 break;
2516 case DEVICE_BIG_ENDIAN:
2517 stw_be_p(ptr, val);
2518 break;
2519 default:
2520 stw_p(ptr, val);
2521 break;
2522 }
51d7a9eb 2523 invalidate_and_set_dirty(addr1, 2);
733f0b02 2524 }
aab33094
FB
2525}
2526
a8170e5e 2527void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2528{
2529 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2530}
2531
a8170e5e 2532void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2533{
2534 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2535}
2536
a8170e5e 2537void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2538{
2539 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2540}
2541
aab33094 2542/* XXX: optimize */
a8170e5e 2543void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2544{
2545 val = tswap64(val);
71d2b725 2546 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2547}
2548
a8170e5e 2549void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2550{
2551 val = cpu_to_le64(val);
2552 cpu_physical_memory_write(addr, &val, 8);
2553}
2554
a8170e5e 2555void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2556{
2557 val = cpu_to_be64(val);
2558 cpu_physical_memory_write(addr, &val, 8);
2559}
2560
5e2972fd 2561/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2562int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2563 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2564{
2565 int l;
a8170e5e 2566 hwaddr phys_addr;
9b3c35e0 2567 target_ulong page;
13eb76e0
FB
2568
2569 while (len > 0) {
2570 page = addr & TARGET_PAGE_MASK;
2571 phys_addr = cpu_get_phys_page_debug(env, page);
2572 /* if no physical page mapped, return an error */
2573 if (phys_addr == -1)
2574 return -1;
2575 l = (page + TARGET_PAGE_SIZE) - addr;
2576 if (l > len)
2577 l = len;
5e2972fd 2578 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2579 if (is_write)
2580 cpu_physical_memory_write_rom(phys_addr, buf, l);
2581 else
5e2972fd 2582 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2583 len -= l;
2584 buf += l;
2585 addr += l;
2586 }
2587 return 0;
2588}
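/*
 * Illustrative sketch only: a debugger-style poke through a CPU's virtual
 * address space, as the gdbstub does.  Writes go through
 * cpu_physical_memory_write_rom() above, so breakpoint instructions can be
 * patched even into ROM.  The use of first_cpu and the function name are
 * hypothetical.
 */
#if 0
static int example_debug_poke(target_ulong vaddr, uint32_t insn)
{
    uint8_t buf[4];

    stl_p(buf, insn);            /* target-endian encoding of the word */
    return cpu_memory_rw_debug(first_cpu, vaddr, buf, sizeof(buf), 1);
}
#endif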
a68fe89c 2589#endif
13eb76e0 2590
8e4a424b
BS
2591#if !defined(CONFIG_USER_ONLY)
2592
2593/*
2594 * A helper function for the _utterly broken_ virtio device model to find out if
2595 * it's running on a big endian machine. Don't do this at home kids!
2596 */
2597bool virtio_is_big_endian(void);
2598bool virtio_is_big_endian(void)
2599{
2600#if defined(TARGET_WORDS_BIGENDIAN)
2601 return true;
2602#else
2603 return false;
2604#endif
2605}
2606
2607#endif
2608
76f35538 2609#ifndef CONFIG_USER_ONLY
a8170e5e 2610bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2611{
2612 MemoryRegionSection *section;
2613
ac1970fb
AK
2614 section = phys_page_find(address_space_memory.dispatch,
2615 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
2616
2617 return !(memory_region_is_ram(section->mr) ||
2618 memory_region_is_romd(section->mr));
2619}
2620#endif