/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


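/* Populate one level of the physical page map: record 'leaf' for the pages in
   [*index, *index + *nb), allocating intermediate nodes on demand.  A fully
   covered, aligned sub-tree at this level is stored as a single leaf entry;
   partially covered entries recurse one level down. */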
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

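/* Look up the MemoryRegionSection covering a physical page index by walking
   the multi-level map; pages with no mapping resolve to the unassigned
   section. */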
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

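/* Register a newly created CPU: link its env into the global CPU list,
   assign its cpu_index, and register the vmstate used for savevm/migration. */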
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);

}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

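/* Compute the iotlb value for a TLB entry: a ram address tagged with the
   notdirty or rom section for RAM-backed sections, or a section index for
   MMIO; pages covered by a watchpoint are redirected to the watch section. */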
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

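/* Listener callback for a newly added MemoryRegionSection: split off any
   unaligned head and tail as subpages and register the page-aligned middle
   part directly. */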
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

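/* Pick an offset in ram_addr_t space for a new block of 'size' bytes, using
   the smallest gap between existing blocks that is still large enough. */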
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

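/* Allocate (or adopt, when 'host' is non-NULL) guest RAM of 'size' bytes for
   'mr': pick an offset, insert the block into ram_list sorted by size, and
   mark the whole range dirty. */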
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

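/* Write handler used while a page still contains translated code: invalidate
   the affected TBs, perform the store, and update the dirty flags so the
   fast (non-trapping) path can be restored once the code has been flushed. */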
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

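/* Subpage dispatch: forward an access within a partially mapped page to the
   MemoryRegionSection registered for that sub-page offset. */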
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

ac1970fb
AK
1770void address_space_init_dispatch(AddressSpace *as)
1771{
1772 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1773
1774 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1775 d->listener = (MemoryListener) {
1776 .begin = mem_begin,
1777 .region_add = mem_add,
1778 .region_nop = mem_add,
1779 .priority = 0,
1780 };
1781 as->dispatch = d;
1782 memory_listener_register(&d->listener, as);
1783}
1784
83f3c251
AK
1785void address_space_destroy_dispatch(AddressSpace *as)
1786{
1787 AddressSpaceDispatch *d = as->dispatch;
1788
1789 memory_listener_unregister(&d->listener);
1790 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1791 g_free(d);
1792 as->dispatch = NULL;
1793}
1794
62152b8a
AK
1795static void memory_map_init(void)
1796{
7267c094 1797 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1798 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1799 address_space_init(&address_space_memory, system_memory);
1800 address_space_memory.name = "memory";
309cb471 1801
7267c094 1802 system_io = g_malloc(sizeof(*system_io));
309cb471 1803 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1804 address_space_init(&address_space_io, system_io);
1805 address_space_io.name = "I/O";
93632747 1806
f6790af6
AK
1807 memory_listener_register(&core_memory_listener, &address_space_memory);
1808 memory_listener_register(&io_memory_listener, &address_space_io);
1809 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1810
1811 dma_context_init(&dma_context_memory, &address_space_memory,
1812 NULL, NULL, NULL);
62152b8a
AK
1813}
1814
1815MemoryRegion *get_system_memory(void)
1816{
1817 return system_memory;
1818}
1819
309cb471
AK
1820MemoryRegion *get_system_io(void)
1821{
1822 return system_io;
1823}
1824
e2eef170
PB
1825#endif /* !defined(CONFIG_USER_ONLY) */
1826
13eb76e0
FB
1827/* physical memory access (slow version, mainly for debug) */
1828#if defined(CONFIG_USER_ONLY)
9349b4f9 1829int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1830 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1831{
1832 int l, flags;
1833 target_ulong page;
53a5960a 1834 void * p;
13eb76e0
FB
1835
1836 while (len > 0) {
1837 page = addr & TARGET_PAGE_MASK;
1838 l = (page + TARGET_PAGE_SIZE) - addr;
1839 if (l > len)
1840 l = len;
1841 flags = page_get_flags(page);
1842 if (!(flags & PAGE_VALID))
a68fe89c 1843 return -1;
13eb76e0
FB
1844 if (is_write) {
1845 if (!(flags & PAGE_WRITE))
a68fe89c 1846 return -1;
579a97f7 1847 /* XXX: this code should not depend on lock_user */
72fb7daa 1848 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1849 return -1;
72fb7daa
AJ
1850 memcpy(p, buf, l);
1851 unlock_user(p, addr, l);
13eb76e0
FB
1852 } else {
1853 if (!(flags & PAGE_READ))
a68fe89c 1854 return -1;
579a97f7 1855 /* XXX: this code should not depend on lock_user */
72fb7daa 1856 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1857 return -1;
72fb7daa 1858 memcpy(buf, p, l);
5b257578 1859 unlock_user(p, addr, 0);
13eb76e0
FB
1860 }
1861 len -= l;
1862 buf += l;
1863 addr += l;
1864 }
a68fe89c 1865 return 0;
13eb76e0 1866}
8df1cd07 1867
13eb76e0 1868#else
51d7a9eb 1869
a8170e5e
AK
1870static void invalidate_and_set_dirty(hwaddr addr,
1871 hwaddr length)
51d7a9eb
AP
1872{
1873 if (!cpu_physical_memory_is_dirty(addr)) {
1874 /* invalidate code */
1875 tb_invalidate_phys_page_range(addr, addr + length, 0);
1876 /* set dirty bit */
1877 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1878 }
e226939d 1879 xen_modified_memory(addr, length);
51d7a9eb
AP
1880}
1881
a8170e5e 1882void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1883 int len, bool is_write)
13eb76e0 1884{
ac1970fb 1885 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1886 int l;
13eb76e0
FB
1887 uint8_t *ptr;
1888 uint32_t val;
a8170e5e 1889 hwaddr page;
f3705d53 1890 MemoryRegionSection *section;
3b46e624 1891
13eb76e0
FB
1892 while (len > 0) {
1893 page = addr & TARGET_PAGE_MASK;
1894 l = (page + TARGET_PAGE_SIZE) - addr;
1895 if (l > len)
1896 l = len;
ac1970fb 1897 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1898
13eb76e0 1899 if (is_write) {
f3705d53 1900 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1901 hwaddr addr1;
cc5bea60 1902 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
1903 /* XXX: could force cpu_single_env to NULL to avoid
1904 potential bugs */
6c2934db 1905 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1906 /* 32 bit write access */
c27004ec 1907 val = ldl_p(buf);
37ec01d4 1908 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1909 l = 4;
6c2934db 1910 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1911 /* 16 bit write access */
c27004ec 1912 val = lduw_p(buf);
37ec01d4 1913 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
1914 l = 2;
1915 } else {
1c213d19 1916 /* 8 bit write access */
c27004ec 1917 val = ldub_p(buf);
37ec01d4 1918 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
1919 l = 1;
1920 }
f3705d53 1921 } else if (!section->readonly) {
8ca5692d 1922 ram_addr_t addr1;
f3705d53 1923 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1924 + memory_region_section_addr(section, addr);
13eb76e0 1925 /* RAM case */
5579c7f3 1926 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1927 memcpy(ptr, buf, l);
51d7a9eb 1928 invalidate_and_set_dirty(addr1, l);
050a0ddf 1929 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1930 }
1931 } else {
cc5bea60
BS
1932 if (!(memory_region_is_ram(section->mr) ||
1933 memory_region_is_romd(section->mr))) {
a8170e5e 1934 hwaddr addr1;
13eb76e0 1935 /* I/O case */
cc5bea60 1936 addr1 = memory_region_section_addr(section, addr);
6c2934db 1937 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1938 /* 32 bit read access */
37ec01d4 1939 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1940 stl_p(buf, val);
13eb76e0 1941 l = 4;
6c2934db 1942 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1943 /* 16 bit read access */
37ec01d4 1944 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1945 stw_p(buf, val);
13eb76e0
FB
1946 l = 2;
1947 } else {
1c213d19 1948 /* 8 bit read access */
37ec01d4 1949 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1950 stb_p(buf, val);
13eb76e0
FB
1951 l = 1;
1952 }
1953 } else {
1954 /* RAM case */
0a1b357f 1955 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
1956 + memory_region_section_addr(section,
1957 addr));
f3705d53 1958 memcpy(buf, ptr, l);
050a0ddf 1959 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1960 }
1961 }
1962 len -= l;
1963 buf += l;
1964 addr += l;
1965 }
1966}
8df1cd07 1967
a8170e5e 1968void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1969 const uint8_t *buf, int len)
1970{
1971 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1972}
1973
1974/**
1975 * address_space_read: read from an address space.
1976 *
1977 * @as: #AddressSpace to be accessed
1978 * @addr: address within that address space
1979 * @buf: buffer with the data transferred
 * @len: length of the data to be read
1980 */
a8170e5e 1981void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
1982{
1983 address_space_rw(as, addr, buf, len, false);
1984}
1985
1986
a8170e5e 1987void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1988 int len, int is_write)
1989{
1990 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1991}
1992
d0ecd2aa 1993/* used for ROM loading : can write in RAM and ROM */
a8170e5e 1994void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1995 const uint8_t *buf, int len)
1996{
ac1970fb 1997 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
1998 int l;
1999 uint8_t *ptr;
a8170e5e 2000 hwaddr page;
f3705d53 2001 MemoryRegionSection *section;
3b46e624 2002
d0ecd2aa
FB
2003 while (len > 0) {
2004 page = addr & TARGET_PAGE_MASK;
2005 l = (page + TARGET_PAGE_SIZE) - addr;
2006 if (l > len)
2007 l = len;
ac1970fb 2008 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 2009
cc5bea60
BS
2010 if (!(memory_region_is_ram(section->mr) ||
2011 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
2012 /* do nothing */
2013 } else {
2014 unsigned long addr1;
f3705d53 2015 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 2016 + memory_region_section_addr(section, addr);
d0ecd2aa 2017 /* ROM/RAM case */
5579c7f3 2018 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2019 memcpy(ptr, buf, l);
51d7a9eb 2020 invalidate_and_set_dirty(addr1, l);
050a0ddf 2021 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
2022 }
2023 len -= l;
2024 buf += l;
2025 addr += l;
2026 }
2027}
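/* A minimal usage sketch (assumed, not from this file): firmware loaders use
 * the ROM variant above because an ordinary write is silently skipped for
 * read-only regions.  "example_blob" and the base address are hypothetical. */
static void example_load_firmware(const uint8_t *example_blob, int size)
{
    cpu_physical_memory_write_rom(0xfffc0000, example_blob, size);
}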
2028
6d16c2f8
AL
2029typedef struct {
2030 void *buffer;
a8170e5e
AK
2031 hwaddr addr;
2032 hwaddr len;
6d16c2f8
AL
2033} BounceBuffer;
2034
2035static BounceBuffer bounce;
2036
ba223c29
AL
2037typedef struct MapClient {
2038 void *opaque;
2039 void (*callback)(void *opaque);
72cf2d4f 2040 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2041} MapClient;
2042
72cf2d4f
BS
2043static QLIST_HEAD(map_client_list, MapClient) map_client_list
2044 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2045
2046void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2047{
7267c094 2048 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2049
2050 client->opaque = opaque;
2051 client->callback = callback;
72cf2d4f 2052 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2053 return client;
2054}
2055
8b9c99d9 2056static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2057{
2058 MapClient *client = (MapClient *)_client;
2059
72cf2d4f 2060 QLIST_REMOVE(client, link);
7267c094 2061 g_free(client);
ba223c29
AL
2062}
2063
2064static void cpu_notify_map_clients(void)
2065{
2066 MapClient *client;
2067
72cf2d4f
BS
2068 while (!QLIST_EMPTY(&map_client_list)) {
2069 client = QLIST_FIRST(&map_client_list);
ba223c29 2070 client->callback(client->opaque);
34d5e948 2071 cpu_unregister_map_client(client);
ba223c29
AL
2072 }
2073}
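/* A minimal usage sketch (assumed, not from this file): when address_space_map()
 * cannot be satisfied because the single bounce buffer is in use, a caller can
 * register a callback that fires from cpu_notify_map_clients() once it frees
 * up.  "ExampleDMAState" and "example_dma_run" are hypothetical. */
typedef struct ExampleDMAState ExampleDMAState;
static void example_dma_run(void *opaque);            /* hypothetical retry hook */

static void example_dma_defer(ExampleDMAState *s)
{
    cpu_register_map_client(s, example_dma_run);
}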
2074
6d16c2f8
AL
2075/* Map a physical memory region into a host virtual address.
2076 * May map a subset of the requested range, given by and returned in *plen.
2077 * May return NULL if resources needed to perform the mapping are exhausted.
2078 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2079 * Use cpu_register_map_client() to know when retrying the map operation is
2080 * likely to succeed.
6d16c2f8 2081 */
ac1970fb 2082void *address_space_map(AddressSpace *as,
a8170e5e
AK
2083 hwaddr addr,
2084 hwaddr *plen,
ac1970fb 2085 bool is_write)
6d16c2f8 2086{
ac1970fb 2087 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
2088 hwaddr len = *plen;
2089 hwaddr todo = 0;
6d16c2f8 2090 int l;
a8170e5e 2091 hwaddr page;
f3705d53 2092 MemoryRegionSection *section;
f15fbc4b 2093 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2094 ram_addr_t rlen;
2095 void *ret;
6d16c2f8
AL
2096
2097 while (len > 0) {
2098 page = addr & TARGET_PAGE_MASK;
2099 l = (page + TARGET_PAGE_SIZE) - addr;
2100 if (l > len)
2101 l = len;
ac1970fb 2102 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2103
f3705d53 2104 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2105 if (todo || bounce.buffer) {
6d16c2f8
AL
2106 break;
2107 }
2108 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2109 bounce.addr = addr;
2110 bounce.len = l;
2111 if (!is_write) {
ac1970fb 2112 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2113 }
38bee5dc
SS
2114
2115 *plen = l;
2116 return bounce.buffer;
6d16c2f8 2117 }
8ab934f9 2118 if (!todo) {
f3705d53 2119 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2120 + memory_region_section_addr(section, addr);
8ab934f9 2121 }
6d16c2f8
AL
2122
2123 len -= l;
2124 addr += l;
38bee5dc 2125 todo += l;
6d16c2f8 2126 }
8ab934f9
SS
2127 rlen = todo;
2128 ret = qemu_ram_ptr_length(raddr, &rlen);
2129 *plen = rlen;
2130 return ret;
6d16c2f8
AL
2131}
2132
ac1970fb 2133/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2134 * Will also mark the memory as dirty if is_write == 1. access_len gives
2135 * the amount of memory that was actually read or written by the caller.
2136 */
a8170e5e
AK
2137void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2138 int is_write, hwaddr access_len)
6d16c2f8
AL
2139{
2140 if (buffer != bounce.buffer) {
2141 if (is_write) {
e890261f 2142 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2143 while (access_len) {
2144 unsigned l;
2145 l = TARGET_PAGE_SIZE;
2146 if (l > access_len)
2147 l = access_len;
51d7a9eb 2148 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2149 addr1 += l;
2150 access_len -= l;
2151 }
2152 }
868bb33f 2153 if (xen_enabled()) {
e41d7c69 2154 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2155 }
6d16c2f8
AL
2156 return;
2157 }
2158 if (is_write) {
ac1970fb 2159 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2160 }
f8a83245 2161 qemu_vfree(bounce.buffer);
6d16c2f8 2162 bounce.buffer = NULL;
ba223c29 2163 cpu_notify_map_clients();
6d16c2f8 2164}
d0ecd2aa 2165
a8170e5e
AK
2166void *cpu_physical_memory_map(hwaddr addr,
2167 hwaddr *plen,
ac1970fb
AK
2168 int is_write)
2169{
2170 return address_space_map(&address_space_memory, addr, plen, is_write);
2171}
2172
a8170e5e
AK
2173void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2174 int is_write, hwaddr access_len)
ac1970fb
AK
2175{
2176 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2177}
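/* A minimal usage sketch (assumed, not from this file): the usual pattern is
 * map, touch the returned host pointer directly, then unmap with the length
 * actually accessed so dirty tracking and bounce-buffer flushing stay correct.
 * "example_addr" and "example_len" are hypothetical. */
static bool example_zero_guest_range(hwaddr example_addr, hwaddr example_len)
{
    hwaddr plen = example_len;
    void *host = cpu_physical_memory_map(example_addr, &plen, 1 /* is_write */);

    if (!host) {
        return false;            /* mapping resources exhausted, retry later */
    }
    memset(host, 0, plen);       /* plen may be smaller than requested */
    cpu_physical_memory_unmap(host, plen, 1, plen);
    return true;
}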
2178
8df1cd07 2179/* warning: addr must be aligned */
a8170e5e 2180static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2181 enum device_endian endian)
8df1cd07 2182{
8df1cd07
FB
2183 uint8_t *ptr;
2184 uint32_t val;
f3705d53 2185 MemoryRegionSection *section;
8df1cd07 2186
ac1970fb 2187 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2188
cc5bea60
BS
2189 if (!(memory_region_is_ram(section->mr) ||
2190 memory_region_is_romd(section->mr))) {
8df1cd07 2191 /* I/O case */
cc5bea60 2192 addr = memory_region_section_addr(section, addr);
37ec01d4 2193 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
2194#if defined(TARGET_WORDS_BIGENDIAN)
2195 if (endian == DEVICE_LITTLE_ENDIAN) {
2196 val = bswap32(val);
2197 }
2198#else
2199 if (endian == DEVICE_BIG_ENDIAN) {
2200 val = bswap32(val);
2201 }
2202#endif
8df1cd07
FB
2203 } else {
2204 /* RAM case */
f3705d53 2205 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2206 & TARGET_PAGE_MASK)
cc5bea60 2207 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2208 switch (endian) {
2209 case DEVICE_LITTLE_ENDIAN:
2210 val = ldl_le_p(ptr);
2211 break;
2212 case DEVICE_BIG_ENDIAN:
2213 val = ldl_be_p(ptr);
2214 break;
2215 default:
2216 val = ldl_p(ptr);
2217 break;
2218 }
8df1cd07
FB
2219 }
2220 return val;
2221}
2222
a8170e5e 2223uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2224{
2225 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2226}
2227
a8170e5e 2228uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2229{
2230 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2231}
2232
a8170e5e 2233uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2234{
2235 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2236}
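/* A minimal usage sketch (assumed, not from this file): the _le/_be variants
 * let device code read guest structures in a fixed byte order regardless of
 * the target's native endianness.  "example_desc_addr" is hypothetical. */
static uint64_t example_read_le_descriptor(hwaddr example_desc_addr)
{
    uint32_t lo = ldl_le_phys(example_desc_addr);
    uint32_t hi = ldl_le_phys(example_desc_addr + 4);

    return ((uint64_t)hi << 32) | lo;
}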
2237
84b7b8e7 2238/* warning: addr must be aligned */
a8170e5e 2239static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2240 enum device_endian endian)
84b7b8e7 2241{
84b7b8e7
FB
2242 uint8_t *ptr;
2243 uint64_t val;
f3705d53 2244 MemoryRegionSection *section;
84b7b8e7 2245
ac1970fb 2246 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2247
cc5bea60
BS
2248 if (!(memory_region_is_ram(section->mr) ||
2249 memory_region_is_romd(section->mr))) {
84b7b8e7 2250 /* I/O case */
cc5bea60 2251 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
2252
2253 /* XXX This is broken when device endian != cpu endian.
2254 Fix and add "endian" variable check */
84b7b8e7 2255#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2256 val = io_mem_read(section->mr, addr, 4) << 32;
2257 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2258#else
37ec01d4
AK
2259 val = io_mem_read(section->mr, addr, 4);
2260 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
2261#endif
2262 } else {
2263 /* RAM case */
f3705d53 2264 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2265 & TARGET_PAGE_MASK)
cc5bea60 2266 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2267 switch (endian) {
2268 case DEVICE_LITTLE_ENDIAN:
2269 val = ldq_le_p(ptr);
2270 break;
2271 case DEVICE_BIG_ENDIAN:
2272 val = ldq_be_p(ptr);
2273 break;
2274 default:
2275 val = ldq_p(ptr);
2276 break;
2277 }
84b7b8e7
FB
2278 }
2279 return val;
2280}
2281
a8170e5e 2282uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2283{
2284 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2285}
2286
a8170e5e 2287uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2288{
2289 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2290}
2291
a8170e5e 2292uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2293{
2294 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2295}
2296
aab33094 2297/* XXX: optimize */
a8170e5e 2298uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2299{
2300 uint8_t val;
2301 cpu_physical_memory_read(addr, &val, 1);
2302 return val;
2303}
2304
733f0b02 2305/* warning: addr must be aligned */
a8170e5e 2306static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2307 enum device_endian endian)
aab33094 2308{
733f0b02
MT
2309 uint8_t *ptr;
2310 uint64_t val;
f3705d53 2311 MemoryRegionSection *section;
733f0b02 2312
ac1970fb 2313 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2314
cc5bea60
BS
2315 if (!(memory_region_is_ram(section->mr) ||
2316 memory_region_is_romd(section->mr))) {
733f0b02 2317 /* I/O case */
cc5bea60 2318 addr = memory_region_section_addr(section, addr);
37ec01d4 2319 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
2320#if defined(TARGET_WORDS_BIGENDIAN)
2321 if (endian == DEVICE_LITTLE_ENDIAN) {
2322 val = bswap16(val);
2323 }
2324#else
2325 if (endian == DEVICE_BIG_ENDIAN) {
2326 val = bswap16(val);
2327 }
2328#endif
733f0b02
MT
2329 } else {
2330 /* RAM case */
f3705d53 2331 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2332 & TARGET_PAGE_MASK)
cc5bea60 2333 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2334 switch (endian) {
2335 case DEVICE_LITTLE_ENDIAN:
2336 val = lduw_le_p(ptr);
2337 break;
2338 case DEVICE_BIG_ENDIAN:
2339 val = lduw_be_p(ptr);
2340 break;
2341 default:
2342 val = lduw_p(ptr);
2343 break;
2344 }
733f0b02
MT
2345 }
2346 return val;
aab33094
FB
2347}
2348
a8170e5e 2349uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2350{
2351 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2352}
2353
a8170e5e 2354uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2355{
2356 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2357}
2358
a8170e5e 2359uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2360{
2361 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2362}
2363
8df1cd07
FB
2364/* warning: addr must be aligned. The ram page is not marked as dirty
2365 and the code inside is not invalidated. It is useful if the dirty
2366 bits are used to track modified PTEs */
a8170e5e 2367void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2368{
8df1cd07 2369 uint8_t *ptr;
f3705d53 2370 MemoryRegionSection *section;
8df1cd07 2371
ac1970fb 2372 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2373
f3705d53 2374 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2375 addr = memory_region_section_addr(section, addr);
f3705d53 2376 if (memory_region_is_ram(section->mr)) {
37ec01d4 2377 section = &phys_sections[phys_section_rom];
06ef3525 2378 }
37ec01d4 2379 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2380 } else {
f3705d53 2381 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2382 & TARGET_PAGE_MASK)
cc5bea60 2383 + memory_region_section_addr(section, addr);
5579c7f3 2384 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2385 stl_p(ptr, val);
74576198
AL
2386
2387 if (unlikely(in_migration)) {
2388 if (!cpu_physical_memory_is_dirty(addr1)) {
2389 /* invalidate code */
2390 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2391 /* set dirty bit */
f7c11b53
YT
2392 cpu_physical_memory_set_dirty_flags(
2393 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2394 }
2395 }
8df1cd07
FB
2396 }
2397}
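/* A minimal usage sketch (assumed, not from this file): target MMU emulation
 * uses the _notdirty variant when it rewrites a page-table entry to set
 * accessed/dirty bits, because that store must not flag the PTE's page as
 * modified code.  "example_pte_addr" and the bit value are hypothetical. */
static void example_set_pte_dirty(hwaddr example_pte_addr)
{
    uint32_t pte = ldl_phys(example_pte_addr);

    pte |= 0x40;                         /* hypothetical "dirty" PTE bit */
    stl_phys_notdirty(example_pte_addr, pte);
}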
2398
a8170e5e 2399void stq_phys_notdirty(hwaddr addr, uint64_t val)
bc98a7ef 2400{
bc98a7ef 2401 uint8_t *ptr;
f3705d53 2402 MemoryRegionSection *section;
bc98a7ef 2403
ac1970fb 2404 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2405
f3705d53 2406 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2407 addr = memory_region_section_addr(section, addr);
f3705d53 2408 if (memory_region_is_ram(section->mr)) {
37ec01d4 2409 section = &phys_sections[phys_section_rom];
06ef3525 2410 }
bc98a7ef 2411#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2412 io_mem_write(section->mr, addr, val >> 32, 4);
2413 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 2414#else
37ec01d4
AK
2415 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2416 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
2417#endif
2418 } else {
f3705d53 2419 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2420 & TARGET_PAGE_MASK)
cc5bea60 2421 + memory_region_section_addr(section, addr));
bc98a7ef
JM
2422 stq_p(ptr, val);
2423 }
2424}
2425
8df1cd07 2426/* warning: addr must be aligned */
a8170e5e 2427static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2428 enum device_endian endian)
8df1cd07 2429{
8df1cd07 2430 uint8_t *ptr;
f3705d53 2431 MemoryRegionSection *section;
8df1cd07 2432
ac1970fb 2433 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2434
f3705d53 2435 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2436 addr = memory_region_section_addr(section, addr);
f3705d53 2437 if (memory_region_is_ram(section->mr)) {
37ec01d4 2438 section = &phys_sections[phys_section_rom];
06ef3525 2439 }
1e78bcc1
AG
2440#if defined(TARGET_WORDS_BIGENDIAN)
2441 if (endian == DEVICE_LITTLE_ENDIAN) {
2442 val = bswap32(val);
2443 }
2444#else
2445 if (endian == DEVICE_BIG_ENDIAN) {
2446 val = bswap32(val);
2447 }
2448#endif
37ec01d4 2449 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
2450 } else {
2451 unsigned long addr1;
f3705d53 2452 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2453 + memory_region_section_addr(section, addr);
8df1cd07 2454 /* RAM case */
5579c7f3 2455 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2456 switch (endian) {
2457 case DEVICE_LITTLE_ENDIAN:
2458 stl_le_p(ptr, val);
2459 break;
2460 case DEVICE_BIG_ENDIAN:
2461 stl_be_p(ptr, val);
2462 break;
2463 default:
2464 stl_p(ptr, val);
2465 break;
2466 }
51d7a9eb 2467 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2468 }
2469}
2470
a8170e5e 2471void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2472{
2473 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2474}
2475
a8170e5e 2476void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2477{
2478 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2479}
2480
a8170e5e 2481void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2482{
2483 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2484}
2485
aab33094 2486/* XXX: optimize */
a8170e5e 2487void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2488{
2489 uint8_t v = val;
2490 cpu_physical_memory_write(addr, &v, 1);
2491}
2492
733f0b02 2493/* warning: addr must be aligned */
a8170e5e 2494static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2495 enum device_endian endian)
aab33094 2496{
733f0b02 2497 uint8_t *ptr;
f3705d53 2498 MemoryRegionSection *section;
733f0b02 2499
ac1970fb 2500 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2501
f3705d53 2502 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2503 addr = memory_region_section_addr(section, addr);
f3705d53 2504 if (memory_region_is_ram(section->mr)) {
37ec01d4 2505 section = &phys_sections[phys_section_rom];
06ef3525 2506 }
1e78bcc1
AG
2507#if defined(TARGET_WORDS_BIGENDIAN)
2508 if (endian == DEVICE_LITTLE_ENDIAN) {
2509 val = bswap16(val);
2510 }
2511#else
2512 if (endian == DEVICE_BIG_ENDIAN) {
2513 val = bswap16(val);
2514 }
2515#endif
37ec01d4 2516 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
2517 } else {
2518 unsigned long addr1;
f3705d53 2519 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2520 + memory_region_section_addr(section, addr);
733f0b02
MT
2521 /* RAM case */
2522 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2523 switch (endian) {
2524 case DEVICE_LITTLE_ENDIAN:
2525 stw_le_p(ptr, val);
2526 break;
2527 case DEVICE_BIG_ENDIAN:
2528 stw_be_p(ptr, val);
2529 break;
2530 default:
2531 stw_p(ptr, val);
2532 break;
2533 }
51d7a9eb 2534 invalidate_and_set_dirty(addr1, 2);
733f0b02 2535 }
aab33094
FB
2536}
2537
a8170e5e 2538void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2539{
2540 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2541}
2542
a8170e5e 2543void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2544{
2545 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2546}
2547
a8170e5e 2548void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2549{
2550 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2551}
2552
aab33094 2553/* XXX: optimize */
a8170e5e 2554void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2555{
2556 val = tswap64(val);
71d2b725 2557 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2558}
2559
a8170e5e 2560void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2561{
2562 val = cpu_to_le64(val);
2563 cpu_physical_memory_write(addr, &val, 8);
2564}
2565
a8170e5e 2566void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2567{
2568 val = cpu_to_be64(val);
2569 cpu_physical_memory_write(addr, &val, 8);
2570}
2571
5e2972fd 2572/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2573int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2574 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2575{
2576 int l;
a8170e5e 2577 hwaddr phys_addr;
9b3c35e0 2578 target_ulong page;
13eb76e0
FB
2579
2580 while (len > 0) {
2581 page = addr & TARGET_PAGE_MASK;
2582 phys_addr = cpu_get_phys_page_debug(env, page);
2583 /* if no physical page mapped, return an error */
2584 if (phys_addr == -1)
2585 return -1;
2586 l = (page + TARGET_PAGE_SIZE) - addr;
2587 if (l > len)
2588 l = len;
5e2972fd 2589 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2590 if (is_write)
2591 cpu_physical_memory_write_rom(phys_addr, buf, l);
2592 else
5e2972fd 2593 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2594 len -= l;
2595 buf += l;
2596 addr += l;
2597 }
2598 return 0;
2599}
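/* A minimal usage sketch (assumed, not from this file): the gdb stub and the
 * monitor go through this helper because it takes guest-virtual addresses,
 * crosses page boundaries, and writes through to ROM (e.g. when planting
 * breakpoints).  "env" and "pc" are hypothetical. */
static bool example_read_insn(CPUArchState *env, target_ulong pc, uint32_t *insn)
{
    return cpu_memory_rw_debug(env, pc, (uint8_t *)insn, sizeof(*insn), 0) == 0;
}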
a68fe89c 2600#endif
13eb76e0 2601
8e4a424b
BS
2602#if !defined(CONFIG_USER_ONLY)
2603
2604/*
2605 * A helper function for the _utterly broken_ virtio device model to find out if
2606 * it's running on a big endian machine. Don't do this at home kids!
2607 */
2608bool virtio_is_big_endian(void);
2609bool virtio_is_big_endian(void)
2610{
2611#if defined(TARGET_WORDS_BIGENDIAN)
2612 return true;
2613#else
2614 return false;
2615#endif
2616}
2617
2618#endif
2619
76f35538 2620#ifndef CONFIG_USER_ONLY
a8170e5e 2621bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2622{
2623 MemoryRegionSection *section;
2624
ac1970fb
AK
2625 section = phys_page_find(address_space_memory.dispatch,
2626 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
2627
2628 return !(memory_region_is_ram(section->mr) ||
2629 memory_region_is_romd(section->mr));
2630}
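/* A minimal usage sketch (assumed, not from this file): callers such as KVM's
 * machine-check handling use the predicate above to check that a faulting
 * guest address is backed by RAM before acting on it.  "example_paddr" is
 * hypothetical. */
static bool example_addr_is_ram(hwaddr example_paddr)
{
    return !cpu_physical_memory_is_io(example_paddr);
}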
2631#endif