[qemu.git] / exec.c
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
0d09e41a 34#include "hw/xen/xen.h"
1de7afc9
PB
35#include "qemu/timer.h"
36#include "qemu/config-file.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
67d95c15 52
67d3b957 53//#define DEBUG_UNASSIGNED
db7b5426 54//#define DEBUG_SUBPAGE
1196be37 55
e2eef170 56#if !defined(CONFIG_USER_ONLY)
9fa3e853 57int phys_ram_fd;
74576198 58static int in_migration;
94a6b54f 59
a3161038 60RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
61
62static MemoryRegion *system_memory;
309cb471 63static MemoryRegion *system_io;
62152b8a 64
f6790af6
AK
65AddressSpace address_space_io;
66AddressSpace address_space_memory;
9e11908f 67DMAContext dma_context_memory;
2673a5da 68
0e0df1e2 69MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
de712f94 70static MemoryRegion io_mem_subpage_ram;
0e0df1e2 71
e2eef170 72#endif
9fa3e853 73
9349b4f9 74CPUArchState *first_cpu;
6a00d601
FB
75/* current CPU in the current thread. It is only valid inside
76 cpu_exec() */
9349b4f9 77DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 78/* 0 = Do not count executed instructions.
bf20dc07 79 1 = Precise instruction counting.
2e70f6ef 80 2 = Adaptive rate instruction counting. */
5708fc66 81int use_icount;
6a00d601 82
e2eef170 83#if !defined(CONFIG_USER_ONLY)
4346ae3e 84
5312bd8b
AK
85static MemoryRegionSection *phys_sections;
86static unsigned phys_sections_nb, phys_sections_nb_alloc;
87static uint16_t phys_section_unassigned;
aa102231
AK
88static uint16_t phys_section_notdirty;
89static uint16_t phys_section_rom;
90static uint16_t phys_section_watch;
5312bd8b 91
d6f2ea22
AK
92/* Simple allocator for PhysPageEntry nodes */
93static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
94static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
95
07f07b31 96#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 97
e2eef170 98static void io_mem_init(void);
62152b8a 99static void memory_map_init(void);
8b9c99d9 100static void *qemu_safe_ram_ptr(ram_addr_t addr);
e2eef170 101
1ec9b909 102static MemoryRegion io_mem_watch;
6658ffb8 103#endif
fd6ce8f6 104
6d9a1304 105#if !defined(CONFIG_USER_ONLY)
d6f2ea22 106
f7bf5461 107static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 108{
f7bf5461 109 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
d6f2ea22
AK
110 typedef PhysPageEntry Node[L2_SIZE];
111 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
f7bf5461
AK
112 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
113 phys_map_nodes_nb + nodes);
d6f2ea22
AK
114 phys_map_nodes = g_renew(Node, phys_map_nodes,
115 phys_map_nodes_nb_alloc);
116 }
f7bf5461
AK
117}
118
119static uint16_t phys_map_node_alloc(void)
120{
121 unsigned i;
122 uint16_t ret;
123
124 ret = phys_map_nodes_nb++;
125 assert(ret != PHYS_MAP_NODE_NIL);
126 assert(ret != phys_map_nodes_nb_alloc);
d6f2ea22 127 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 128 phys_map_nodes[ret][i].is_leaf = 0;
c19e8800 129 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 130 }
f7bf5461 131 return ret;
d6f2ea22
AK
132}
133
134static void phys_map_nodes_reset(void)
135{
136 phys_map_nodes_nb = 0;
137}
138
92e873b9 139
a8170e5e
AK
140static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
141 hwaddr *nb, uint16_t leaf,
2999097b 142 int level)
f7bf5461
AK
143{
144 PhysPageEntry *p;
145 int i;
a8170e5e 146 hwaddr step = (hwaddr)1 << (level * L2_BITS);
108c49b8 147
07f07b31 148 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800
AK
149 lp->ptr = phys_map_node_alloc();
150 p = phys_map_nodes[lp->ptr];
f7bf5461
AK
151 if (level == 0) {
152 for (i = 0; i < L2_SIZE; i++) {
07f07b31 153 p[i].is_leaf = 1;
c19e8800 154 p[i].ptr = phys_section_unassigned;
4346ae3e 155 }
67c4d23c 156 }
f7bf5461 157 } else {
c19e8800 158 p = phys_map_nodes[lp->ptr];
92e873b9 159 }
2999097b 160 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 161
2999097b 162 while (*nb && lp < &p[L2_SIZE]) {
07f07b31
AK
163 if ((*index & (step - 1)) == 0 && *nb >= step) {
164 lp->is_leaf = true;
c19e8800 165 lp->ptr = leaf;
07f07b31
AK
166 *index += step;
167 *nb -= step;
2999097b
AK
168 } else {
169 phys_page_set_level(lp, index, nb, leaf, level - 1);
170 }
171 ++lp;
f7bf5461
AK
172 }
173}
174
ac1970fb 175static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 176 hwaddr index, hwaddr nb,
2999097b 177 uint16_t leaf)
f7bf5461 178{
2999097b 179 /* Wildly overreserve - it doesn't matter much. */
07f07b31 180 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 181
ac1970fb 182 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
183}
184
a8170e5e 185MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
92e873b9 186{
ac1970fb 187 PhysPageEntry lp = d->phys_map;
31ab2b4a
AK
188 PhysPageEntry *p;
189 int i;
31ab2b4a 190 uint16_t s_index = phys_section_unassigned;
f1f6e3b8 191
07f07b31 192 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 193 if (lp.ptr == PHYS_MAP_NODE_NIL) {
31ab2b4a
AK
194 goto not_found;
195 }
c19e8800 196 p = phys_map_nodes[lp.ptr];
31ab2b4a 197 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 198 }
31ab2b4a 199
c19e8800 200 s_index = lp.ptr;
31ab2b4a 201not_found:
f3705d53
AK
202 return &phys_sections[s_index];
203}
204
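An illustrative editor's sketch (not part of exec.c): phys_page_set() and phys_page_find() maintain a P_L2_LEVELS-deep radix tree, L2_BITS bits per level, mapping physical page numbers to indices into phys_sections[]. The wrapper below is hypothetical and only shows how the pair is used; register_multipage() later in this file does the same on the set side.

/* Hypothetical helper, for illustration only. */
static MemoryRegionSection *lookup_section(AddressSpaceDispatch *d, hwaddr addr)
{
    /* Unset leaves resolve to phys_section_unassigned. */
    return phys_page_find(d, addr >> TARGET_PAGE_BITS);
}

/* Populating a range: one section index per covered page (usage fragment). */
phys_page_set(d, base >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
              phys_section_add(&section));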
e5548617
BS
205bool memory_region_is_unassigned(MemoryRegion *mr)
206{
207 return mr != &io_mem_ram && mr != &io_mem_rom
208 && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 209 && mr != &io_mem_watch;
fd6ce8f6 210}
5b6dd868 211#endif
fd6ce8f6 212
5b6dd868 213void cpu_exec_init_all(void)
fdbb84d1 214{
5b6dd868 215#if !defined(CONFIG_USER_ONLY)
b2a8658e 216 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
217 memory_map_init();
218 io_mem_init();
fdbb84d1 219#endif
5b6dd868 220}
fdbb84d1 221
b170fce3 222#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
223
224static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 225{
259186a7 226 CPUState *cpu = opaque;
a513fe19 227
5b6dd868
BS
228 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
229 version_id is increased. */
259186a7
AF
230 cpu->interrupt_request &= ~0x01;
231 tlb_flush(cpu->env_ptr, 1);
5b6dd868
BS
232
233 return 0;
a513fe19 234}
7501267e 235
5b6dd868
BS
236static const VMStateDescription vmstate_cpu_common = {
237 .name = "cpu_common",
238 .version_id = 1,
239 .minimum_version_id = 1,
240 .minimum_version_id_old = 1,
241 .post_load = cpu_common_post_load,
242 .fields = (VMStateField []) {
259186a7
AF
243 VMSTATE_UINT32(halted, CPUState),
244 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868
BS
245 VMSTATE_END_OF_LIST()
246 }
247};
b170fce3
AF
248#else
249#define vmstate_cpu_common vmstate_dummy
5b6dd868 250#endif
ea041c0e 251
38d8f5c8 252CPUState *qemu_get_cpu(int index)
ea041c0e 253{
5b6dd868 254 CPUArchState *env = first_cpu;
38d8f5c8 255 CPUState *cpu = NULL;
ea041c0e 256
5b6dd868 257 while (env) {
55e5c285
AF
258 cpu = ENV_GET_CPU(env);
259 if (cpu->cpu_index == index) {
5b6dd868 260 break;
55e5c285 261 }
5b6dd868 262 env = env->next_cpu;
ea041c0e 263 }
5b6dd868 264
d76fddae 265 return env ? cpu : NULL;
ea041c0e
FB
266}
267
d6b9e0d6
MT
268void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
269{
270 CPUArchState *env = first_cpu;
271
272 while (env) {
273 func(ENV_GET_CPU(env), data);
274 env = env->next_cpu;
275 }
276}
277
5b6dd868 278void cpu_exec_init(CPUArchState *env)
ea041c0e 279{
5b6dd868 280 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 281 CPUClass *cc = CPU_GET_CLASS(cpu);
5b6dd868
BS
282 CPUArchState **penv;
283 int cpu_index;
284
285#if defined(CONFIG_USER_ONLY)
286 cpu_list_lock();
287#endif
288 env->next_cpu = NULL;
289 penv = &first_cpu;
290 cpu_index = 0;
291 while (*penv != NULL) {
292 penv = &(*penv)->next_cpu;
293 cpu_index++;
294 }
55e5c285 295 cpu->cpu_index = cpu_index;
1b1ed8dc 296 cpu->numa_node = 0;
5b6dd868
BS
297 QTAILQ_INIT(&env->breakpoints);
298 QTAILQ_INIT(&env->watchpoints);
299#ifndef CONFIG_USER_ONLY
300 cpu->thread_id = qemu_get_thread_id();
301#endif
302 *penv = env;
303#if defined(CONFIG_USER_ONLY)
304 cpu_list_unlock();
305#endif
259186a7 306 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
5b6dd868 307#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
308 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
309 cpu_save, cpu_load, env);
b170fce3 310 assert(cc->vmsd == NULL);
5b6dd868 311#endif
b170fce3
AF
312 if (cc->vmsd != NULL) {
313 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
314 }
ea041c0e
FB
315}
316
1fddef4b 317#if defined(TARGET_HAS_ICE)
94df27fd 318#if defined(CONFIG_USER_ONLY)
9349b4f9 319static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
94df27fd
PB
320{
321 tb_invalidate_phys_page_range(pc, pc + 1, 0);
322}
323#else
1e7855a5
MF
324static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
325{
9d70c4b7
MF
326 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
327 (pc & ~TARGET_PAGE_MASK));
1e7855a5 328}
c27004ec 329#endif
94df27fd 330#endif /* TARGET_HAS_ICE */
d720b93d 331
c527ee8f 332#if defined(CONFIG_USER_ONLY)
9349b4f9 333void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f
PB
334
335{
336}
337
9349b4f9 338int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f
PB
339 int flags, CPUWatchpoint **watchpoint)
340{
341 return -ENOSYS;
342}
343#else
6658ffb8 344/* Add a watchpoint. */
9349b4f9 345int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 346 int flags, CPUWatchpoint **watchpoint)
6658ffb8 347{
b4051334 348 target_ulong len_mask = ~(len - 1);
c0ce998e 349 CPUWatchpoint *wp;
6658ffb8 350
b4051334 351 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
352 if ((len & (len - 1)) || (addr & ~len_mask) ||
353 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
354 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
355 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
356 return -EINVAL;
357 }
7267c094 358 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
359
360 wp->vaddr = addr;
b4051334 361 wp->len_mask = len_mask;
a1d1bb31
AL
362 wp->flags = flags;
363
2dc9f411 364 /* keep all GDB-injected watchpoints in front */
c0ce998e 365 if (flags & BP_GDB)
72cf2d4f 366 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 367 else
72cf2d4f 368 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 369
6658ffb8 370 tlb_flush_page(env, addr);
a1d1bb31
AL
371
372 if (watchpoint)
373 *watchpoint = wp;
374 return 0;
6658ffb8
PB
375}
376
a1d1bb31 377/* Remove a specific watchpoint. */
9349b4f9 378int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 379 int flags)
6658ffb8 380{
b4051334 381 target_ulong len_mask = ~(len - 1);
a1d1bb31 382 CPUWatchpoint *wp;
6658ffb8 383
72cf2d4f 384 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 385 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 386 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 387 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
388 return 0;
389 }
390 }
a1d1bb31 391 return -ENOENT;
6658ffb8
PB
392}
393
a1d1bb31 394/* Remove a specific watchpoint by reference. */
9349b4f9 395void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 396{
72cf2d4f 397 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 398
a1d1bb31
AL
399 tlb_flush_page(env, watchpoint->vaddr);
400
7267c094 401 g_free(watchpoint);
a1d1bb31
AL
402}
403
404/* Remove all matching watchpoints. */
9349b4f9 405void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 406{
c0ce998e 407 CPUWatchpoint *wp, *next;
a1d1bb31 408
72cf2d4f 409 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
410 if (wp->flags & mask)
411 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 412 }
7d03f82f 413}
c527ee8f 414#endif
7d03f82f 415
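A hedged usage sketch (editor's addition, not part of exec.c): the gdbstub and target code drive the watchpoint helpers above roughly as follows. The flag names BP_GDB and BP_MEM_WRITE come from QEMU's CPU headers of this era; "addr" and the error handling are placeholders.

CPUWatchpoint *wp;
/* Length must be a power of two and addr aligned to it (checked above);
 * user-mode builds return -ENOSYS from the stub at the top of this block. */
if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) < 0) {
    /* -EINVAL: bad length or alignment */
}
/* ... later ... */
cpu_watchpoint_remove_by_ref(env, wp);
/* or drop every GDB-owned watchpoint at once: */
cpu_watchpoint_remove_all(env, BP_GDB);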
a1d1bb31 416/* Add a breakpoint. */
9349b4f9 417int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 418 CPUBreakpoint **breakpoint)
4c3a88a2 419{
1fddef4b 420#if defined(TARGET_HAS_ICE)
c0ce998e 421 CPUBreakpoint *bp;
3b46e624 422
7267c094 423 bp = g_malloc(sizeof(*bp));
4c3a88a2 424
a1d1bb31
AL
425 bp->pc = pc;
426 bp->flags = flags;
427
2dc9f411 428 /* keep all GDB-injected breakpoints in front */
c0ce998e 429 if (flags & BP_GDB)
72cf2d4f 430 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 431 else
72cf2d4f 432 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 433
d720b93d 434 breakpoint_invalidate(env, pc);
a1d1bb31
AL
435
436 if (breakpoint)
437 *breakpoint = bp;
4c3a88a2
FB
438 return 0;
439#else
a1d1bb31 440 return -ENOSYS;
4c3a88a2
FB
441#endif
442}
443
a1d1bb31 444/* Remove a specific breakpoint. */
9349b4f9 445int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 446{
7d03f82f 447#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
448 CPUBreakpoint *bp;
449
72cf2d4f 450 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
451 if (bp->pc == pc && bp->flags == flags) {
452 cpu_breakpoint_remove_by_ref(env, bp);
453 return 0;
454 }
7d03f82f 455 }
a1d1bb31
AL
456 return -ENOENT;
457#else
458 return -ENOSYS;
7d03f82f
EI
459#endif
460}
461
a1d1bb31 462/* Remove a specific breakpoint by reference. */
9349b4f9 463void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 464{
1fddef4b 465#if defined(TARGET_HAS_ICE)
72cf2d4f 466 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 467
a1d1bb31
AL
468 breakpoint_invalidate(env, breakpoint->pc);
469
7267c094 470 g_free(breakpoint);
a1d1bb31
AL
471#endif
472}
473
474/* Remove all matching breakpoints. */
9349b4f9 475void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
476{
477#if defined(TARGET_HAS_ICE)
c0ce998e 478 CPUBreakpoint *bp, *next;
a1d1bb31 479
72cf2d4f 480 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
481 if (bp->flags & mask)
482 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 483 }
4c3a88a2
FB
484#endif
485}
486
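The breakpoint side is symmetric; a short hedged sketch (editor's addition): inserting a breakpoint retranslates the affected TB via breakpoint_invalidate() so the next execution of that pc traps back to the CPU loop with a debug exception.

if (cpu_breakpoint_insert(env, pc, BP_GDB, NULL) < 0) {
    /* -ENOSYS when TARGET_HAS_ICE is not defined for this target */
}
/* ... */
cpu_breakpoint_remove(env, pc, BP_GDB);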
c33a346e
FB
487/* enable or disable single step mode. EXCP_DEBUG is returned by the
488 CPU loop after each instruction */
9349b4f9 489void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 490{
1fddef4b 491#if defined(TARGET_HAS_ICE)
c33a346e
FB
492 if (env->singlestep_enabled != enabled) {
493 env->singlestep_enabled = enabled;
e22a25c9
AL
494 if (kvm_enabled())
495 kvm_update_guest_debug(env, 0);
496 else {
ccbb4d44 497 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
498 /* XXX: only flush what is necessary */
499 tb_flush(env);
500 }
c33a346e
FB
501 }
502#endif
503}
504
9349b4f9 505void cpu_exit(CPUArchState *env)
3098dba0 506{
fcd7d003
AF
507 CPUState *cpu = ENV_GET_CPU(env);
508
509 cpu->exit_request = 1;
378df4b2 510 cpu->tcg_exit_req = 1;
3098dba0
AJ
511}
512
9349b4f9 513void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
514{
515 va_list ap;
493ae1f0 516 va_list ap2;
7501267e
FB
517
518 va_start(ap, fmt);
493ae1f0 519 va_copy(ap2, ap);
7501267e
FB
520 fprintf(stderr, "qemu: fatal: ");
521 vfprintf(stderr, fmt, ap);
522 fprintf(stderr, "\n");
6fd2a026 523 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
524 if (qemu_log_enabled()) {
525 qemu_log("qemu: fatal: ");
526 qemu_log_vprintf(fmt, ap2);
527 qemu_log("\n");
6fd2a026 528 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 529 qemu_log_flush();
93fcfe39 530 qemu_log_close();
924edcae 531 }
493ae1f0 532 va_end(ap2);
f9373291 533 va_end(ap);
fd052bf6
RV
534#if defined(CONFIG_USER_ONLY)
535 {
536 struct sigaction act;
537 sigfillset(&act.sa_mask);
538 act.sa_handler = SIG_DFL;
539 sigaction(SIGABRT, &act, NULL);
540 }
541#endif
7501267e
FB
542 abort();
543}
544
9349b4f9 545CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 546{
9349b4f9
AF
547 CPUArchState *new_env = cpu_init(env->cpu_model_str);
548 CPUArchState *next_cpu = new_env->next_cpu;
5a38f081
AL
549#if defined(TARGET_HAS_ICE)
550 CPUBreakpoint *bp;
551 CPUWatchpoint *wp;
552#endif
553
9349b4f9 554 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081 555
55e5c285 556 /* Preserve chaining. */
c5be9f08 557 new_env->next_cpu = next_cpu;
5a38f081
AL
558
559 /* Clone all break/watchpoints.
560 Note: Once we support ptrace with hw-debug register access, make sure
561 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
562 QTAILQ_INIT(&env->breakpoints);
563 QTAILQ_INIT(&env->watchpoints);
5a38f081 564#if defined(TARGET_HAS_ICE)
72cf2d4f 565 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
566 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
567 }
72cf2d4f 568 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
569 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
570 wp->flags, NULL);
571 }
572#endif
573
c5be9f08
TS
574 return new_env;
575}
576
0124311e 577#if !defined(CONFIG_USER_ONLY)
d24981d3
JQ
578static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
579 uintptr_t length)
580{
581 uintptr_t start1;
582
583 /* we modify the TLB cache so that the dirty bit will be set again
584 when accessing the range */
585 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
586 /* Check that we don't span multiple blocks - this breaks the
587 address comparisons below. */
588 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
589 != (end - 1) - start) {
590 abort();
591 }
592 cpu_tlb_reset_dirty_all(start1, length);
593
594}
595
5579c7f3 596/* Note: start and end must be within the same ram block. */
c227f099 597void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 598 int dirty_flags)
1ccde1cb 599{
d24981d3 600 uintptr_t length;
1ccde1cb
FB
601
602 start &= TARGET_PAGE_MASK;
603 end = TARGET_PAGE_ALIGN(end);
604
605 length = end - start;
606 if (length == 0)
607 return;
f7c11b53 608 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 609
d24981d3
JQ
610 if (tcg_enabled()) {
611 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 612 }
1ccde1cb
FB
613}
614
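Editor's sketch (assumptions flagged, not part of exec.c): consumers such as the VGA code or migration test a page's dirty bit, process the page, then clear only their own flag with cpu_physical_memory_reset_dirty(). cpu_physical_memory_get_dirty() and VGA_DIRTY_FLAG are assumed to be the era's names from memory-internal.h; redraw_page() is hypothetical.

if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE, VGA_DIRTY_FLAG)) {
    redraw_page(addr);                                  /* hypothetical */
    cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                    VGA_DIRTY_FLAG);
}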
8b9c99d9 615static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 616{
f6f3fbca 617 int ret = 0;
74576198 618 in_migration = enable;
f6f3fbca 619 return ret;
74576198
AL
620}
621
a8170e5e 622hwaddr memory_region_section_get_iotlb(CPUArchState *env,
e5548617
BS
623 MemoryRegionSection *section,
624 target_ulong vaddr,
a8170e5e 625 hwaddr paddr,
e5548617
BS
626 int prot,
627 target_ulong *address)
628{
a8170e5e 629 hwaddr iotlb;
e5548617
BS
630 CPUWatchpoint *wp;
631
cc5bea60 632 if (memory_region_is_ram(section->mr)) {
e5548617
BS
633 /* Normal RAM. */
634 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 635 + memory_region_section_addr(section, paddr);
e5548617
BS
636 if (!section->readonly) {
637 iotlb |= phys_section_notdirty;
638 } else {
639 iotlb |= phys_section_rom;
640 }
641 } else {
642 /* IO handlers are currently passed a physical address.
643 It would be nice to pass an offset from the base address
644 of that region. This would avoid having to special case RAM,
645 and avoid full address decoding in every device.
646 We can't use the high bits of pd for this because
647 IO_MEM_ROMD uses these as a ram address. */
648 iotlb = section - phys_sections;
cc5bea60 649 iotlb += memory_region_section_addr(section, paddr);
e5548617
BS
650 }
651
652 /* Make accesses to pages with watchpoints go via the
653 watchpoint trap routines. */
654 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
655 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
656 /* Avoid trapping reads of pages with a write breakpoint. */
657 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
658 iotlb = phys_section_watch + paddr;
659 *address |= TLB_MMIO;
660 break;
661 }
662 }
663 }
664
665 return iotlb;
666}
9fa3e853
FB
667#endif /* defined(CONFIG_USER_ONLY) */
668
e2eef170 669#if !defined(CONFIG_USER_ONLY)
8da3ff18 670
c04b2b78
PB
671#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
672typedef struct subpage_t {
70c68e44 673 MemoryRegion iomem;
a8170e5e 674 hwaddr base;
5312bd8b 675 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
676} subpage_t;
677
c227f099 678static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 679 uint16_t section);
a8170e5e 680static subpage_t *subpage_init(hwaddr base);
5312bd8b 681static void destroy_page_desc(uint16_t section_index)
54688b1e 682{
5312bd8b
AK
683 MemoryRegionSection *section = &phys_sections[section_index];
684 MemoryRegion *mr = section->mr;
54688b1e
AK
685
686 if (mr->subpage) {
687 subpage_t *subpage = container_of(mr, subpage_t, iomem);
688 memory_region_destroy(&subpage->iomem);
689 g_free(subpage);
690 }
691}
692
4346ae3e 693static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
694{
695 unsigned i;
d6f2ea22 696 PhysPageEntry *p;
54688b1e 697
c19e8800 698 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
699 return;
700 }
701
c19e8800 702 p = phys_map_nodes[lp->ptr];
4346ae3e 703 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 704 if (!p[i].is_leaf) {
54688b1e 705 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 706 } else {
c19e8800 707 destroy_page_desc(p[i].ptr);
54688b1e 708 }
54688b1e 709 }
07f07b31 710 lp->is_leaf = 0;
c19e8800 711 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
712}
713
ac1970fb 714static void destroy_all_mappings(AddressSpaceDispatch *d)
54688b1e 715{
ac1970fb 716 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
d6f2ea22 717 phys_map_nodes_reset();
54688b1e
AK
718}
719
5312bd8b
AK
720static uint16_t phys_section_add(MemoryRegionSection *section)
721{
722 if (phys_sections_nb == phys_sections_nb_alloc) {
723 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
724 phys_sections = g_renew(MemoryRegionSection, phys_sections,
725 phys_sections_nb_alloc);
726 }
727 phys_sections[phys_sections_nb] = *section;
728 return phys_sections_nb++;
729}
730
731static void phys_sections_clear(void)
732{
733 phys_sections_nb = 0;
734}
735
ac1970fb 736static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
737{
738 subpage_t *subpage;
a8170e5e 739 hwaddr base = section->offset_within_address_space
0f0cb164 740 & TARGET_PAGE_MASK;
ac1970fb 741 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
0f0cb164
AK
742 MemoryRegionSection subsection = {
743 .offset_within_address_space = base,
744 .size = TARGET_PAGE_SIZE,
745 };
a8170e5e 746 hwaddr start, end;
0f0cb164 747
f3705d53 748 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 749
f3705d53 750 if (!(existing->mr->subpage)) {
0f0cb164
AK
751 subpage = subpage_init(base);
752 subsection.mr = &subpage->iomem;
ac1970fb 753 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 754 phys_section_add(&subsection));
0f0cb164 755 } else {
f3705d53 756 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
757 }
758 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
adb2a9b5 759 end = start + section->size - 1;
0f0cb164
AK
760 subpage_register(subpage, start, end, phys_section_add(section));
761}
762
763
ac1970fb 764static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
33417e70 765{
a8170e5e 766 hwaddr start_addr = section->offset_within_address_space;
dd81124b 767 ram_addr_t size = section->size;
a8170e5e 768 hwaddr addr;
5312bd8b 769 uint16_t section_index = phys_section_add(section);
dd81124b 770
3b8e6a2d 771 assert(size);
f6f3fbca 772
3b8e6a2d 773 addr = start_addr;
ac1970fb 774 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2999097b 775 section_index);
33417e70
FB
776}
777
ac1970fb 778static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 779{
ac1970fb 780 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
0f0cb164
AK
781 MemoryRegionSection now = *section, remain = *section;
782
783 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
784 || (now.size < TARGET_PAGE_SIZE)) {
785 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
786 - now.offset_within_address_space,
787 now.size);
ac1970fb 788 register_subpage(d, &now);
0f0cb164
AK
789 remain.size -= now.size;
790 remain.offset_within_address_space += now.size;
791 remain.offset_within_region += now.size;
792 }
69b67646
TH
793 while (remain.size >= TARGET_PAGE_SIZE) {
794 now = remain;
795 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
796 now.size = TARGET_PAGE_SIZE;
ac1970fb 797 register_subpage(d, &now);
69b67646
TH
798 } else {
799 now.size &= TARGET_PAGE_MASK;
ac1970fb 800 register_multipage(d, &now);
69b67646 801 }
0f0cb164
AK
802 remain.size -= now.size;
803 remain.offset_within_address_space += now.size;
804 remain.offset_within_region += now.size;
805 }
806 now = remain;
807 if (now.size) {
ac1970fb 808 register_subpage(d, &now);
0f0cb164
AK
809 }
810}
811
62a2744c
SY
812void qemu_flush_coalesced_mmio_buffer(void)
813{
814 if (kvm_enabled())
815 kvm_flush_coalesced_mmio_buffer();
816}
817
b2a8658e
UD
818void qemu_mutex_lock_ramlist(void)
819{
820 qemu_mutex_lock(&ram_list.mutex);
821}
822
823void qemu_mutex_unlock_ramlist(void)
824{
825 qemu_mutex_unlock(&ram_list.mutex);
826}
827
c902760f
MT
828#if defined(__linux__) && !defined(TARGET_S390X)
829
830#include <sys/vfs.h>
831
832#define HUGETLBFS_MAGIC 0x958458f6
833
834static long gethugepagesize(const char *path)
835{
836 struct statfs fs;
837 int ret;
838
839 do {
9742bf26 840 ret = statfs(path, &fs);
c902760f
MT
841 } while (ret != 0 && errno == EINTR);
842
843 if (ret != 0) {
9742bf26
YT
844 perror(path);
845 return 0;
c902760f
MT
846 }
847
848 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 849 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
850
851 return fs.f_bsize;
852}
853
04b16653
AW
854static void *file_ram_alloc(RAMBlock *block,
855 ram_addr_t memory,
856 const char *path)
c902760f
MT
857{
858 char *filename;
8ca761f6
PF
859 char *sanitized_name;
860 char *c;
c902760f
MT
861 void *area;
862 int fd;
863#ifdef MAP_POPULATE
864 int flags;
865#endif
866 unsigned long hpagesize;
867
868 hpagesize = gethugepagesize(path);
869 if (!hpagesize) {
9742bf26 870 return NULL;
c902760f
MT
871 }
872
873 if (memory < hpagesize) {
874 return NULL;
875 }
876
877 if (kvm_enabled() && !kvm_has_sync_mmu()) {
878 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
879 return NULL;
880 }
881
8ca761f6
PF
882 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
883 sanitized_name = g_strdup(block->mr->name);
884 for (c = sanitized_name; *c != '\0'; c++) {
885 if (*c == '/')
886 *c = '_';
887 }
888
889 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
890 sanitized_name);
891 g_free(sanitized_name);
c902760f
MT
892
893 fd = mkstemp(filename);
894 if (fd < 0) {
9742bf26 895 perror("unable to create backing store for hugepages");
e4ada482 896 g_free(filename);
9742bf26 897 return NULL;
c902760f
MT
898 }
899 unlink(filename);
e4ada482 900 g_free(filename);
c902760f
MT
901
902 memory = (memory+hpagesize-1) & ~(hpagesize-1);
903
904 /*
905 * ftruncate is not supported by hugetlbfs in older
906 * hosts, so don't bother bailing out on errors.
907 * If anything goes wrong with it under other filesystems,
908 * mmap will fail.
909 */
910 if (ftruncate(fd, memory))
9742bf26 911 perror("ftruncate");
c902760f
MT
912
913#ifdef MAP_POPULATE
914 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
915 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
916 * to sidestep this quirk.
917 */
918 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
919 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
920#else
921 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
922#endif
923 if (area == MAP_FAILED) {
9742bf26
YT
924 perror("file_ram_alloc: can't mmap RAM pages");
925 close(fd);
926 return (NULL);
c902760f 927 }
04b16653 928 block->fd = fd;
c902760f
MT
929 return area;
930}
931#endif
932
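Editor's note (illustrative, not part of exec.c): file_ram_alloc() is reached only when the guest is started with file-backed RAM, i.e. a hugetlbfs mount passed via -mem-path (the option checked further below). The commands are one example setup, not a requirement:

/*   mount -t hugetlbfs none /dev/hugepages
 *   qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages
 * gethugepagesize() then returns the mount's f_bsize (2 MiB on typical
 * x86-64 hosts) and the region size is rounded up to that granularity. */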
d17b5288 933static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
934{
935 RAMBlock *block, *next_block;
3e837b2c 936 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 937
49cd9ac6
SH
938 assert(size != 0); /* it would hand out same offset multiple times */
939
a3161038 940 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
941 return 0;
942
a3161038 943 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 944 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
945
946 end = block->offset + block->length;
947
a3161038 948 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
949 if (next_block->offset >= end) {
950 next = MIN(next, next_block->offset);
951 }
952 }
953 if (next - end >= size && next - end < mingap) {
3e837b2c 954 offset = end;
04b16653
AW
955 mingap = next - end;
956 }
957 }
3e837b2c
AW
958
959 if (offset == RAM_ADDR_MAX) {
960 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
961 (uint64_t)size);
962 abort();
963 }
964
04b16653
AW
965 return offset;
966}
967
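A worked example of the gap selection above (block layout invented for illustration): with existing blocks [0x00000000, 0x08000000) and [0x10000000, 0x18000000), a request for 0x02000000 bytes sees two candidate gaps, 0x08000000..0x10000000 (size 0x08000000) and everything past 0x18000000 (effectively unbounded). Both fit, but the first is the smaller gap, so find_ram_offset() returns 0x08000000.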
652d7ec2 968ram_addr_t last_ram_offset(void)
d17b5288
AW
969{
970 RAMBlock *block;
971 ram_addr_t last = 0;
972
a3161038 973 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
974 last = MAX(last, block->offset + block->length);
975
976 return last;
977}
978
ddb97f1d
JB
979static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
980{
981 int ret;
982 QemuOpts *machine_opts;
983
984 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
985 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
986 if (machine_opts &&
987 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
988 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
989 if (ret) {
990 perror("qemu_madvise");
991 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
992 "but dump_guest_core=off specified\n");
993 }
994 }
995}
996
c5705a77 997void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
998{
999 RAMBlock *new_block, *block;
1000
c5705a77 1001 new_block = NULL;
a3161038 1002 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77
AK
1003 if (block->offset == addr) {
1004 new_block = block;
1005 break;
1006 }
1007 }
1008 assert(new_block);
1009 assert(!new_block->idstr[0]);
84b89d78 1010
09e5ab63
AL
1011 if (dev) {
1012 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1013 if (id) {
1014 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1015 g_free(id);
84b89d78
CM
1016 }
1017 }
1018 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1019
b2a8658e
UD
1020 /* This assumes the iothread lock is taken here too. */
1021 qemu_mutex_lock_ramlist();
a3161038 1022 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1023 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1024 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1025 new_block->idstr);
1026 abort();
1027 }
1028 }
b2a8658e 1029 qemu_mutex_unlock_ramlist();
c5705a77
AK
1030}
1031
8490fc78
LC
1032static int memory_try_enable_merging(void *addr, size_t len)
1033{
1034 QemuOpts *opts;
1035
1036 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1037 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1038 /* disabled by the user */
1039 return 0;
1040 }
1041
1042 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1043}
1044
c5705a77
AK
1045ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1046 MemoryRegion *mr)
1047{
abb26d63 1048 RAMBlock *block, *new_block;
c5705a77
AK
1049
1050 size = TARGET_PAGE_ALIGN(size);
1051 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1052
b2a8658e
UD
1053 /* This assumes the iothread lock is taken here too. */
1054 qemu_mutex_lock_ramlist();
7c637366 1055 new_block->mr = mr;
432d268c 1056 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1057 if (host) {
1058 new_block->host = host;
cd19cfa2 1059 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
1060 } else {
1061 if (mem_path) {
c902760f 1062#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
1063 new_block->host = file_ram_alloc(new_block, size, mem_path);
1064 if (!new_block->host) {
6eebf958 1065 new_block->host = qemu_anon_ram_alloc(size);
8490fc78 1066 memory_try_enable_merging(new_block->host, size);
6977dfe6 1067 }
c902760f 1068#else
6977dfe6
YT
1069 fprintf(stderr, "-mem-path option unsupported\n");
1070 exit(1);
c902760f 1071#endif
6977dfe6 1072 } else {
868bb33f 1073 if (xen_enabled()) {
fce537d4 1074 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
1075 } else if (kvm_enabled()) {
1076 /* some s390/kvm configurations have special constraints */
6eebf958 1077 new_block->host = kvm_ram_alloc(size);
432d268c 1078 } else {
6eebf958 1079 new_block->host = qemu_anon_ram_alloc(size);
432d268c 1080 }
8490fc78 1081 memory_try_enable_merging(new_block->host, size);
6977dfe6 1082 }
c902760f 1083 }
94a6b54f
PB
1084 new_block->length = size;
1085
abb26d63
PB
1086 /* Keep the list sorted from biggest to smallest block. */
1087 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1088 if (block->length < new_block->length) {
1089 break;
1090 }
1091 }
1092 if (block) {
1093 QTAILQ_INSERT_BEFORE(block, new_block, next);
1094 } else {
1095 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1096 }
0d6d3c87 1097 ram_list.mru_block = NULL;
94a6b54f 1098
f798b07f 1099 ram_list.version++;
b2a8658e 1100 qemu_mutex_unlock_ramlist();
f798b07f 1101
7267c094 1102 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1103 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
1104 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1105 0, size >> TARGET_PAGE_BITS);
1720aeee 1106 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1107
ddb97f1d 1108 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1109 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1110
6f0437e8
JK
1111 if (kvm_enabled())
1112 kvm_setup_guest_memory(new_block->host, size);
1113
94a6b54f
PB
1114 return new_block->offset;
1115}
e9a1ab19 1116
c5705a77 1117ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1118{
c5705a77 1119 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1120}
1121
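Boards normally do not call qemu_ram_alloc() directly; it sits underneath the MemoryRegion RAM initializers. A hedged sketch of the usual call chain in this QEMU generation ("pc.ram" and ram_size are placeholders):

MemoryRegion *ram = g_malloc(sizeof(*ram));
memory_region_init_ram(ram, "pc.ram", ram_size);   /* ends up in qemu_ram_alloc() */
vmstate_register_ram_global(ram);
memory_region_add_subregion(get_system_memory(), 0, ram);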
1f2e98b6
AW
1122void qemu_ram_free_from_ptr(ram_addr_t addr)
1123{
1124 RAMBlock *block;
1125
b2a8658e
UD
1126 /* This assumes the iothread lock is taken here too. */
1127 qemu_mutex_lock_ramlist();
a3161038 1128 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1129 if (addr == block->offset) {
a3161038 1130 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1131 ram_list.mru_block = NULL;
f798b07f 1132 ram_list.version++;
7267c094 1133 g_free(block);
b2a8658e 1134 break;
1f2e98b6
AW
1135 }
1136 }
b2a8658e 1137 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1138}
1139
c227f099 1140void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1141{
04b16653
AW
1142 RAMBlock *block;
1143
b2a8658e
UD
1144 /* This assumes the iothread lock is taken here too. */
1145 qemu_mutex_lock_ramlist();
a3161038 1146 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1147 if (addr == block->offset) {
a3161038 1148 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1149 ram_list.mru_block = NULL;
f798b07f 1150 ram_list.version++;
cd19cfa2
HY
1151 if (block->flags & RAM_PREALLOC_MASK) {
1152 ;
1153 } else if (mem_path) {
04b16653
AW
1154#if defined (__linux__) && !defined(TARGET_S390X)
1155 if (block->fd) {
1156 munmap(block->host, block->length);
1157 close(block->fd);
1158 } else {
e7a09b92 1159 qemu_anon_ram_free(block->host, block->length);
04b16653 1160 }
fd28aa13
JK
1161#else
1162 abort();
04b16653
AW
1163#endif
1164 } else {
868bb33f 1165 if (xen_enabled()) {
e41d7c69 1166 xen_invalidate_map_cache_entry(block->host);
432d268c 1167 } else {
e7a09b92 1168 qemu_anon_ram_free(block->host, block->length);
432d268c 1169 }
04b16653 1170 }
7267c094 1171 g_free(block);
b2a8658e 1172 break;
04b16653
AW
1173 }
1174 }
b2a8658e 1175 qemu_mutex_unlock_ramlist();
04b16653 1176
e9a1ab19
FB
1177}
1178
cd19cfa2
HY
1179#ifndef _WIN32
1180void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1181{
1182 RAMBlock *block;
1183 ram_addr_t offset;
1184 int flags;
1185 void *area, *vaddr;
1186
a3161038 1187 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1188 offset = addr - block->offset;
1189 if (offset < block->length) {
1190 vaddr = block->host + offset;
1191 if (block->flags & RAM_PREALLOC_MASK) {
1192 ;
1193 } else {
1194 flags = MAP_FIXED;
1195 munmap(vaddr, length);
1196 if (mem_path) {
1197#if defined(__linux__) && !defined(TARGET_S390X)
1198 if (block->fd) {
1199#ifdef MAP_POPULATE
1200 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1201 MAP_PRIVATE;
1202#else
1203 flags |= MAP_PRIVATE;
1204#endif
1205 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1206 flags, block->fd, offset);
1207 } else {
1208 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1209 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1210 flags, -1, 0);
1211 }
fd28aa13
JK
1212#else
1213 abort();
cd19cfa2
HY
1214#endif
1215 } else {
1216#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1217 flags |= MAP_SHARED | MAP_ANONYMOUS;
1218 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1219 flags, -1, 0);
1220#else
1221 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1222 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1223 flags, -1, 0);
1224#endif
1225 }
1226 if (area != vaddr) {
f15fbc4b
AP
1227 fprintf(stderr, "Could not remap addr: "
1228 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1229 length, addr);
1230 exit(1);
1231 }
8490fc78 1232 memory_try_enable_merging(vaddr, length);
ddb97f1d 1233 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1234 }
1235 return;
1236 }
1237 }
1238}
1239#endif /* !_WIN32 */
1240
dc828ca1 1241/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
1242 With the exception of the softmmu code in this file, this should
1243 only be used for local memory (e.g. video ram) that the device owns,
1244 and knows it isn't going to access beyond the end of the block.
1245
1246 It should not be used for general purpose DMA.
1247 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1248 */
c227f099 1249void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 1250{
94a6b54f
PB
1251 RAMBlock *block;
1252
b2a8658e 1253 /* The list is protected by the iothread lock here. */
0d6d3c87
PB
1254 block = ram_list.mru_block;
1255 if (block && addr - block->offset < block->length) {
1256 goto found;
1257 }
a3161038 1258 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1259 if (addr - block->offset < block->length) {
0d6d3c87 1260 goto found;
f471a17e 1261 }
94a6b54f 1262 }
f471a17e
AW
1263
1264 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1265 abort();
1266
0d6d3c87
PB
1267found:
1268 ram_list.mru_block = block;
1269 if (xen_enabled()) {
1270 /* We need to check if the requested address is in the RAM
1271 * because we don't want to map the entire memory in QEMU.
1272 * In that case just map until the end of the page.
1273 */
1274 if (block->offset == 0) {
1275 return xen_map_cache(addr, 0, 0);
1276 } else if (block->host == NULL) {
1277 block->host =
1278 xen_map_cache(block->offset, block->length, 1);
1279 }
1280 }
1281 return block->host + (addr - block->offset);
dc828ca1
PB
1282}
1283
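Per the comment above, general-purpose DMA should not use qemu_get_ram_ptr(); a hedged sketch of the recommended path ("gpa" is a placeholder guest physical address):

uint8_t buf[512];
cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0);   /* read from guest memory */
/* ... modify buf ... */
cpu_physical_memory_rw(gpa, buf, sizeof(buf), 1);   /* write it back */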
0d6d3c87
PB
1284/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1285 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1286 *
1287 * ??? Is this still necessary?
b2e0a138 1288 */
8b9c99d9 1289static void *qemu_safe_ram_ptr(ram_addr_t addr)
b2e0a138
MT
1290{
1291 RAMBlock *block;
1292
b2a8658e 1293 /* The list is protected by the iothread lock here. */
a3161038 1294 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1295 if (addr - block->offset < block->length) {
868bb33f 1296 if (xen_enabled()) {
432d268c
JN
1297 /* We need to check if the requested address is in the RAM
1298 * because we don't want to map the entire memory in QEMU.
712c2b41 1299 * In that case just map until the end of the page.
432d268c
JN
1300 */
1301 if (block->offset == 0) {
e41d7c69 1302 return xen_map_cache(addr, 0, 0);
432d268c 1303 } else if (block->host == NULL) {
e41d7c69
JK
1304 block->host =
1305 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
1306 }
1307 }
b2e0a138
MT
1308 return block->host + (addr - block->offset);
1309 }
1310 }
1311
1312 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1313 abort();
1314
1315 return NULL;
1316}
1317
38bee5dc
SS
1318/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1319 * but takes a size argument */
8b9c99d9 1320static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 1321{
8ab934f9
SS
1322 if (*size == 0) {
1323 return NULL;
1324 }
868bb33f 1325 if (xen_enabled()) {
e41d7c69 1326 return xen_map_cache(addr, *size, 1);
868bb33f 1327 } else {
38bee5dc
SS
1328 RAMBlock *block;
1329
a3161038 1330 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1331 if (addr - block->offset < block->length) {
1332 if (addr - block->offset + *size > block->length)
1333 *size = block->length - addr + block->offset;
1334 return block->host + (addr - block->offset);
1335 }
1336 }
1337
1338 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1339 abort();
38bee5dc
SS
1340 }
1341}
1342
050a0ddf
AP
1343void qemu_put_ram_ptr(void *addr)
1344{
1345 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
1346}
1347
e890261f 1348int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1349{
94a6b54f
PB
1350 RAMBlock *block;
1351 uint8_t *host = ptr;
1352
868bb33f 1353 if (xen_enabled()) {
e41d7c69 1354 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
1355 return 0;
1356 }
1357
a3161038 1358 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
1359 /* This case happens when the block is not mapped. */
1360 if (block->host == NULL) {
1361 continue;
1362 }
f471a17e 1363 if (host - block->host < block->length) {
e890261f
MT
1364 *ram_addr = block->offset + (host - block->host);
1365 return 0;
f471a17e 1366 }
94a6b54f 1367 }
432d268c 1368
e890261f
MT
1369 return -1;
1370}
f471a17e 1371
e890261f
MT
1372/* Some of the softmmu routines need to translate from a host pointer
1373 (typically a TLB entry) back to a ram offset. */
1374ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1375{
1376 ram_addr_t ram_addr;
f471a17e 1377
e890261f
MT
1378 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1379 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1380 abort();
1381 }
1382 return ram_addr;
5579c7f3
PB
1383}
1384
a8170e5e 1385static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1386 unsigned size)
e18231a3
BS
1387{
1388#ifdef DEBUG_UNASSIGNED
1389 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1390#endif
5b450407 1391#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1392 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
1393#endif
1394 return 0;
1395}
1396
a8170e5e 1397static void unassigned_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1398 uint64_t val, unsigned size)
e18231a3
BS
1399{
1400#ifdef DEBUG_UNASSIGNED
0e0df1e2 1401 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 1402#endif
5b450407 1403#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1404 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 1405#endif
33417e70
FB
1406}
1407
0e0df1e2
AK
1408static const MemoryRegionOps unassigned_mem_ops = {
1409 .read = unassigned_mem_read,
1410 .write = unassigned_mem_write,
1411 .endianness = DEVICE_NATIVE_ENDIAN,
1412};
e18231a3 1413
a8170e5e 1414static uint64_t error_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1415 unsigned size)
e18231a3 1416{
0e0df1e2 1417 abort();
e18231a3
BS
1418}
1419
a8170e5e 1420static void error_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1421 uint64_t value, unsigned size)
e18231a3 1422{
0e0df1e2 1423 abort();
33417e70
FB
1424}
1425
0e0df1e2
AK
1426static const MemoryRegionOps error_mem_ops = {
1427 .read = error_mem_read,
1428 .write = error_mem_write,
1429 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
1430};
1431
0e0df1e2
AK
1432static const MemoryRegionOps rom_mem_ops = {
1433 .read = error_mem_read,
1434 .write = unassigned_mem_write,
1435 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
1436};
1437
a8170e5e 1438static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1439 uint64_t val, unsigned size)
9fa3e853 1440{
3a7d929e 1441 int dirty_flags;
f7c11b53 1442 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1443 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1444#if !defined(CONFIG_USER_ONLY)
0e0df1e2 1445 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1446 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 1447#endif
3a7d929e 1448 }
0e0df1e2
AK
1449 switch (size) {
1450 case 1:
1451 stb_p(qemu_get_ram_ptr(ram_addr), val);
1452 break;
1453 case 2:
1454 stw_p(qemu_get_ram_ptr(ram_addr), val);
1455 break;
1456 case 4:
1457 stl_p(qemu_get_ram_ptr(ram_addr), val);
1458 break;
1459 default:
1460 abort();
3a7d929e 1461 }
f23db169 1462 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1463 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1464 /* we remove the notdirty callback only if the code has been
1465 flushed */
1466 if (dirty_flags == 0xff)
2e70f6ef 1467 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
1468}
1469
0e0df1e2
AK
1470static const MemoryRegionOps notdirty_mem_ops = {
1471 .read = error_mem_read,
1472 .write = notdirty_mem_write,
1473 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1474};
1475
0f459d16 1476/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1477static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1478{
9349b4f9 1479 CPUArchState *env = cpu_single_env;
06d55cc1 1480 target_ulong pc, cs_base;
0f459d16 1481 target_ulong vaddr;
a1d1bb31 1482 CPUWatchpoint *wp;
06d55cc1 1483 int cpu_flags;
0f459d16 1484
06d55cc1
AL
1485 if (env->watchpoint_hit) {
1486 /* We re-entered the check after replacing the TB. Now raise
1487 * the debug interrupt so that it will trigger after the
1488 * current instruction. */
c3affe56 1489 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1490 return;
1491 }
2e70f6ef 1492 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1493 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1494 if ((vaddr == (wp->vaddr & len_mask) ||
1495 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1496 wp->flags |= BP_WATCHPOINT_HIT;
1497 if (!env->watchpoint_hit) {
1498 env->watchpoint_hit = wp;
5a316526 1499 tb_check_watchpoint(env);
6e140f28
AL
1500 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1501 env->exception_index = EXCP_DEBUG;
488d6577 1502 cpu_loop_exit(env);
6e140f28
AL
1503 } else {
1504 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1505 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1506 cpu_resume_from_signal(env, NULL);
6e140f28 1507 }
06d55cc1 1508 }
6e140f28
AL
1509 } else {
1510 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1511 }
1512 }
1513}
1514
6658ffb8
PB
1515/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1516 so these check for a hit then pass through to the normal out-of-line
1517 phys routines. */
a8170e5e 1518static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1519 unsigned size)
6658ffb8 1520{
1ec9b909
AK
1521 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1522 switch (size) {
1523 case 1: return ldub_phys(addr);
1524 case 2: return lduw_phys(addr);
1525 case 4: return ldl_phys(addr);
1526 default: abort();
1527 }
6658ffb8
PB
1528}
1529
a8170e5e 1530static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1531 uint64_t val, unsigned size)
6658ffb8 1532{
1ec9b909
AK
1533 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1534 switch (size) {
67364150
MF
1535 case 1:
1536 stb_phys(addr, val);
1537 break;
1538 case 2:
1539 stw_phys(addr, val);
1540 break;
1541 case 4:
1542 stl_phys(addr, val);
1543 break;
1ec9b909
AK
1544 default: abort();
1545 }
6658ffb8
PB
1546}
1547
1ec9b909
AK
1548static const MemoryRegionOps watch_mem_ops = {
1549 .read = watch_mem_read,
1550 .write = watch_mem_write,
1551 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1552};
6658ffb8 1553
a8170e5e 1554static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1555 unsigned len)
db7b5426 1556{
70c68e44 1557 subpage_t *mmio = opaque;
f6405247 1558 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1559 MemoryRegionSection *section;
db7b5426
BS
1560#if defined(DEBUG_SUBPAGE)
1561 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1562 mmio, len, addr, idx);
1563#endif
db7b5426 1564
5312bd8b
AK
1565 section = &phys_sections[mmio->sub_section[idx]];
1566 addr += mmio->base;
1567 addr -= section->offset_within_address_space;
1568 addr += section->offset_within_region;
37ec01d4 1569 return io_mem_read(section->mr, addr, len);
db7b5426
BS
1570}
1571
a8170e5e 1572static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1573 uint64_t value, unsigned len)
db7b5426 1574{
70c68e44 1575 subpage_t *mmio = opaque;
f6405247 1576 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1577 MemoryRegionSection *section;
db7b5426 1578#if defined(DEBUG_SUBPAGE)
70c68e44
AK
1579 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1580 " idx %d value %"PRIx64"\n",
f6405247 1581 __func__, mmio, len, addr, idx, value);
db7b5426 1582#endif
f6405247 1583
5312bd8b
AK
1584 section = &phys_sections[mmio->sub_section[idx]];
1585 addr += mmio->base;
1586 addr -= section->offset_within_address_space;
1587 addr += section->offset_within_region;
37ec01d4 1588 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
1589}
1590
70c68e44
AK
1591static const MemoryRegionOps subpage_ops = {
1592 .read = subpage_read,
1593 .write = subpage_write,
1594 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1595};
1596
a8170e5e 1597static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
de712f94 1598 unsigned size)
56384e8b
AF
1599{
1600 ram_addr_t raddr = addr;
1601 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
1602 switch (size) {
1603 case 1: return ldub_p(ptr);
1604 case 2: return lduw_p(ptr);
1605 case 4: return ldl_p(ptr);
1606 default: abort();
1607 }
56384e8b
AF
1608}
1609
a8170e5e 1610static void subpage_ram_write(void *opaque, hwaddr addr,
de712f94 1611 uint64_t value, unsigned size)
56384e8b
AF
1612{
1613 ram_addr_t raddr = addr;
1614 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
1615 switch (size) {
1616 case 1: return stb_p(ptr, value);
1617 case 2: return stw_p(ptr, value);
1618 case 4: return stl_p(ptr, value);
1619 default: abort();
1620 }
56384e8b
AF
1621}
1622
de712f94
AK
1623static const MemoryRegionOps subpage_ram_ops = {
1624 .read = subpage_ram_read,
1625 .write = subpage_ram_write,
1626 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
1627};
1628
c227f099 1629static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1630 uint16_t section)
db7b5426
BS
1631{
1632 int idx, eidx;
1633
1634 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1635 return -1;
1636 idx = SUBPAGE_IDX(start);
1637 eidx = SUBPAGE_IDX(end);
1638#if defined(DEBUG_SUBPAGE)
0bf9e31a 1639 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
 1640 mmio, start, end, idx, eidx, section);
1641#endif
5312bd8b
AK
1642 if (memory_region_is_ram(phys_sections[section].mr)) {
1643 MemoryRegionSection new_section = phys_sections[section];
1644 new_section.mr = &io_mem_subpage_ram;
1645 section = phys_section_add(&new_section);
56384e8b 1646 }
db7b5426 1647 for (; idx <= eidx; idx++) {
5312bd8b 1648 mmio->sub_section[idx] = section;
db7b5426
BS
1649 }
1650
1651 return 0;
1652}
1653
a8170e5e 1654static subpage_t *subpage_init(hwaddr base)
db7b5426 1655{
c227f099 1656 subpage_t *mmio;
db7b5426 1657
7267c094 1658 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
1659
1660 mmio->base = base;
70c68e44
AK
1661 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1662 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1663 mmio->iomem.subpage = true;
db7b5426 1664#if defined(DEBUG_SUBPAGE)
1eec614b
AL
1665 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1666 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1667#endif
0f0cb164 1668 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
1669
1670 return mmio;
1671}
1672
5312bd8b
AK
1673static uint16_t dummy_section(MemoryRegion *mr)
1674{
1675 MemoryRegionSection section = {
1676 .mr = mr,
1677 .offset_within_address_space = 0,
1678 .offset_within_region = 0,
1679 .size = UINT64_MAX,
1680 };
1681
1682 return phys_section_add(&section);
1683}
1684
a8170e5e 1685MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1686{
37ec01d4 1687 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1688}
1689
e9179ce1
AK
1690static void io_mem_init(void)
1691{
0e0df1e2 1692 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
1693 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1694 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1695 "unassigned", UINT64_MAX);
1696 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1697 "notdirty", UINT64_MAX);
de712f94
AK
1698 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1699 "subpage-ram", UINT64_MAX);
1ec9b909
AK
1700 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1701 "watch", UINT64_MAX);
e9179ce1
AK
1702}
1703
ac1970fb
AK
1704static void mem_begin(MemoryListener *listener)
1705{
1706 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1707
1708 destroy_all_mappings(d);
1709 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1710}
1711
50c1e149
AK
1712static void core_begin(MemoryListener *listener)
1713{
5312bd8b
AK
1714 phys_sections_clear();
1715 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
1716 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1717 phys_section_rom = dummy_section(&io_mem_rom);
1718 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
1719}
1720
1d71148e 1721static void tcg_commit(MemoryListener *listener)
50c1e149 1722{
9349b4f9 1723 CPUArchState *env;
117712c3
AK
1724
1725 /* since each CPU stores ram addresses in its TLB cache, we must
1726 reset the modified entries */
1727 /* XXX: slow ! */
1728 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1729 tlb_flush(env, 1);
1730 }
50c1e149
AK
1731}
1732
93632747
AK
1733static void core_log_global_start(MemoryListener *listener)
1734{
1735 cpu_physical_memory_set_dirty_tracking(1);
1736}
1737
1738static void core_log_global_stop(MemoryListener *listener)
1739{
1740 cpu_physical_memory_set_dirty_tracking(0);
1741}
1742
4855d41a
AK
1743static void io_region_add(MemoryListener *listener,
1744 MemoryRegionSection *section)
1745{
a2d33521
AK
1746 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1747
1748 mrio->mr = section->mr;
1749 mrio->offset = section->offset_within_region;
1750 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 1751 section->offset_within_address_space, section->size);
a2d33521 1752 ioport_register(&mrio->iorange);
4855d41a
AK
1753}
1754
1755static void io_region_del(MemoryListener *listener,
1756 MemoryRegionSection *section)
1757{
1758 isa_unassign_ioport(section->offset_within_address_space, section->size);
1759}
1760
93632747 1761static MemoryListener core_memory_listener = {
50c1e149 1762 .begin = core_begin,
93632747
AK
1763 .log_global_start = core_log_global_start,
1764 .log_global_stop = core_log_global_stop,
ac1970fb 1765 .priority = 1,
93632747
AK
1766};
1767
4855d41a
AK
1768static MemoryListener io_memory_listener = {
1769 .region_add = io_region_add,
1770 .region_del = io_region_del,
4855d41a
AK
1771 .priority = 0,
1772};
1773
1d71148e
AK
1774static MemoryListener tcg_memory_listener = {
1775 .commit = tcg_commit,
1776};
1777
ac1970fb
AK
1778void address_space_init_dispatch(AddressSpace *as)
1779{
1780 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1781
1782 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1783 d->listener = (MemoryListener) {
1784 .begin = mem_begin,
1785 .region_add = mem_add,
1786 .region_nop = mem_add,
1787 .priority = 0,
1788 };
1789 as->dispatch = d;
1790 memory_listener_register(&d->listener, as);
1791}
1792
83f3c251
AK
1793void address_space_destroy_dispatch(AddressSpace *as)
1794{
1795 AddressSpaceDispatch *d = as->dispatch;
1796
1797 memory_listener_unregister(&d->listener);
1798 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1799 g_free(d);
1800 as->dispatch = NULL;
1801}
1802
62152b8a
AK
1803static void memory_map_init(void)
1804{
7267c094 1805 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1806 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1807 address_space_init(&address_space_memory, system_memory);
1808 address_space_memory.name = "memory";
309cb471 1809
7267c094 1810 system_io = g_malloc(sizeof(*system_io));
309cb471 1811 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1812 address_space_init(&address_space_io, system_io);
1813 address_space_io.name = "I/O";
93632747 1814
f6790af6
AK
1815 memory_listener_register(&core_memory_listener, &address_space_memory);
1816 memory_listener_register(&io_memory_listener, &address_space_io);
1817 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1818
1819 dma_context_init(&dma_context_memory, &address_space_memory,
1820 NULL, NULL, NULL);
62152b8a
AK
1821}
1822
1823MemoryRegion *get_system_memory(void)
1824{
1825 return system_memory;
1826}
1827
309cb471
AK
1828MemoryRegion *get_system_io(void)
1829{
1830 return system_io;
1831}
1832
e2eef170
PB
1833#endif /* !defined(CONFIG_USER_ONLY) */
1834
13eb76e0
FB
1835/* physical memory access (slow version, mainly for debug) */
1836#if defined(CONFIG_USER_ONLY)
9349b4f9 1837int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1838 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1839{
1840 int l, flags;
1841 target_ulong page;
53a5960a 1842 void * p;
13eb76e0
FB
1843
1844 while (len > 0) {
1845 page = addr & TARGET_PAGE_MASK;
1846 l = (page + TARGET_PAGE_SIZE) - addr;
1847 if (l > len)
1848 l = len;
1849 flags = page_get_flags(page);
1850 if (!(flags & PAGE_VALID))
a68fe89c 1851 return -1;
13eb76e0
FB
1852 if (is_write) {
1853 if (!(flags & PAGE_WRITE))
a68fe89c 1854 return -1;
579a97f7 1855 /* XXX: this code should not depend on lock_user */
72fb7daa 1856 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1857 return -1;
72fb7daa
AJ
1858 memcpy(p, buf, l);
1859 unlock_user(p, addr, l);
13eb76e0
FB
1860 } else {
1861 if (!(flags & PAGE_READ))
a68fe89c 1862 return -1;
579a97f7 1863 /* XXX: this code should not depend on lock_user */
72fb7daa 1864 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1865 return -1;
72fb7daa 1866 memcpy(buf, p, l);
5b257578 1867 unlock_user(p, addr, 0);
13eb76e0
FB
1868 }
1869 len -= l;
1870 buf += l;
1871 addr += l;
1872 }
a68fe89c 1873 return 0;
13eb76e0 1874}
8df1cd07 1875
13eb76e0 1876#else
51d7a9eb 1877
a8170e5e
AK
1878static void invalidate_and_set_dirty(hwaddr addr,
1879 hwaddr length)
51d7a9eb
AP
1880{
1881 if (!cpu_physical_memory_is_dirty(addr)) {
1882 /* invalidate code */
1883 tb_invalidate_phys_page_range(addr, addr + length, 0);
1884 /* set dirty bit */
1885 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1886 }
e226939d 1887 xen_modified_memory(addr, length);
51d7a9eb
AP
1888}
1889
a8170e5e 1890void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1891 int len, bool is_write)
13eb76e0 1892{
ac1970fb 1893 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1894 int l;
13eb76e0
FB
1895 uint8_t *ptr;
1896 uint32_t val;
a8170e5e 1897 hwaddr page;
f3705d53 1898 MemoryRegionSection *section;
3b46e624 1899
13eb76e0
FB
1900 while (len > 0) {
1901 page = addr & TARGET_PAGE_MASK;
1902 l = (page + TARGET_PAGE_SIZE) - addr;
1903 if (l > len)
1904 l = len;
ac1970fb 1905 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1906
13eb76e0 1907 if (is_write) {
f3705d53 1908 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1909 hwaddr addr1;
cc5bea60 1910 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
1911 /* XXX: could force cpu_single_env to NULL to avoid
1912 potential bugs */
6c2934db 1913 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1914 /* 32 bit write access */
c27004ec 1915 val = ldl_p(buf);
37ec01d4 1916 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1917 l = 4;
6c2934db 1918 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1919 /* 16 bit write access */
c27004ec 1920 val = lduw_p(buf);
37ec01d4 1921 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
1922 l = 2;
1923 } else {
1c213d19 1924 /* 8 bit write access */
c27004ec 1925 val = ldub_p(buf);
37ec01d4 1926 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
1927 l = 1;
1928 }
f3705d53 1929 } else if (!section->readonly) {
8ca5692d 1930 ram_addr_t addr1;
f3705d53 1931 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1932 + memory_region_section_addr(section, addr);
13eb76e0 1933 /* RAM case */
5579c7f3 1934 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1935 memcpy(ptr, buf, l);
51d7a9eb 1936 invalidate_and_set_dirty(addr1, l);
050a0ddf 1937 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1938 }
1939 } else {
cc5bea60
BS
1940 if (!(memory_region_is_ram(section->mr) ||
1941 memory_region_is_romd(section->mr))) {
a8170e5e 1942 hwaddr addr1;
13eb76e0 1943 /* I/O case */
cc5bea60 1944 addr1 = memory_region_section_addr(section, addr);
6c2934db 1945 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1946 /* 32 bit read access */
37ec01d4 1947 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1948 stl_p(buf, val);
13eb76e0 1949 l = 4;
6c2934db 1950 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1951 /* 16 bit read access */
37ec01d4 1952 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1953 stw_p(buf, val);
13eb76e0
FB
1954 l = 2;
1955 } else {
1c213d19 1956 /* 8 bit read access */
37ec01d4 1957 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1958 stb_p(buf, val);
13eb76e0
FB
1959 l = 1;
1960 }
1961 } else {
1962 /* RAM case */
0a1b357f 1963 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
1964 + memory_region_section_addr(section,
1965 addr));
f3705d53 1966 memcpy(buf, ptr, l);
050a0ddf 1967 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1968 }
1969 }
1970 len -= l;
1971 buf += l;
1972 addr += l;
1973 }
1974}
8df1cd07 1975
a8170e5e 1976void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1977 const uint8_t *buf, int len)
1978{
1979 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1980}
1981
1982/**
1983 * address_space_read: read from an address space.
1984 *
1985 * @as: #AddressSpace to be accessed
1986 * @addr: address within that address space
1987 * @buf: buffer into which the data is read
1988 */
a8170e5e 1989void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
1990{
1991 address_space_rw(as, addr, buf, len, false);
1992}
1993
1994
a8170e5e 1995void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1996 int len, int is_write)
1997{
1998 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1999}
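
/* Illustrative sketch (not part of exec.c): a device model using the
 * accessors above to pull a descriptor out of guest memory and push a
 * status word back.  The descriptor layout, addresses and sizes are
 * invented for the example. */
static void example_process_descriptor(hwaddr desc_addr)
{
    uint8_t desc[16];
    uint32_t status = cpu_to_le32(1);    /* hypothetical "done" status */

    address_space_read(&address_space_memory, desc_addr, desc, sizeof(desc));
    /* ... decode and act on the descriptor ... */
    address_space_write(&address_space_memory, desc_addr + sizeof(desc),
                        (const uint8_t *)&status, sizeof(status));
}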
2000
d0ecd2aa 2001/* used for ROM loading : can write in RAM and ROM */
a8170e5e 2002void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
2003 const uint8_t *buf, int len)
2004{
ac1970fb 2005 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
2006 int l;
2007 uint8_t *ptr;
a8170e5e 2008 hwaddr page;
f3705d53 2009 MemoryRegionSection *section;
3b46e624 2010
d0ecd2aa
FB
2011 while (len > 0) {
2012 page = addr & TARGET_PAGE_MASK;
2013 l = (page + TARGET_PAGE_SIZE) - addr;
2014 if (l > len)
2015 l = len;
ac1970fb 2016 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 2017
cc5bea60
BS
2018 if (!(memory_region_is_ram(section->mr) ||
2019 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
2020 /* do nothing */
2021 } else {
2022 unsigned long addr1;
f3705d53 2023 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 2024 + memory_region_section_addr(section, addr);
d0ecd2aa 2025 /* ROM/RAM case */
5579c7f3 2026 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2027 memcpy(ptr, buf, l);
51d7a9eb 2028 invalidate_and_set_dirty(addr1, l);
050a0ddf 2029 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
2030 }
2031 len -= l;
2032 buf += l;
2033 addr += l;
2034 }
2035}
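
/* Illustrative sketch (not part of exec.c): firmware loading at reset time
 * goes through cpu_physical_memory_write_rom(), since an ordinary write
 * would be dropped for a read-only section.  The address 0xfffc0000 is an
 * arbitrary example, not a real board constant. */
static void example_load_firmware(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, size);
}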
2036
6d16c2f8
AL
2037typedef struct {
2038 void *buffer;
a8170e5e
AK
2039 hwaddr addr;
2040 hwaddr len;
6d16c2f8
AL
2041} BounceBuffer;
2042
2043static BounceBuffer bounce;
2044
ba223c29
AL
2045typedef struct MapClient {
2046 void *opaque;
2047 void (*callback)(void *opaque);
72cf2d4f 2048 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2049} MapClient;
2050
72cf2d4f
BS
2051static QLIST_HEAD(map_client_list, MapClient) map_client_list
2052 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2053
2054void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2055{
7267c094 2056 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2057
2058 client->opaque = opaque;
2059 client->callback = callback;
72cf2d4f 2060 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2061 return client;
2062}
2063
8b9c99d9 2064static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2065{
2066 MapClient *client = (MapClient *)_client;
2067
72cf2d4f 2068 QLIST_REMOVE(client, link);
7267c094 2069 g_free(client);
ba223c29
AL
2070}
2071
2072static void cpu_notify_map_clients(void)
2073{
2074 MapClient *client;
2075
72cf2d4f
BS
2076 while (!QLIST_EMPTY(&map_client_list)) {
2077 client = QLIST_FIRST(&map_client_list);
ba223c29 2078 client->callback(client->opaque);
34d5e948 2079 cpu_unregister_map_client(client);
ba223c29
AL
2080 }
2081}
2082
6d16c2f8
AL
2083/* Map a physical memory region into a host virtual address.
2084 * May map a subset of the requested range, given by and returned in *plen.
2085 * May return NULL if resources needed to perform the mapping are exhausted.
2086 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2087 * Use cpu_register_map_client() to know when retrying the map operation is
2088 * likely to succeed.
6d16c2f8 2089 */
ac1970fb 2090void *address_space_map(AddressSpace *as,
a8170e5e
AK
2091 hwaddr addr,
2092 hwaddr *plen,
ac1970fb 2093 bool is_write)
6d16c2f8 2094{
ac1970fb 2095 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
2096 hwaddr len = *plen;
2097 hwaddr todo = 0;
6d16c2f8 2098 int l;
a8170e5e 2099 hwaddr page;
f3705d53 2100 MemoryRegionSection *section;
f15fbc4b 2101 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2102 ram_addr_t rlen;
2103 void *ret;
6d16c2f8
AL
2104
2105 while (len > 0) {
2106 page = addr & TARGET_PAGE_MASK;
2107 l = (page + TARGET_PAGE_SIZE) - addr;
2108 if (l > len)
2109 l = len;
ac1970fb 2110 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2111
f3705d53 2112 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2113 if (todo || bounce.buffer) {
6d16c2f8
AL
2114 break;
2115 }
2116 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2117 bounce.addr = addr;
2118 bounce.len = l;
2119 if (!is_write) {
ac1970fb 2120 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2121 }
38bee5dc
SS
2122
2123 *plen = l;
2124 return bounce.buffer;
6d16c2f8 2125 }
8ab934f9 2126 if (!todo) {
f3705d53 2127 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2128 + memory_region_section_addr(section, addr);
8ab934f9 2129 }
6d16c2f8
AL
2130
2131 len -= l;
2132 addr += l;
38bee5dc 2133 todo += l;
6d16c2f8 2134 }
8ab934f9
SS
2135 rlen = todo;
2136 ret = qemu_ram_ptr_length(raddr, &rlen);
2137 *plen = rlen;
2138 return ret;
6d16c2f8
AL
2139}
2140
ac1970fb 2141/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2142 * Will also mark the memory as dirty if is_write == 1. access_len gives
2143 * the amount of memory that was actually read or written by the caller.
2144 */
a8170e5e
AK
2145void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2146 int is_write, hwaddr access_len)
6d16c2f8
AL
2147{
2148 if (buffer != bounce.buffer) {
2149 if (is_write) {
e890261f 2150 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2151 while (access_len) {
2152 unsigned l;
2153 l = TARGET_PAGE_SIZE;
2154 if (l > access_len)
2155 l = access_len;
51d7a9eb 2156 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2157 addr1 += l;
2158 access_len -= l;
2159 }
2160 }
868bb33f 2161 if (xen_enabled()) {
e41d7c69 2162 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2163 }
6d16c2f8
AL
2164 return;
2165 }
2166 if (is_write) {
ac1970fb 2167 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2168 }
f8a83245 2169 qemu_vfree(bounce.buffer);
6d16c2f8 2170 bounce.buffer = NULL;
ba223c29 2171 cpu_notify_map_clients();
6d16c2f8 2172}
d0ecd2aa 2173
a8170e5e
AK
2174void *cpu_physical_memory_map(hwaddr addr,
2175 hwaddr *plen,
ac1970fb
AK
2176 int is_write)
2177{
2178 return address_space_map(&address_space_memory, addr, plen, is_write);
2179}
2180
a8170e5e
AK
2181void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2182 int is_write, hwaddr access_len)
ac1970fb
AK
2183{
2184 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2185}
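
/* Illustrative sketch (not part of exec.c): the usual zero-copy DMA pattern
 * built on cpu_physical_memory_map()/unmap().  *plen may come back smaller
 * than requested, and the map can fail while the single bounce buffer is in
 * use; a real device would loop and/or register a map client and retry from
 * its callback.  Here we simply fall back to cpu_physical_memory_rw(). */
static void example_dma_write(hwaddr addr, const uint8_t *data, hwaddr size)
{
    hwaddr plen = size;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (host) {
        memcpy(host, data, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
    } else {
        cpu_physical_memory_rw(addr, (uint8_t *)data, size, 1);
    }
}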
2186
8df1cd07 2187/* warning: addr must be aligned */
a8170e5e 2188static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2189 enum device_endian endian)
8df1cd07 2190{
8df1cd07
FB
2191 uint8_t *ptr;
2192 uint32_t val;
f3705d53 2193 MemoryRegionSection *section;
8df1cd07 2194
ac1970fb 2195 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2196
cc5bea60
BS
2197 if (!(memory_region_is_ram(section->mr) ||
2198 memory_region_is_romd(section->mr))) {
8df1cd07 2199 /* I/O case */
cc5bea60 2200 addr = memory_region_section_addr(section, addr);
37ec01d4 2201 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
2202#if defined(TARGET_WORDS_BIGENDIAN)
2203 if (endian == DEVICE_LITTLE_ENDIAN) {
2204 val = bswap32(val);
2205 }
2206#else
2207 if (endian == DEVICE_BIG_ENDIAN) {
2208 val = bswap32(val);
2209 }
2210#endif
8df1cd07
FB
2211 } else {
2212 /* RAM case */
f3705d53 2213 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2214 & TARGET_PAGE_MASK)
cc5bea60 2215 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2216 switch (endian) {
2217 case DEVICE_LITTLE_ENDIAN:
2218 val = ldl_le_p(ptr);
2219 break;
2220 case DEVICE_BIG_ENDIAN:
2221 val = ldl_be_p(ptr);
2222 break;
2223 default:
2224 val = ldl_p(ptr);
2225 break;
2226 }
8df1cd07
FB
2227 }
2228 return val;
2229}
2230
a8170e5e 2231uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2232{
2233 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2234}
2235
a8170e5e 2236uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2237{
2238 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2239}
2240
a8170e5e 2241uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2242{
2243 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2244}
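
/* Illustrative sketch (not part of exec.c): the fixed-endian accessors let
 * device code read a value in the device's byte order regardless of the
 * target's endianness.  Here a little-endian 32-bit counter is read and
 * written back incremented; the address is arbitrary. */
static void example_bump_le_counter(hwaddr counter_addr)
{
    uint32_t v = ldl_le_phys(counter_addr);

    stl_le_phys(counter_addr, v + 1);
}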
2245
84b7b8e7 2246/* warning: addr must be aligned */
a8170e5e 2247static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2248 enum device_endian endian)
84b7b8e7 2249{
84b7b8e7
FB
2250 uint8_t *ptr;
2251 uint64_t val;
f3705d53 2252 MemoryRegionSection *section;
84b7b8e7 2253
ac1970fb 2254 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2255
cc5bea60
BS
2256 if (!(memory_region_is_ram(section->mr) ||
2257 memory_region_is_romd(section->mr))) {
84b7b8e7 2258 /* I/O case */
cc5bea60 2259 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
2260
2261 /* XXX This is broken when device endian != cpu endian.
2262 Fix and add "endian" variable check */
84b7b8e7 2263#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2264 val = io_mem_read(section->mr, addr, 4) << 32;
2265 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2266#else
37ec01d4
AK
2267 val = io_mem_read(section->mr, addr, 4);
2268 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
2269#endif
2270 } else {
2271 /* RAM case */
f3705d53 2272 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2273 & TARGET_PAGE_MASK)
cc5bea60 2274 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2275 switch (endian) {
2276 case DEVICE_LITTLE_ENDIAN:
2277 val = ldq_le_p(ptr);
2278 break;
2279 case DEVICE_BIG_ENDIAN:
2280 val = ldq_be_p(ptr);
2281 break;
2282 default:
2283 val = ldq_p(ptr);
2284 break;
2285 }
84b7b8e7
FB
2286 }
2287 return val;
2288}
2289
a8170e5e 2290uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2291{
2292 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2293}
2294
a8170e5e 2295uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2296{
2297 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2298}
2299
a8170e5e 2300uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2301{
2302 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2303}
2304
aab33094 2305/* XXX: optimize */
a8170e5e 2306uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2307{
2308 uint8_t val;
2309 cpu_physical_memory_read(addr, &val, 1);
2310 return val;
2311}
2312
733f0b02 2313/* warning: addr must be aligned */
a8170e5e 2314static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2315 enum device_endian endian)
aab33094 2316{
733f0b02
MT
2317 uint8_t *ptr;
2318 uint64_t val;
f3705d53 2319 MemoryRegionSection *section;
733f0b02 2320
ac1970fb 2321 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2322
cc5bea60
BS
2323 if (!(memory_region_is_ram(section->mr) ||
2324 memory_region_is_romd(section->mr))) {
733f0b02 2325 /* I/O case */
cc5bea60 2326 addr = memory_region_section_addr(section, addr);
37ec01d4 2327 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
2328#if defined(TARGET_WORDS_BIGENDIAN)
2329 if (endian == DEVICE_LITTLE_ENDIAN) {
2330 val = bswap16(val);
2331 }
2332#else
2333 if (endian == DEVICE_BIG_ENDIAN) {
2334 val = bswap16(val);
2335 }
2336#endif
733f0b02
MT
2337 } else {
2338 /* RAM case */
f3705d53 2339 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2340 & TARGET_PAGE_MASK)
cc5bea60 2341 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2342 switch (endian) {
2343 case DEVICE_LITTLE_ENDIAN:
2344 val = lduw_le_p(ptr);
2345 break;
2346 case DEVICE_BIG_ENDIAN:
2347 val = lduw_be_p(ptr);
2348 break;
2349 default:
2350 val = lduw_p(ptr);
2351 break;
2352 }
733f0b02
MT
2353 }
2354 return val;
aab33094
FB
2355}
2356
a8170e5e 2357uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2358{
2359 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2360}
2361
a8170e5e 2362uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2363{
2364 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2365}
2366
a8170e5e 2367uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2368{
2369 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2370}
2371
8df1cd07
FB
2372/* warning: addr must be aligned. The ram page is not masked as dirty
2373 and the code inside is not invalidated. It is useful if the dirty
2374 bits are used to track modified PTEs */
a8170e5e 2375void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2376{
8df1cd07 2377 uint8_t *ptr;
f3705d53 2378 MemoryRegionSection *section;
8df1cd07 2379
ac1970fb 2380 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2381
f3705d53 2382 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2383 addr = memory_region_section_addr(section, addr);
f3705d53 2384 if (memory_region_is_ram(section->mr)) {
37ec01d4 2385 section = &phys_sections[phys_section_rom];
06ef3525 2386 }
37ec01d4 2387 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2388 } else {
f3705d53 2389 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2390 & TARGET_PAGE_MASK)
cc5bea60 2391 + memory_region_section_addr(section, addr);
5579c7f3 2392 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2393 stl_p(ptr, val);
74576198
AL
2394
2395 if (unlikely(in_migration)) {
2396 if (!cpu_physical_memory_is_dirty(addr1)) {
2397 /* invalidate code */
2398 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2399 /* set dirty bit */
f7c11b53
YT
2400 cpu_physical_memory_set_dirty_flags(
2401 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2402 }
2403 }
8df1cd07
FB
2404 }
2405}
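
/* Illustrative sketch (not part of exec.c): target MMU code that sets an
 * accessed/dirty bit in a guest page table entry can use
 * stl_phys_notdirty() so the update neither marks the RAM page dirty nor
 * invalidates translated code covering it.  The PTE bit value below is a
 * made-up placeholder, not any particular architecture's layout. */
static void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}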
2406
a8170e5e 2407void stq_phys_notdirty(hwaddr addr, uint64_t val)
bc98a7ef 2408{
bc98a7ef 2409 uint8_t *ptr;
f3705d53 2410 MemoryRegionSection *section;
bc98a7ef 2411
ac1970fb 2412 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2413
f3705d53 2414 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2415 addr = memory_region_section_addr(section, addr);
f3705d53 2416 if (memory_region_is_ram(section->mr)) {
37ec01d4 2417 section = &phys_sections[phys_section_rom];
06ef3525 2418 }
bc98a7ef 2419#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2420 io_mem_write(section->mr, addr, val >> 32, 4);
2421 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 2422#else
37ec01d4
AK
2423 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2424 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
2425#endif
2426 } else {
f3705d53 2427 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2428 & TARGET_PAGE_MASK)
cc5bea60 2429 + memory_region_section_addr(section, addr));
bc98a7ef
JM
2430 stq_p(ptr, val);
2431 }
2432}
2433
8df1cd07 2434/* warning: addr must be aligned */
a8170e5e 2435static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2436 enum device_endian endian)
8df1cd07 2437{
8df1cd07 2438 uint8_t *ptr;
f3705d53 2439 MemoryRegionSection *section;
8df1cd07 2440
ac1970fb 2441 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2442
f3705d53 2443 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2444 addr = memory_region_section_addr(section, addr);
f3705d53 2445 if (memory_region_is_ram(section->mr)) {
37ec01d4 2446 section = &phys_sections[phys_section_rom];
06ef3525 2447 }
1e78bcc1
AG
2448#if defined(TARGET_WORDS_BIGENDIAN)
2449 if (endian == DEVICE_LITTLE_ENDIAN) {
2450 val = bswap32(val);
2451 }
2452#else
2453 if (endian == DEVICE_BIG_ENDIAN) {
2454 val = bswap32(val);
2455 }
2456#endif
37ec01d4 2457 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
2458 } else {
2459 unsigned long addr1;
f3705d53 2460 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2461 + memory_region_section_addr(section, addr);
8df1cd07 2462 /* RAM case */
5579c7f3 2463 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2464 switch (endian) {
2465 case DEVICE_LITTLE_ENDIAN:
2466 stl_le_p(ptr, val);
2467 break;
2468 case DEVICE_BIG_ENDIAN:
2469 stl_be_p(ptr, val);
2470 break;
2471 default:
2472 stl_p(ptr, val);
2473 break;
2474 }
51d7a9eb 2475 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2476 }
2477}
2478
a8170e5e 2479void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2480{
2481 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2482}
2483
a8170e5e 2484void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2485{
2486 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2487}
2488
a8170e5e 2489void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2490{
2491 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2492}
2493
aab33094 2494/* XXX: optimize */
a8170e5e 2495void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2496{
2497 uint8_t v = val;
2498 cpu_physical_memory_write(addr, &v, 1);
2499}
2500
733f0b02 2501/* warning: addr must be aligned */
a8170e5e 2502static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2503 enum device_endian endian)
aab33094 2504{
733f0b02 2505 uint8_t *ptr;
f3705d53 2506 MemoryRegionSection *section;
733f0b02 2507
ac1970fb 2508 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2509
f3705d53 2510 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2511 addr = memory_region_section_addr(section, addr);
f3705d53 2512 if (memory_region_is_ram(section->mr)) {
37ec01d4 2513 section = &phys_sections[phys_section_rom];
06ef3525 2514 }
1e78bcc1
AG
2515#if defined(TARGET_WORDS_BIGENDIAN)
2516 if (endian == DEVICE_LITTLE_ENDIAN) {
2517 val = bswap16(val);
2518 }
2519#else
2520 if (endian == DEVICE_BIG_ENDIAN) {
2521 val = bswap16(val);
2522 }
2523#endif
37ec01d4 2524 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
2525 } else {
2526 unsigned long addr1;
f3705d53 2527 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2528 + memory_region_section_addr(section, addr);
733f0b02
MT
2529 /* RAM case */
2530 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2531 switch (endian) {
2532 case DEVICE_LITTLE_ENDIAN:
2533 stw_le_p(ptr, val);
2534 break;
2535 case DEVICE_BIG_ENDIAN:
2536 stw_be_p(ptr, val);
2537 break;
2538 default:
2539 stw_p(ptr, val);
2540 break;
2541 }
51d7a9eb 2542 invalidate_and_set_dirty(addr1, 2);
733f0b02 2543 }
aab33094
FB
2544}
2545
a8170e5e 2546void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2547{
2548 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2549}
2550
a8170e5e 2551void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2552{
2553 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2554}
2555
a8170e5e 2556void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2557{
2558 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2559}
2560
aab33094 2561/* XXX: optimize */
a8170e5e 2562void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2563{
2564 val = tswap64(val);
71d2b725 2565 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2566}
2567
a8170e5e 2568void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2569{
2570 val = cpu_to_le64(val);
2571 cpu_physical_memory_write(addr, &val, 8);
2572}
2573
a8170e5e 2574void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2575{
2576 val = cpu_to_be64(val);
2577 cpu_physical_memory_write(addr, &val, 8);
2578}
2579
5e2972fd 2580/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2581int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2582 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2583{
2584 int l;
a8170e5e 2585 hwaddr phys_addr;
9b3c35e0 2586 target_ulong page;
13eb76e0
FB
2587
2588 while (len > 0) {
2589 page = addr & TARGET_PAGE_MASK;
2590 phys_addr = cpu_get_phys_page_debug(env, page);
2591 /* if no physical page mapped, return an error */
2592 if (phys_addr == -1)
2593 return -1;
2594 l = (page + TARGET_PAGE_SIZE) - addr;
2595 if (l > len)
2596 l = len;
5e2972fd 2597 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2598 if (is_write)
2599 cpu_physical_memory_write_rom(phys_addr, buf, l);
2600 else
5e2972fd 2601 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2602 len -= l;
2603 buf += l;
2604 addr += l;
2605 }
2606 return 0;
2607}
a68fe89c 2608#endif
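
/* Illustrative sketch (not part of exec.c): a debugger stub reads and
 * patches guest *virtual* memory through cpu_memory_rw_debug(), which in
 * the system-emulation case translates each page with
 * cpu_get_phys_page_debug() and can write through to ROM (e.g. to plant
 * software breakpoints).  The breakpoint opcode is a made-up placeholder. */
static int example_insert_sw_breakpoint(CPUArchState *env, target_ulong pc)
{
    static const uint8_t bp_insn[] = { 0xcc };    /* hypothetical opcode */

    return cpu_memory_rw_debug(env, pc, (uint8_t *)bp_insn,
                               sizeof(bp_insn), 1);
}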
13eb76e0 2609
8e4a424b
BS
2610#if !defined(CONFIG_USER_ONLY)
2611
2612/*
2613 * A helper function for the _utterly broken_ virtio device model to find out if
2614 * it's running on a big endian machine. Don't do this at home kids!
2615 */
2616bool virtio_is_big_endian(void);
2617bool virtio_is_big_endian(void)
2618{
2619#if defined(TARGET_WORDS_BIGENDIAN)
2620 return true;
2621#else
2622 return false;
2623#endif
2624}
2625
2626#endif
2627
76f35538 2628#ifndef CONFIG_USER_ONLY
a8170e5e 2629bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2630{
2631 MemoryRegionSection *section;
2632
ac1970fb
AK
2633 section = phys_page_find(address_space_memory.dispatch,
2634 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
2635
2636 return !(memory_region_is_ram(section->mr) ||
2637 memory_region_is_romd(section->mr));
2638}
2639#endif