54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004 4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c 20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c 24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
0d09e41a 34#include "hw/xen/xen.h"
1de7afc9 35#include "qemu/timer.h"
36#include "qemu/config-file.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a 40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
67d95c15 52
67d3b957 53//#define DEBUG_UNASSIGNED
db7b5426 54//#define DEBUG_SUBPAGE
1196be37 55
e2eef170 56#if !defined(CONFIG_USER_ONLY)
9fa3e853 57int phys_ram_fd;
74576198 58static int in_migration;
94a6b54f 59
a3161038 60RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a 61
62static MemoryRegion *system_memory;
309cb471 63static MemoryRegion *system_io;
62152b8a 64
f6790af6 65AddressSpace address_space_io;
66AddressSpace address_space_memory;
9e11908f 67DMAContext dma_context_memory;
2673a5da 68
0e0df1e2 69MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
de712f94 70static MemoryRegion io_mem_subpage_ram;
0e0df1e2 71
e2eef170 72#endif
9fa3e853 73
9349b4f9 74CPUArchState *first_cpu;
6a00d601 75/* current CPU in the current thread. It is only valid inside
76 cpu_exec() */
9349b4f9 77DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 78/* 0 = Do not count executed instructions.
bf20dc07 79 1 = Precise instruction counting.
2e70f6ef 80 2 = Adaptive rate instruction counting. */
5708fc66 81int use_icount;
6a00d601 82
e2eef170 83#if !defined(CONFIG_USER_ONLY)
4346ae3e 84
5312bd8b 85static MemoryRegionSection *phys_sections;
86static unsigned phys_sections_nb, phys_sections_nb_alloc;
87static uint16_t phys_section_unassigned;
aa102231 88static uint16_t phys_section_notdirty;
89static uint16_t phys_section_rom;
90static uint16_t phys_section_watch;
5312bd8b 91
d6f2ea22 92/* Simple allocator for PhysPageEntry nodes */
93static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
94static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
95
07f07b31 96#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 97
e2eef170 98static void io_mem_init(void);
62152b8a 99static void memory_map_init(void);
8b9c99d9 100static void *qemu_safe_ram_ptr(ram_addr_t addr);
e2eef170 101
1ec9b909 102static MemoryRegion io_mem_watch;
6658ffb8 103#endif
fd6ce8f6 104
6d9a1304 105#if !defined(CONFIG_USER_ONLY)
d6f2ea22 106
f7bf5461 107static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 108{
f7bf5461 109 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
d6f2ea22 110 typedef PhysPageEntry Node[L2_SIZE];
111 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
f7bf5461 112 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
113 phys_map_nodes_nb + nodes);
d6f2ea22 114 phys_map_nodes = g_renew(Node, phys_map_nodes,
115 phys_map_nodes_nb_alloc);
116 }
f7bf5461 117}
118
119static uint16_t phys_map_node_alloc(void)
120{
121 unsigned i;
122 uint16_t ret;
123
124 ret = phys_map_nodes_nb++;
125 assert(ret != PHYS_MAP_NODE_NIL);
126 assert(ret != phys_map_nodes_nb_alloc);
d6f2ea22 127 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 128 phys_map_nodes[ret][i].is_leaf = 0;
c19e8800 129 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 130 }
f7bf5461 131 return ret;
d6f2ea22 132}
133
134static void phys_map_nodes_reset(void)
135{
136 phys_map_nodes_nb = 0;
137}
138
92e873b9 139
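/* Walk (and allocate, as needed) the physical page radix tree, pointing every
   entry that covers [*index, *index + *nb) at the 'leaf' section; ranges that
   do not fill a whole entry at this level recurse one level down. */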
a8170e5e 140static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
141 hwaddr *nb, uint16_t leaf,
2999097b 142 int level)
f7bf5461 143{
144 PhysPageEntry *p;
145 int i;
a8170e5e 146 hwaddr step = (hwaddr)1 << (level * L2_BITS);
108c49b8 147
07f07b31 148 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800 149 lp->ptr = phys_map_node_alloc();
150 p = phys_map_nodes[lp->ptr];
f7bf5461 151 if (level == 0) {
152 for (i = 0; i < L2_SIZE; i++) {
07f07b31 153 p[i].is_leaf = 1;
c19e8800 154 p[i].ptr = phys_section_unassigned;
4346ae3e 155 }
67c4d23c 156 }
f7bf5461 157 } else {
c19e8800 158 p = phys_map_nodes[lp->ptr];
92e873b9 159 }
2999097b 160 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 161
2999097b 162 while (*nb && lp < &p[L2_SIZE]) {
07f07b31 163 if ((*index & (step - 1)) == 0 && *nb >= step) {
164 lp->is_leaf = true;
c19e8800 165 lp->ptr = leaf;
07f07b31 166 *index += step;
167 *nb -= step;
2999097b 168 } else {
169 phys_page_set_level(lp, index, nb, leaf, level - 1);
170 }
171 ++lp;
f7bf5461 172 }
173}
174
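/* Map 'nb' physical pages starting at page 'index' to the section numbered 'leaf'. */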
ac1970fb 175static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 176 hwaddr index, hwaddr nb,
2999097b 177 uint16_t leaf)
f7bf5461 178{
2999097b 179 /* Wildly overreserve - it doesn't matter much. */
07f07b31 180 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 181
ac1970fb 182 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9 183}
184
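/* Look up the MemoryRegionSection covering the given page index; unmapped
   pages resolve to the unassigned section. */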
a8170e5e 185MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
92e873b9 186{
ac1970fb 187 PhysPageEntry lp = d->phys_map;
31ab2b4a 188 PhysPageEntry *p;
189 int i;
31ab2b4a 190 uint16_t s_index = phys_section_unassigned;
f1f6e3b8 191
07f07b31 192 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 193 if (lp.ptr == PHYS_MAP_NODE_NIL) {
31ab2b4a 194 goto not_found;
195 }
c19e8800 196 p = phys_map_nodes[lp.ptr];
31ab2b4a 197 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 198 }
31ab2b4a 199
c19e8800 200 s_index = lp.ptr;
31ab2b4a 201not_found:
f3705d53 202 return &phys_sections[s_index];
203}
204
e5548617 205bool memory_region_is_unassigned(MemoryRegion *mr)
206{
207 return mr != &io_mem_ram && mr != &io_mem_rom
208 && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 209 && mr != &io_mem_watch;
fd6ce8f6 210}
5b6dd868 211#endif
fd6ce8f6 212
5b6dd868 213void cpu_exec_init_all(void)
fdbb84d1 214{
5b6dd868 215#if !defined(CONFIG_USER_ONLY)
b2a8658e 216 qemu_mutex_init(&ram_list.mutex);
5b6dd868 217 memory_map_init();
218 io_mem_init();
fdbb84d1 219#endif
5b6dd868 220}
fdbb84d1 221
b170fce3 222#if !defined(CONFIG_USER_ONLY)
5b6dd868 223
224static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 225{
259186a7 226 CPUState *cpu = opaque;
a513fe19 227
5b6dd868 228 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
229 version_id is increased. */
259186a7 230 cpu->interrupt_request &= ~0x01;
231 tlb_flush(cpu->env_ptr, 1);
5b6dd868 232
233 return 0;
a513fe19 234}
7501267e 235
5b6dd868 236static const VMStateDescription vmstate_cpu_common = {
237 .name = "cpu_common",
238 .version_id = 1,
239 .minimum_version_id = 1,
240 .minimum_version_id_old = 1,
241 .post_load = cpu_common_post_load,
242 .fields = (VMStateField []) {
259186a7 243 VMSTATE_UINT32(halted, CPUState),
244 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 245 VMSTATE_END_OF_LIST()
246 }
247};
b170fce3 248#else
249#define vmstate_cpu_common vmstate_dummy
5b6dd868 250#endif
ea041c0e 251
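/* Return the CPUState whose cpu_index equals 'index', or NULL if none exists. */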
38d8f5c8 252CPUState *qemu_get_cpu(int index)
ea041c0e 253{
5b6dd868 254 CPUArchState *env = first_cpu;
38d8f5c8 255 CPUState *cpu = NULL;
ea041c0e 256
5b6dd868 257 while (env) {
55e5c285 258 cpu = ENV_GET_CPU(env);
259 if (cpu->cpu_index == index) {
5b6dd868 260 break;
55e5c285 261 }
5b6dd868 262 env = env->next_cpu;
ea041c0e 263 }
5b6dd868 264
d76fddae 265 return env ? cpu : NULL;
ea041c0e 266}
267
d6b9e0d6 268void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
269{
270 CPUArchState *env = first_cpu;
271
272 while (env) {
273 func(ENV_GET_CPU(env), data);
274 env = env->next_cpu;
275 }
276}
277
5b6dd868 278void cpu_exec_init(CPUArchState *env)
ea041c0e 279{
5b6dd868 280 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 281 CPUClass *cc = CPU_GET_CLASS(cpu);
5b6dd868 282 CPUArchState **penv;
283 int cpu_index;
284
285#if defined(CONFIG_USER_ONLY)
286 cpu_list_lock();
287#endif
288 env->next_cpu = NULL;
289 penv = &first_cpu;
290 cpu_index = 0;
291 while (*penv != NULL) {
292 penv = &(*penv)->next_cpu;
293 cpu_index++;
294 }
55e5c285 295 cpu->cpu_index = cpu_index;
1b1ed8dc 296 cpu->numa_node = 0;
5b6dd868 297 QTAILQ_INIT(&env->breakpoints);
298 QTAILQ_INIT(&env->watchpoints);
299#ifndef CONFIG_USER_ONLY
300 cpu->thread_id = qemu_get_thread_id();
301#endif
302 *penv = env;
303#if defined(CONFIG_USER_ONLY)
304 cpu_list_unlock();
305#endif
259186a7 306 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
5b6dd868 307#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868 308 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
309 cpu_save, cpu_load, env);
b170fce3 310 assert(cc->vmsd == NULL);
5b6dd868 311#endif
b170fce3 312 if (cc->vmsd != NULL) {
313 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
314 }
ea041c0e 315}
316
1fddef4b 317#if defined(TARGET_HAS_ICE)
94df27fd 318#if defined(CONFIG_USER_ONLY)
9349b4f9 319static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
94df27fd 320{
321 tb_invalidate_phys_page_range(pc, pc + 1, 0);
322}
323#else
1e7855a5 324static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
325{
9d70c4b7 326 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
327 (pc & ~TARGET_PAGE_MASK));
1e7855a5 328}
c27004ec 329#endif
94df27fd 330#endif /* TARGET_HAS_ICE */
d720b93d 331
c527ee8f 332#if defined(CONFIG_USER_ONLY)
9349b4f9 333void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f 334
335{
336}
337
9349b4f9 338int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f 339 int flags, CPUWatchpoint **watchpoint)
340{
341 return -ENOSYS;
342}
343#else
6658ffb8 344/* Add a watchpoint. */
9349b4f9 345int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 346 int flags, CPUWatchpoint **watchpoint)
6658ffb8 347{
b4051334 348 target_ulong len_mask = ~(len - 1);
c0ce998e 349 CPUWatchpoint *wp;
6658ffb8 350
b4051334 351 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828 352 if ((len & (len - 1)) || (addr & ~len_mask) ||
353 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334 354 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
355 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
356 return -EINVAL;
357 }
7267c094 358 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
359
360 wp->vaddr = addr;
b4051334 361 wp->len_mask = len_mask;
a1d1bb31
AL
362 wp->flags = flags;
363
2dc9f411 364 /* keep all GDB-injected watchpoints in front */
c0ce998e 365 if (flags & BP_GDB)
72cf2d4f 366 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 367 else
72cf2d4f 368 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 369
6658ffb8 370 tlb_flush_page(env, addr);
a1d1bb31
AL
371
372 if (watchpoint)
373 *watchpoint = wp;
374 return 0;
6658ffb8 375}
376
a1d1bb31 377/* Remove a specific watchpoint. */
9349b4f9 378int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 379 int flags)
6658ffb8 380{
b4051334 381 target_ulong len_mask = ~(len - 1);
a1d1bb31 382 CPUWatchpoint *wp;
6658ffb8 383
72cf2d4f 384 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 385 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 386 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 387 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
388 return 0;
389 }
390 }
a1d1bb31 391 return -ENOENT;
6658ffb8
PB
392}
393
a1d1bb31 394/* Remove a specific watchpoint by reference. */
9349b4f9 395void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 396{
72cf2d4f 397 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 398
a1d1bb31 399 tlb_flush_page(env, watchpoint->vaddr);
400
7267c094 401 g_free(watchpoint);
a1d1bb31
AL
402}
403
404/* Remove all matching watchpoints. */
9349b4f9 405void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 406{
c0ce998e 407 CPUWatchpoint *wp, *next;
a1d1bb31 408
72cf2d4f 409 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
410 if (wp->flags & mask)
411 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 412 }
7d03f82f 413}
c527ee8f 414#endif
7d03f82f 415
a1d1bb31 416/* Add a breakpoint. */
9349b4f9 417int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 418 CPUBreakpoint **breakpoint)
4c3a88a2 419{
1fddef4b 420#if defined(TARGET_HAS_ICE)
c0ce998e 421 CPUBreakpoint *bp;
3b46e624 422
7267c094 423 bp = g_malloc(sizeof(*bp));
4c3a88a2 424
a1d1bb31 425 bp->pc = pc;
426 bp->flags = flags;
427
2dc9f411 428 /* keep all GDB-injected breakpoints in front */
c0ce998e 429 if (flags & BP_GDB)
72cf2d4f 430 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 431 else
72cf2d4f 432 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 433
d720b93d 434 breakpoint_invalidate(env, pc);
a1d1bb31
AL
435
436 if (breakpoint)
437 *breakpoint = bp;
4c3a88a2
FB
438 return 0;
439#else
a1d1bb31 440 return -ENOSYS;
4c3a88a2
FB
441#endif
442}
443
a1d1bb31 444/* Remove a specific breakpoint. */
9349b4f9 445int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 446{
7d03f82f 447#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
448 CPUBreakpoint *bp;
449
72cf2d4f 450 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
451 if (bp->pc == pc && bp->flags == flags) {
452 cpu_breakpoint_remove_by_ref(env, bp);
453 return 0;
454 }
7d03f82f 455 }
a1d1bb31
AL
456 return -ENOENT;
457#else
458 return -ENOSYS;
7d03f82f
EI
459#endif
460}
461
a1d1bb31 462/* Remove a specific breakpoint by reference. */
9349b4f9 463void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 464{
1fddef4b 465#if defined(TARGET_HAS_ICE)
72cf2d4f 466 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 467
a1d1bb31
AL
468 breakpoint_invalidate(env, breakpoint->pc);
469
7267c094 470 g_free(breakpoint);
a1d1bb31
AL
471#endif
472}
473
474/* Remove all matching breakpoints. */
9349b4f9 475void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
476{
477#if defined(TARGET_HAS_ICE)
c0ce998e 478 CPUBreakpoint *bp, *next;
a1d1bb31 479
72cf2d4f 480 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
481 if (bp->flags & mask)
482 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 483 }
4c3a88a2
FB
484#endif
485}
486
c33a346e 487/* enable or disable single step mode. EXCP_DEBUG is returned by the
488 CPU loop after each instruction */
9349b4f9 489void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 490{
1fddef4b 491#if defined(TARGET_HAS_ICE)
c33a346e
FB
492 if (env->singlestep_enabled != enabled) {
493 env->singlestep_enabled = enabled;
e22a25c9
AL
494 if (kvm_enabled())
495 kvm_update_guest_debug(env, 0);
496 else {
ccbb4d44 497 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
498 /* XXX: only flush what is necessary */
499 tb_flush(env);
500 }
c33a346e
FB
501 }
502#endif
503}
504
9349b4f9 505void cpu_exit(CPUArchState *env)
3098dba0 506{
fcd7d003
AF
507 CPUState *cpu = ENV_GET_CPU(env);
508
509 cpu->exit_request = 1;
378df4b2 510 cpu->tcg_exit_req = 1;
3098dba0
AJ
511}
512
9349b4f9 513void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e 514{
515 va_list ap;
493ae1f0 516 va_list ap2;
7501267e
FB
517
518 va_start(ap, fmt);
493ae1f0 519 va_copy(ap2, ap);
7501267e
FB
520 fprintf(stderr, "qemu: fatal: ");
521 vfprintf(stderr, fmt, ap);
522 fprintf(stderr, "\n");
6fd2a026 523 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
524 if (qemu_log_enabled()) {
525 qemu_log("qemu: fatal: ");
526 qemu_log_vprintf(fmt, ap2);
527 qemu_log("\n");
6fd2a026 528 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 529 qemu_log_flush();
93fcfe39 530 qemu_log_close();
924edcae 531 }
493ae1f0 532 va_end(ap2);
f9373291 533 va_end(ap);
fd052bf6
RV
534#if defined(CONFIG_USER_ONLY)
535 {
536 struct sigaction act;
537 sigfillset(&act.sa_mask);
538 act.sa_handler = SIG_DFL;
539 sigaction(SIGABRT, &act, NULL);
540 }
541#endif
7501267e
FB
542 abort();
543}
544
9349b4f9 545CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 546{
9349b4f9 547 CPUArchState *new_env = cpu_init(env->cpu_model_str);
548 CPUArchState *next_cpu = new_env->next_cpu;
5a38f081
AL
549#if defined(TARGET_HAS_ICE)
550 CPUBreakpoint *bp;
551 CPUWatchpoint *wp;
552#endif
553
9349b4f9 554 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081 555
55e5c285 556 /* Preserve chaining. */
c5be9f08 557 new_env->next_cpu = next_cpu;
5a38f081
AL
558
559 /* Clone all break/watchpoints.
560 Note: Once we support ptrace with hw-debug register access, make sure
561 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
562 QTAILQ_INIT(&env->breakpoints);
563 QTAILQ_INIT(&env->watchpoints);
5a38f081 564#if defined(TARGET_HAS_ICE)
72cf2d4f 565 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
566 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
567 }
72cf2d4f 568 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
569 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
570 wp->flags, NULL);
571 }
572#endif
573
c5be9f08
TS
574 return new_env;
575}
576
0124311e 577#if !defined(CONFIG_USER_ONLY)
d24981d3 578static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
579 uintptr_t length)
580{
581 uintptr_t start1;
582
583 /* we modify the TLB cache so that the dirty bit will be set again
584 when accessing the range */
585 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
586 /* Check that we don't span multiple blocks - this breaks the
587 address comparisons below. */
588 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
589 != (end - 1) - start) {
590 abort();
591 }
592 cpu_tlb_reset_dirty_all(start1, length);
593
594}
595
5579c7f3 596/* Note: start and end must be within the same ram block. */
c227f099 597void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 598 int dirty_flags)
1ccde1cb 599{
d24981d3 600 uintptr_t length;
1ccde1cb
FB
601
602 start &= TARGET_PAGE_MASK;
603 end = TARGET_PAGE_ALIGN(end);
604
605 length = end - start;
606 if (length == 0)
607 return;
f7c11b53 608 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 609
d24981d3
JQ
610 if (tcg_enabled()) {
611 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 612 }
1ccde1cb
FB
613}
614
8b9c99d9 615static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 616{
f6f3fbca 617 int ret = 0;
74576198 618 in_migration = enable;
f6f3fbca 619 return ret;
74576198 620}
621
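/* Compute the iotlb value used to fill a TLB entry: RAM pages yield their ram
   address (tagged as notdirty or ROM), MMIO pages yield a section index, and
   pages covered by a watchpoint are redirected to the watch section. */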
a8170e5e 622hwaddr memory_region_section_get_iotlb(CPUArchState *env,
e5548617 623 MemoryRegionSection *section,
624 target_ulong vaddr,
a8170e5e 625 hwaddr paddr,
e5548617
BS
626 int prot,
627 target_ulong *address)
628{
a8170e5e 629 hwaddr iotlb;
e5548617
BS
630 CPUWatchpoint *wp;
631
cc5bea60 632 if (memory_region_is_ram(section->mr)) {
e5548617
BS
633 /* Normal RAM. */
634 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 635 + memory_region_section_addr(section, paddr);
e5548617
BS
636 if (!section->readonly) {
637 iotlb |= phys_section_notdirty;
638 } else {
639 iotlb |= phys_section_rom;
640 }
641 } else {
e5548617 642 iotlb = section - phys_sections;
cc5bea60 643 iotlb += memory_region_section_addr(section, paddr);
e5548617
BS
644 }
645
646 /* Make accesses to pages with watchpoints go via the
647 watchpoint trap routines. */
648 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
649 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
650 /* Avoid trapping reads of pages with a write breakpoint. */
651 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
652 iotlb = phys_section_watch + paddr;
653 *address |= TLB_MMIO;
654 break;
655 }
656 }
657 }
658
659 return iotlb;
660}
9fa3e853
FB
661#endif /* defined(CONFIG_USER_ONLY) */
662
e2eef170 663#if !defined(CONFIG_USER_ONLY)
8da3ff18 664
c04b2b78
PB
665#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
666typedef struct subpage_t {
70c68e44 667 MemoryRegion iomem;
a8170e5e 668 hwaddr base;
5312bd8b 669 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
670} subpage_t;
671
c227f099 672static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 673 uint16_t section);
a8170e5e 674static subpage_t *subpage_init(hwaddr base);
5312bd8b 675static void destroy_page_desc(uint16_t section_index)
54688b1e 676{
5312bd8b
AK
677 MemoryRegionSection *section = &phys_sections[section_index];
678 MemoryRegion *mr = section->mr;
54688b1e
AK
679
680 if (mr->subpage) {
681 subpage_t *subpage = container_of(mr, subpage_t, iomem);
682 memory_region_destroy(&subpage->iomem);
683 g_free(subpage);
684 }
685}
686
4346ae3e 687static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
688{
689 unsigned i;
d6f2ea22 690 PhysPageEntry *p;
54688b1e 691
c19e8800 692 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
693 return;
694 }
695
c19e8800 696 p = phys_map_nodes[lp->ptr];
4346ae3e 697 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 698 if (!p[i].is_leaf) {
54688b1e 699 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 700 } else {
c19e8800 701 destroy_page_desc(p[i].ptr);
54688b1e 702 }
54688b1e 703 }
07f07b31 704 lp->is_leaf = 0;
c19e8800 705 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
706}
707
ac1970fb 708static void destroy_all_mappings(AddressSpaceDispatch *d)
54688b1e 709{
ac1970fb 710 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
d6f2ea22 711 phys_map_nodes_reset();
54688b1e
AK
712}
713
5312bd8b 714static uint16_t phys_section_add(MemoryRegionSection *section)
715{
716 if (phys_sections_nb == phys_sections_nb_alloc) {
717 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
718 phys_sections = g_renew(MemoryRegionSection, phys_sections,
719 phys_sections_nb_alloc);
720 }
721 phys_sections[phys_sections_nb] = *section;
722 return phys_sections_nb++;
723}
724
725static void phys_sections_clear(void)
726{
727 phys_sections_nb = 0;
728}
729
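/* Register a section that does not cover a whole page by routing it through a
   subpage_t, which dispatches sub-page offsets to the right section. */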
ac1970fb 730static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164 731{
732 subpage_t *subpage;
a8170e5e 733 hwaddr base = section->offset_within_address_space
0f0cb164 734 & TARGET_PAGE_MASK;
ac1970fb 735 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
0f0cb164
AK
736 MemoryRegionSection subsection = {
737 .offset_within_address_space = base,
738 .size = TARGET_PAGE_SIZE,
739 };
a8170e5e 740 hwaddr start, end;
0f0cb164 741
f3705d53 742 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 743
f3705d53 744 if (!(existing->mr->subpage)) {
0f0cb164
AK
745 subpage = subpage_init(base);
746 subsection.mr = &subpage->iomem;
ac1970fb 747 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 748 phys_section_add(&subsection));
0f0cb164 749 } else {
f3705d53 750 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
751 }
752 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
adb2a9b5 753 end = start + section->size - 1;
0f0cb164
AK
754 subpage_register(subpage, start, end, phys_section_add(section));
755}
756
757
ac1970fb 758static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
33417e70 759{
a8170e5e 760 hwaddr start_addr = section->offset_within_address_space;
dd81124b 761 ram_addr_t size = section->size;
a8170e5e 762 hwaddr addr;
5312bd8b 763 uint16_t section_index = phys_section_add(section);
dd81124b 764
3b8e6a2d 765 assert(size);
f6f3fbca 766
3b8e6a2d 767 addr = start_addr;
ac1970fb 768 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2999097b 769 section_index);
33417e70 770}
771
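/* Add a section to the dispatch map: unaligned head and tail fragments are
   registered as subpages, the page-aligned middle as whole pages. */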
ac1970fb 772static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 773{
ac1970fb 774 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
0f0cb164 775 MemoryRegionSection now = *section, remain = *section;
776
777 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
778 || (now.size < TARGET_PAGE_SIZE)) {
779 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
780 - now.offset_within_address_space,
781 now.size);
ac1970fb 782 register_subpage(d, &now);
0f0cb164
AK
783 remain.size -= now.size;
784 remain.offset_within_address_space += now.size;
785 remain.offset_within_region += now.size;
786 }
69b67646
TH
787 while (remain.size >= TARGET_PAGE_SIZE) {
788 now = remain;
789 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
790 now.size = TARGET_PAGE_SIZE;
ac1970fb 791 register_subpage(d, &now);
69b67646
TH
792 } else {
793 now.size &= TARGET_PAGE_MASK;
ac1970fb 794 register_multipage(d, &now);
69b67646 795 }
0f0cb164
AK
796 remain.size -= now.size;
797 remain.offset_within_address_space += now.size;
798 remain.offset_within_region += now.size;
799 }
800 now = remain;
801 if (now.size) {
ac1970fb 802 register_subpage(d, &now);
0f0cb164
AK
803 }
804}
805
62a2744c
SY
806void qemu_flush_coalesced_mmio_buffer(void)
807{
808 if (kvm_enabled())
809 kvm_flush_coalesced_mmio_buffer();
810}
811
b2a8658e
UD
812void qemu_mutex_lock_ramlist(void)
813{
814 qemu_mutex_lock(&ram_list.mutex);
815}
816
817void qemu_mutex_unlock_ramlist(void)
818{
819 qemu_mutex_unlock(&ram_list.mutex);
820}
821
c902760f
MT
822#if defined(__linux__) && !defined(TARGET_S390X)
823
824#include <sys/vfs.h>
825
826#define HUGETLBFS_MAGIC 0x958458f6
827
828static long gethugepagesize(const char *path)
829{
830 struct statfs fs;
831 int ret;
832
833 do {
9742bf26 834 ret = statfs(path, &fs);
c902760f
MT
835 } while (ret != 0 && errno == EINTR);
836
837 if (ret != 0) {
9742bf26
YT
838 perror(path);
839 return 0;
c902760f
MT
840 }
841
842 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 843 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f 844
845 return fs.f_bsize;
846}
847
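/* Back a RAM block with an mmap()ed file created under the -mem-path
   (hugetlbfs) directory; returns NULL on failure so the caller can fall
   back to anonymous memory. */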
04b16653 848static void *file_ram_alloc(RAMBlock *block,
849 ram_addr_t memory,
850 const char *path)
c902760f
MT
851{
852 char *filename;
8ca761f6
PF
853 char *sanitized_name;
854 char *c;
c902760f
MT
855 void *area;
856 int fd;
857#ifdef MAP_POPULATE
858 int flags;
859#endif
860 unsigned long hpagesize;
861
862 hpagesize = gethugepagesize(path);
863 if (!hpagesize) {
9742bf26 864 return NULL;
c902760f
MT
865 }
866
867 if (memory < hpagesize) {
868 return NULL;
869 }
870
871 if (kvm_enabled() && !kvm_has_sync_mmu()) {
872 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
873 return NULL;
874 }
875
8ca761f6
PF
876 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
877 sanitized_name = g_strdup(block->mr->name);
878 for (c = sanitized_name; *c != '\0'; c++) {
879 if (*c == '/')
880 *c = '_';
881 }
882
883 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
884 sanitized_name);
885 g_free(sanitized_name);
c902760f
MT
886
887 fd = mkstemp(filename);
888 if (fd < 0) {
9742bf26 889 perror("unable to create backing store for hugepages");
e4ada482 890 g_free(filename);
9742bf26 891 return NULL;
c902760f
MT
892 }
893 unlink(filename);
e4ada482 894 g_free(filename);
c902760f
MT
895
896 memory = (memory+hpagesize-1) & ~(hpagesize-1);
897
898 /*
899 * ftruncate is not supported by hugetlbfs in older
900 * hosts, so don't bother bailing out on errors.
901 * If anything goes wrong with it under other filesystems,
902 * mmap will fail.
903 */
904 if (ftruncate(fd, memory))
9742bf26 905 perror("ftruncate");
c902760f
MT
906
907#ifdef MAP_POPULATE
908 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
909 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
910 * to sidestep this quirk.
911 */
912 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
913 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
914#else
915 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
916#endif
917 if (area == MAP_FAILED) {
9742bf26
YT
918 perror("file_ram_alloc: can't mmap RAM pages");
919 close(fd);
920 return (NULL);
c902760f 921 }
04b16653 922 block->fd = fd;
c902760f 923 return area;
924}
925#endif
926
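/* Pick an offset in ram_addr_t space for a new block of 'size' bytes,
   choosing the smallest existing gap that fits. */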
d17b5288 927static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653 928{
929 RAMBlock *block, *next_block;
3e837b2c 930 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 931
49cd9ac6
SH
932 assert(size != 0); /* it would hand out same offset multiple times */
933
a3161038 934 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
935 return 0;
936
a3161038 937 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 938 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
939
940 end = block->offset + block->length;
941
a3161038 942 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
943 if (next_block->offset >= end) {
944 next = MIN(next, next_block->offset);
945 }
946 }
947 if (next - end >= size && next - end < mingap) {
3e837b2c 948 offset = end;
04b16653
AW
949 mingap = next - end;
950 }
951 }
3e837b2c
AW
952
953 if (offset == RAM_ADDR_MAX) {
954 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
955 (uint64_t)size);
956 abort();
957 }
958
04b16653
AW
959 return offset;
960}
961
652d7ec2 962ram_addr_t last_ram_offset(void)
d17b5288
AW
963{
964 RAMBlock *block;
965 ram_addr_t last = 0;
966
a3161038 967 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
968 last = MAX(last, block->offset + block->length);
969
970 return last;
971}
972
ddb97f1d
JB
973static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
974{
975 int ret;
976 QemuOpts *machine_opts;
977
978 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
979 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
980 if (machine_opts &&
981 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
982 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
983 if (ret) {
984 perror("qemu_madvise");
985 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
986 "but dump_guest_core=off specified\n");
987 }
988 }
989}
990
c5705a77 991void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
992{
993 RAMBlock *new_block, *block;
994
c5705a77 995 new_block = NULL;
a3161038 996 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77
AK
997 if (block->offset == addr) {
998 new_block = block;
999 break;
1000 }
1001 }
1002 assert(new_block);
1003 assert(!new_block->idstr[0]);
84b89d78 1004
09e5ab63
AL
1005 if (dev) {
1006 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1007 if (id) {
1008 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1009 g_free(id);
84b89d78
CM
1010 }
1011 }
1012 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1013
b2a8658e
UD
1014 /* This assumes the iothread lock is taken here too. */
1015 qemu_mutex_lock_ramlist();
a3161038 1016 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1017 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1018 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1019 new_block->idstr);
1020 abort();
1021 }
1022 }
b2a8658e 1023 qemu_mutex_unlock_ramlist();
c5705a77
AK
1024}
1025
8490fc78
LC
1026static int memory_try_enable_merging(void *addr, size_t len)
1027{
1028 QemuOpts *opts;
1029
1030 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1031 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1032 /* disabled by the user */
1033 return 0;
1034 }
1035
1036 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1037}
1038
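/* Allocate and register a new RAM block. If 'host' is non-NULL the caller
   supplies the backing memory; otherwise it is allocated here (-mem-path file,
   Xen, KVM or anonymous), and the dirty bitmap is grown to cover the block. */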
c5705a77 1039ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1040 MemoryRegion *mr)
1041{
abb26d63 1042 RAMBlock *block, *new_block;
c5705a77
AK
1043
1044 size = TARGET_PAGE_ALIGN(size);
1045 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1046
b2a8658e
UD
1047 /* This assumes the iothread lock is taken here too. */
1048 qemu_mutex_lock_ramlist();
7c637366 1049 new_block->mr = mr;
432d268c 1050 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1051 if (host) {
1052 new_block->host = host;
cd19cfa2 1053 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
1054 } else {
1055 if (mem_path) {
c902760f 1056#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
1057 new_block->host = file_ram_alloc(new_block, size, mem_path);
1058 if (!new_block->host) {
6eebf958 1059 new_block->host = qemu_anon_ram_alloc(size);
8490fc78 1060 memory_try_enable_merging(new_block->host, size);
6977dfe6 1061 }
c902760f 1062#else
6977dfe6
YT
1063 fprintf(stderr, "-mem-path option unsupported\n");
1064 exit(1);
c902760f 1065#endif
6977dfe6 1066 } else {
868bb33f 1067 if (xen_enabled()) {
fce537d4 1068 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
1069 } else if (kvm_enabled()) {
1070 /* some s390/kvm configurations have special constraints */
6eebf958 1071 new_block->host = kvm_ram_alloc(size);
432d268c 1072 } else {
6eebf958 1073 new_block->host = qemu_anon_ram_alloc(size);
432d268c 1074 }
8490fc78 1075 memory_try_enable_merging(new_block->host, size);
6977dfe6 1076 }
c902760f 1077 }
94a6b54f
PB
1078 new_block->length = size;
1079
abb26d63
PB
1080 /* Keep the list sorted from biggest to smallest block. */
1081 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1082 if (block->length < new_block->length) {
1083 break;
1084 }
1085 }
1086 if (block) {
1087 QTAILQ_INSERT_BEFORE(block, new_block, next);
1088 } else {
1089 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1090 }
0d6d3c87 1091 ram_list.mru_block = NULL;
94a6b54f 1092
f798b07f 1093 ram_list.version++;
b2a8658e 1094 qemu_mutex_unlock_ramlist();
f798b07f 1095
7267c094 1096 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1097 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
1098 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1099 0, size >> TARGET_PAGE_BITS);
1720aeee 1100 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1101
ddb97f1d 1102 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1103 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1104
6f0437e8
JK
1105 if (kvm_enabled())
1106 kvm_setup_guest_memory(new_block->host, size);
1107
94a6b54f
PB
1108 return new_block->offset;
1109}
e9a1ab19 1110
c5705a77 1111ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1112{
c5705a77 1113 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1114}
1115
1f2e98b6
AW
1116void qemu_ram_free_from_ptr(ram_addr_t addr)
1117{
1118 RAMBlock *block;
1119
b2a8658e
UD
1120 /* This assumes the iothread lock is taken here too. */
1121 qemu_mutex_lock_ramlist();
a3161038 1122 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1123 if (addr == block->offset) {
a3161038 1124 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1125 ram_list.mru_block = NULL;
f798b07f 1126 ram_list.version++;
7267c094 1127 g_free(block);
b2a8658e 1128 break;
1f2e98b6
AW
1129 }
1130 }
b2a8658e 1131 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1132}
1133
c227f099 1134void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1135{
04b16653
AW
1136 RAMBlock *block;
1137
b2a8658e
UD
1138 /* This assumes the iothread lock is taken here too. */
1139 qemu_mutex_lock_ramlist();
a3161038 1140 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1141 if (addr == block->offset) {
a3161038 1142 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1143 ram_list.mru_block = NULL;
f798b07f 1144 ram_list.version++;
cd19cfa2
HY
1145 if (block->flags & RAM_PREALLOC_MASK) {
1146 ;
1147 } else if (mem_path) {
04b16653
AW
1148#if defined (__linux__) && !defined(TARGET_S390X)
1149 if (block->fd) {
1150 munmap(block->host, block->length);
1151 close(block->fd);
1152 } else {
e7a09b92 1153 qemu_anon_ram_free(block->host, block->length);
04b16653 1154 }
fd28aa13
JK
1155#else
1156 abort();
04b16653
AW
1157#endif
1158 } else {
868bb33f 1159 if (xen_enabled()) {
e41d7c69 1160 xen_invalidate_map_cache_entry(block->host);
432d268c 1161 } else {
e7a09b92 1162 qemu_anon_ram_free(block->host, block->length);
432d268c 1163 }
04b16653 1164 }
7267c094 1165 g_free(block);
b2a8658e 1166 break;
04b16653
AW
1167 }
1168 }
b2a8658e 1169 qemu_mutex_unlock_ramlist();
04b16653 1170
e9a1ab19
FB
1171}
1172
cd19cfa2
HY
1173#ifndef _WIN32
1174void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1175{
1176 RAMBlock *block;
1177 ram_addr_t offset;
1178 int flags;
1179 void *area, *vaddr;
1180
a3161038 1181 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1182 offset = addr - block->offset;
1183 if (offset < block->length) {
1184 vaddr = block->host + offset;
1185 if (block->flags & RAM_PREALLOC_MASK) {
1186 ;
1187 } else {
1188 flags = MAP_FIXED;
1189 munmap(vaddr, length);
1190 if (mem_path) {
1191#if defined(__linux__) && !defined(TARGET_S390X)
1192 if (block->fd) {
1193#ifdef MAP_POPULATE
1194 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1195 MAP_PRIVATE;
1196#else
1197 flags |= MAP_PRIVATE;
1198#endif
1199 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1200 flags, block->fd, offset);
1201 } else {
1202 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1203 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1204 flags, -1, 0);
1205 }
fd28aa13
JK
1206#else
1207 abort();
cd19cfa2
HY
1208#endif
1209 } else {
1210#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1211 flags |= MAP_SHARED | MAP_ANONYMOUS;
1212 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1213 flags, -1, 0);
1214#else
1215 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1216 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1217 flags, -1, 0);
1218#endif
1219 }
1220 if (area != vaddr) {
f15fbc4b
AP
1221 fprintf(stderr, "Could not remap addr: "
1222 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1223 length, addr);
1224 exit(1);
1225 }
8490fc78 1226 memory_try_enable_merging(vaddr, length);
ddb97f1d 1227 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1228 }
1229 return;
1230 }
1231 }
1232}
1233#endif /* !_WIN32 */
1234
dc828ca1 1235/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
1236 With the exception of the softmmu code in this file, this should
1237 only be used for local memory (e.g. video ram) that the device owns,
1238 and knows it isn't going to access beyond the end of the block.
1239
1240 It should not be used for general purpose DMA.
1241 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1242 */
c227f099 1243void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 1244{
94a6b54f
PB
1245 RAMBlock *block;
1246
b2a8658e 1247 /* The list is protected by the iothread lock here. */
0d6d3c87
PB
1248 block = ram_list.mru_block;
1249 if (block && addr - block->offset < block->length) {
1250 goto found;
1251 }
a3161038 1252 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1253 if (addr - block->offset < block->length) {
0d6d3c87 1254 goto found;
f471a17e 1255 }
94a6b54f 1256 }
f471a17e
AW
1257
1258 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1259 abort();
1260
0d6d3c87
PB
1261found:
1262 ram_list.mru_block = block;
1263 if (xen_enabled()) {
1264 /* We need to check if the requested address is in the RAM
1265 * because we don't want to map the entire memory in QEMU.
1266 * In that case just map until the end of the page.
1267 */
1268 if (block->offset == 0) {
1269 return xen_map_cache(addr, 0, 0);
1270 } else if (block->host == NULL) {
1271 block->host =
1272 xen_map_cache(block->offset, block->length, 1);
1273 }
1274 }
1275 return block->host + (addr - block->offset);
dc828ca1
PB
1276}
1277
0d6d3c87
PB
1278/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1279 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1280 *
1281 * ??? Is this still necessary?
b2e0a138 1282 */
8b9c99d9 1283static void *qemu_safe_ram_ptr(ram_addr_t addr)
b2e0a138
MT
1284{
1285 RAMBlock *block;
1286
b2a8658e 1287 /* The list is protected by the iothread lock here. */
a3161038 1288 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1289 if (addr - block->offset < block->length) {
868bb33f 1290 if (xen_enabled()) {
432d268c
JN
1291 /* We need to check if the requested address is in the RAM
1292 * because we don't want to map the entire memory in QEMU.
712c2b41 1293 * In that case just map until the end of the page.
432d268c
JN
1294 */
1295 if (block->offset == 0) {
e41d7c69 1296 return xen_map_cache(addr, 0, 0);
432d268c 1297 } else if (block->host == NULL) {
e41d7c69
JK
1298 block->host =
1299 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
1300 }
1301 }
b2e0a138
MT
1302 return block->host + (addr - block->offset);
1303 }
1304 }
1305
1306 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1307 abort();
1308
1309 return NULL;
1310}
1311
38bee5dc
SS
1312/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1313 * but takes a size argument */
8b9c99d9 1314static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 1315{
8ab934f9
SS
1316 if (*size == 0) {
1317 return NULL;
1318 }
868bb33f 1319 if (xen_enabled()) {
e41d7c69 1320 return xen_map_cache(addr, *size, 1);
868bb33f 1321 } else {
38bee5dc
SS
1322 RAMBlock *block;
1323
a3161038 1324 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1325 if (addr - block->offset < block->length) {
1326 if (addr - block->offset + *size > block->length)
1327 *size = block->length - addr + block->offset;
1328 return block->host + (addr - block->offset);
1329 }
1330 }
1331
1332 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1333 abort();
38bee5dc
SS
1334 }
1335}
1336
e890261f 1337int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1338{
94a6b54f
PB
1339 RAMBlock *block;
1340 uint8_t *host = ptr;
1341
868bb33f 1342 if (xen_enabled()) {
e41d7c69 1343 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
1344 return 0;
1345 }
1346
a3161038 1347 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c 1348 /* This case applies when the block is not mapped. */
1349 if (block->host == NULL) {
1350 continue;
1351 }
f471a17e 1352 if (host - block->host < block->length) {
e890261f
MT
1353 *ram_addr = block->offset + (host - block->host);
1354 return 0;
f471a17e 1355 }
94a6b54f 1356 }
432d268c 1357
e890261f
MT
1358 return -1;
1359}
f471a17e 1360
e890261f
MT
1361/* Some of the softmmu routines need to translate from a host pointer
1362 (typically a TLB entry) back to a ram offset. */
1363ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1364{
1365 ram_addr_t ram_addr;
f471a17e 1366
e890261f
MT
1367 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1368 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1369 abort();
1370 }
1371 return ram_addr;
5579c7f3
PB
1372}
1373
a8170e5e 1374static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1375 unsigned size)
e18231a3
BS
1376{
1377#ifdef DEBUG_UNASSIGNED
1378 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1379#endif
5b450407 1380#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1381 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
1382#endif
1383 return 0;
1384}
1385
a8170e5e 1386static void unassigned_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1387 uint64_t val, unsigned size)
e18231a3
BS
1388{
1389#ifdef DEBUG_UNASSIGNED
0e0df1e2 1390 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 1391#endif
5b450407 1392#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1393 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 1394#endif
33417e70
FB
1395}
1396
0e0df1e2
AK
1397static const MemoryRegionOps unassigned_mem_ops = {
1398 .read = unassigned_mem_read,
1399 .write = unassigned_mem_write,
1400 .endianness = DEVICE_NATIVE_ENDIAN,
1401};
e18231a3 1402
a8170e5e 1403static uint64_t error_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1404 unsigned size)
e18231a3 1405{
0e0df1e2 1406 abort();
e18231a3
BS
1407}
1408
a8170e5e 1409static void error_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1410 uint64_t value, unsigned size)
e18231a3 1411{
0e0df1e2 1412 abort();
33417e70
FB
1413}
1414
0e0df1e2
AK
1415static const MemoryRegionOps error_mem_ops = {
1416 .read = error_mem_read,
1417 .write = error_mem_write,
1418 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
1419};
1420
0e0df1e2
AK
1421static const MemoryRegionOps rom_mem_ops = {
1422 .read = error_mem_read,
1423 .write = unassigned_mem_write,
1424 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70 1425};
1426
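/* Write handler for RAM pages whose code-dirty flag is clear: invalidate any
   TBs translated from the page, perform the store, then mark the page dirty so
   that future writes can take the fast path. */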
a8170e5e 1427static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1428 uint64_t val, unsigned size)
9fa3e853 1429{
3a7d929e 1430 int dirty_flags;
f7c11b53 1431 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1432 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1433#if !defined(CONFIG_USER_ONLY)
0e0df1e2 1434 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1435 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 1436#endif
3a7d929e 1437 }
0e0df1e2
AK
1438 switch (size) {
1439 case 1:
1440 stb_p(qemu_get_ram_ptr(ram_addr), val);
1441 break;
1442 case 2:
1443 stw_p(qemu_get_ram_ptr(ram_addr), val);
1444 break;
1445 case 4:
1446 stl_p(qemu_get_ram_ptr(ram_addr), val);
1447 break;
1448 default:
1449 abort();
3a7d929e 1450 }
f23db169 1451 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1452 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1453 /* we remove the notdirty callback only if the code has been
1454 flushed */
1455 if (dirty_flags == 0xff)
2e70f6ef 1456 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
1457}
1458
0e0df1e2
AK
1459static const MemoryRegionOps notdirty_mem_ops = {
1460 .read = error_mem_read,
1461 .write = notdirty_mem_write,
1462 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1463};
1464
0f459d16 1465/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1466static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1467{
9349b4f9 1468 CPUArchState *env = cpu_single_env;
06d55cc1 1469 target_ulong pc, cs_base;
0f459d16 1470 target_ulong vaddr;
a1d1bb31 1471 CPUWatchpoint *wp;
06d55cc1 1472 int cpu_flags;
0f459d16 1473
06d55cc1 1474 if (env->watchpoint_hit) {
1475 /* We re-entered the check after replacing the TB. Now raise
1476 * the debug interrupt so that it will trigger after the
1477 * current instruction. */
c3affe56 1478 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1479 return;
1480 }
2e70f6ef 1481 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1482 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1483 if ((vaddr == (wp->vaddr & len_mask) ||
1484 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1485 wp->flags |= BP_WATCHPOINT_HIT;
1486 if (!env->watchpoint_hit) {
1487 env->watchpoint_hit = wp;
5a316526 1488 tb_check_watchpoint(env);
6e140f28
AL
1489 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1490 env->exception_index = EXCP_DEBUG;
488d6577 1491 cpu_loop_exit(env);
6e140f28
AL
1492 } else {
1493 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1494 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1495 cpu_resume_from_signal(env, NULL);
6e140f28 1496 }
06d55cc1 1497 }
6e140f28
AL
1498 } else {
1499 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1500 }
1501 }
1502}
1503
6658ffb8
PB
1504/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1505 so these check for a hit then pass through to the normal out-of-line
1506 phys routines. */
a8170e5e 1507static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1508 unsigned size)
6658ffb8 1509{
1ec9b909
AK
1510 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1511 switch (size) {
1512 case 1: return ldub_phys(addr);
1513 case 2: return lduw_phys(addr);
1514 case 4: return ldl_phys(addr);
1515 default: abort();
1516 }
6658ffb8
PB
1517}
1518
a8170e5e 1519static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1520 uint64_t val, unsigned size)
6658ffb8 1521{
1ec9b909
AK
1522 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1523 switch (size) {
67364150
MF
1524 case 1:
1525 stb_phys(addr, val);
1526 break;
1527 case 2:
1528 stw_phys(addr, val);
1529 break;
1530 case 4:
1531 stl_phys(addr, val);
1532 break;
1ec9b909
AK
1533 default: abort();
1534 }
6658ffb8
PB
1535}
1536
1ec9b909
AK
1537static const MemoryRegionOps watch_mem_ops = {
1538 .read = watch_mem_read,
1539 .write = watch_mem_write,
1540 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1541};
6658ffb8 1542
a8170e5e 1543static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1544 unsigned len)
db7b5426 1545{
70c68e44 1546 subpage_t *mmio = opaque;
f6405247 1547 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1548 MemoryRegionSection *section;
db7b5426
BS
1549#if defined(DEBUG_SUBPAGE)
1550 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1551 mmio, len, addr, idx);
1552#endif
db7b5426 1553
5312bd8b
AK
1554 section = &phys_sections[mmio->sub_section[idx]];
1555 addr += mmio->base;
1556 addr -= section->offset_within_address_space;
1557 addr += section->offset_within_region;
37ec01d4 1558 return io_mem_read(section->mr, addr, len);
db7b5426
BS
1559}
1560
a8170e5e 1561static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1562 uint64_t value, unsigned len)
db7b5426 1563{
70c68e44 1564 subpage_t *mmio = opaque;
f6405247 1565 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1566 MemoryRegionSection *section;
db7b5426 1567#if defined(DEBUG_SUBPAGE)
70c68e44
AK
1568 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1569 " idx %d value %"PRIx64"\n",
f6405247 1570 __func__, mmio, len, addr, idx, value);
db7b5426 1571#endif
f6405247 1572
5312bd8b
AK
1573 section = &phys_sections[mmio->sub_section[idx]];
1574 addr += mmio->base;
1575 addr -= section->offset_within_address_space;
1576 addr += section->offset_within_region;
37ec01d4 1577 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
1578}
1579
70c68e44
AK
1580static const MemoryRegionOps subpage_ops = {
1581 .read = subpage_read,
1582 .write = subpage_write,
1583 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1584};
1585
a8170e5e 1586static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
de712f94 1587 unsigned size)
56384e8b
AF
1588{
1589 ram_addr_t raddr = addr;
1590 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
1591 switch (size) {
1592 case 1: return ldub_p(ptr);
1593 case 2: return lduw_p(ptr);
1594 case 4: return ldl_p(ptr);
1595 default: abort();
1596 }
56384e8b
AF
1597}
1598
a8170e5e 1599static void subpage_ram_write(void *opaque, hwaddr addr,
de712f94 1600 uint64_t value, unsigned size)
56384e8b
AF
1601{
1602 ram_addr_t raddr = addr;
1603 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
1604 switch (size) {
1605 case 1: return stb_p(ptr, value);
1606 case 2: return stw_p(ptr, value);
1607 case 4: return stl_p(ptr, value);
1608 default: abort();
1609 }
56384e8b
AF
1610}
1611
de712f94
AK
1612static const MemoryRegionOps subpage_ram_ops = {
1613 .read = subpage_ram_read,
1614 .write = subpage_ram_write,
1615 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
1616};
1617
c227f099 1618static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1619 uint16_t section)
db7b5426
BS
1620{
1621 int idx, eidx;
1622
1623 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1624 return -1;
1625 idx = SUBPAGE_IDX(start);
1626 eidx = SUBPAGE_IDX(end);
1627#if defined(DEBUG_SUBPAGE)
0bf9e31a 1628 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426 1629 mmio, start, end, idx, eidx, section);
1630#endif
5312bd8b
AK
1631 if (memory_region_is_ram(phys_sections[section].mr)) {
1632 MemoryRegionSection new_section = phys_sections[section];
1633 new_section.mr = &io_mem_subpage_ram;
1634 section = phys_section_add(&new_section);
56384e8b 1635 }
db7b5426 1636 for (; idx <= eidx; idx++) {
5312bd8b 1637 mmio->sub_section[idx] = section;
db7b5426
BS
1638 }
1639
1640 return 0;
1641}
1642
a8170e5e 1643static subpage_t *subpage_init(hwaddr base)
db7b5426 1644{
c227f099 1645 subpage_t *mmio;
db7b5426 1646
7267c094 1647 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
1648
1649 mmio->base = base;
70c68e44
AK
1650 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1651 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1652 mmio->iomem.subpage = true;
db7b5426 1653#if defined(DEBUG_SUBPAGE)
1eec614b 1654 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1655 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1656#endif
0f0cb164 1657 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
1658
1659 return mmio;
1660}
1661
5312bd8b
AK
1662static uint16_t dummy_section(MemoryRegion *mr)
1663{
1664 MemoryRegionSection section = {
1665 .mr = mr,
1666 .offset_within_address_space = 0,
1667 .offset_within_region = 0,
1668 .size = UINT64_MAX,
1669 };
1670
1671 return phys_section_add(&section);
1672}
1673
a8170e5e 1674MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1675{
37ec01d4 1676 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1677}
1678
e9179ce1
AK
1679static void io_mem_init(void)
1680{
0e0df1e2 1681 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
1682 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1683 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1684 "unassigned", UINT64_MAX);
1685 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1686 "notdirty", UINT64_MAX);
de712f94
AK
1687 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1688 "subpage-ram", UINT64_MAX);
1ec9b909
AK
1689 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1690 "watch", UINT64_MAX);
e9179ce1
AK
1691}
1692
ac1970fb
AK
1693static void mem_begin(MemoryListener *listener)
1694{
1695 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1696
1697 destroy_all_mappings(d);
1698 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1699}
1700
50c1e149
AK
1701static void core_begin(MemoryListener *listener)
1702{
5312bd8b
AK
1703 phys_sections_clear();
1704 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
1705 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1706 phys_section_rom = dummy_section(&io_mem_rom);
1707 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
1708}
1709
1d71148e 1710static void tcg_commit(MemoryListener *listener)
50c1e149 1711{
9349b4f9 1712 CPUArchState *env;
117712c3
AK
1713
1714 /* since each CPU stores ram addresses in its TLB cache, we must
1715 reset the modified entries */
1716 /* XXX: slow ! */
1717 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1718 tlb_flush(env, 1);
1719 }
50c1e149
AK
1720}
1721
93632747
AK
1722static void core_log_global_start(MemoryListener *listener)
1723{
1724 cpu_physical_memory_set_dirty_tracking(1);
1725}
1726
1727static void core_log_global_stop(MemoryListener *listener)
1728{
1729 cpu_physical_memory_set_dirty_tracking(0);
1730}
1731
4855d41a
AK
1732static void io_region_add(MemoryListener *listener,
1733 MemoryRegionSection *section)
1734{
a2d33521
AK
1735 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1736
1737 mrio->mr = section->mr;
1738 mrio->offset = section->offset_within_region;
1739 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 1740 section->offset_within_address_space, section->size);
a2d33521 1741 ioport_register(&mrio->iorange);
4855d41a
AK
1742}
1743
1744static void io_region_del(MemoryListener *listener,
1745 MemoryRegionSection *section)
1746{
1747 isa_unassign_ioport(section->offset_within_address_space, section->size);
1748}
1749
93632747 1750static MemoryListener core_memory_listener = {
50c1e149 1751 .begin = core_begin,
93632747
AK
1752 .log_global_start = core_log_global_start,
1753 .log_global_stop = core_log_global_stop,
ac1970fb 1754 .priority = 1,
93632747
AK
1755};
1756
4855d41a
AK
1757static MemoryListener io_memory_listener = {
1758 .region_add = io_region_add,
1759 .region_del = io_region_del,
4855d41a
AK
1760 .priority = 0,
1761};
1762
1d71148e
AK
1763static MemoryListener tcg_memory_listener = {
1764 .commit = tcg_commit,
1765};
1766
ac1970fb
AK
1767void address_space_init_dispatch(AddressSpace *as)
1768{
1769 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1770
1771 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1772 d->listener = (MemoryListener) {
1773 .begin = mem_begin,
1774 .region_add = mem_add,
1775 .region_nop = mem_add,
1776 .priority = 0,
1777 };
1778 as->dispatch = d;
1779 memory_listener_register(&d->listener, as);
1780}
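/*
 * Minimal usage sketch, assuming a caller-owned root MemoryRegion "my_root":
 * address_space_init() ends up calling the function above to build the
 * dispatch structure, and address_space_destroy() tears it down again.
 */
#if 0 /* illustration only */
    AddressSpace my_as;

    address_space_init(&my_as, my_root);   /* registers d->listener on my_as */
    /* ... address_space_rw()/address_space_map() accesses on &my_as ... */
    address_space_destroy(&my_as);         /* unregisters and frees dispatch */
#endif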
1781
83f3c251
AK
1782void address_space_destroy_dispatch(AddressSpace *as)
1783{
1784 AddressSpaceDispatch *d = as->dispatch;
1785
1786 memory_listener_unregister(&d->listener);
1787 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1788 g_free(d);
1789 as->dispatch = NULL;
1790}
1791
62152b8a
AK
1792static void memory_map_init(void)
1793{
7267c094 1794 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1795 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1796 address_space_init(&address_space_memory, system_memory);
1797 address_space_memory.name = "memory";
309cb471 1798
7267c094 1799 system_io = g_malloc(sizeof(*system_io));
309cb471 1800 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1801 address_space_init(&address_space_io, system_io);
1802 address_space_io.name = "I/O";
93632747 1803
f6790af6
AK
1804 memory_listener_register(&core_memory_listener, &address_space_memory);
1805 memory_listener_register(&io_memory_listener, &address_space_io);
1806 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1807
1808 dma_context_init(&dma_context_memory, &address_space_memory,
1809 NULL, NULL, NULL);
62152b8a
AK
1810}
1811
1812MemoryRegion *get_system_memory(void)
1813{
1814 return system_memory;
1815}
1816
309cb471
AK
1817MemoryRegion *get_system_io(void)
1818{
1819 return system_io;
1820}
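/*
 * Typical board-level usage sketch of the accessors above, assuming a
 * hypothetical machine that wants 128 MiB of RAM at guest address 0:
 */
#if 0 /* illustration only */
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, "my-board.ram", 128 * 1024 * 1024);
    vmstate_register_ram_global(ram);
    memory_region_add_subregion(get_system_memory(), 0, ram);
#endif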
1821
e2eef170
PB
1822#endif /* !defined(CONFIG_USER_ONLY) */
1823
13eb76e0
FB
1824/* physical memory access (slow version, mainly for debug) */
1825#if defined(CONFIG_USER_ONLY)
9349b4f9 1826int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1827 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1828{
1829 int l, flags;
1830 target_ulong page;
53a5960a 1831 void * p;
13eb76e0
FB
1832
1833 while (len > 0) {
1834 page = addr & TARGET_PAGE_MASK;
1835 l = (page + TARGET_PAGE_SIZE) - addr;
1836 if (l > len)
1837 l = len;
1838 flags = page_get_flags(page);
1839 if (!(flags & PAGE_VALID))
a68fe89c 1840 return -1;
13eb76e0
FB
1841 if (is_write) {
1842 if (!(flags & PAGE_WRITE))
a68fe89c 1843 return -1;
579a97f7 1844 /* XXX: this code should not depend on lock_user */
72fb7daa 1845 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1846 return -1;
72fb7daa
AJ
1847 memcpy(p, buf, l);
1848 unlock_user(p, addr, l);
13eb76e0
FB
1849 } else {
1850 if (!(flags & PAGE_READ))
a68fe89c 1851 return -1;
579a97f7 1852 /* XXX: this code should not depend on lock_user */
72fb7daa 1853 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1854 return -1;
72fb7daa 1855 memcpy(buf, p, l);
5b257578 1856 unlock_user(p, addr, 0);
13eb76e0
FB
1857 }
1858 len -= l;
1859 buf += l;
1860 addr += l;
1861 }
a68fe89c 1862 return 0;
13eb76e0 1863}
8df1cd07 1864
13eb76e0 1865#else
51d7a9eb 1866
a8170e5e
AK
1867static void invalidate_and_set_dirty(hwaddr addr,
1868 hwaddr length)
51d7a9eb
AP
1869{
1870 if (!cpu_physical_memory_is_dirty(addr)) {
1871 /* invalidate code */
1872 tb_invalidate_phys_page_range(addr, addr + length, 0);
1873 /* set dirty bit */
1874 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1875 }
e226939d 1876 xen_modified_memory(addr, length);
51d7a9eb
AP
1877}
1878
a8170e5e 1879void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1880 int len, bool is_write)
13eb76e0 1881{
ac1970fb 1882 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1883 int l;
13eb76e0
FB
1884 uint8_t *ptr;
1885 uint32_t val;
a8170e5e 1886 hwaddr page;
f3705d53 1887 MemoryRegionSection *section;
3b46e624 1888
13eb76e0
FB
1889 while (len > 0) {
1890 page = addr & TARGET_PAGE_MASK;
1891 l = (page + TARGET_PAGE_SIZE) - addr;
1892 if (l > len)
1893 l = len;
ac1970fb 1894 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1895
13eb76e0 1896 if (is_write) {
f3705d53 1897 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1898 hwaddr addr1;
cc5bea60 1899 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
1900 /* XXX: could force cpu_single_env to NULL to avoid
1901 potential bugs */
6c2934db 1902 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1903 /* 32 bit write access */
c27004ec 1904 val = ldl_p(buf);
37ec01d4 1905 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1906 l = 4;
6c2934db 1907 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1908 /* 16 bit write access */
c27004ec 1909 val = lduw_p(buf);
37ec01d4 1910 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
1911 l = 2;
1912 } else {
1c213d19 1913 /* 8 bit write access */
c27004ec 1914 val = ldub_p(buf);
37ec01d4 1915 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
1916 l = 1;
1917 }
f3705d53 1918 } else if (!section->readonly) {
8ca5692d 1919 ram_addr_t addr1;
f3705d53 1920 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1921 + memory_region_section_addr(section, addr);
13eb76e0 1922 /* RAM case */
5579c7f3 1923 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1924 memcpy(ptr, buf, l);
51d7a9eb 1925 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1926 }
1927 } else {
cc5bea60
BS
1928 if (!(memory_region_is_ram(section->mr) ||
1929 memory_region_is_romd(section->mr))) {
a8170e5e 1930 hwaddr addr1;
13eb76e0 1931 /* I/O case */
cc5bea60 1932 addr1 = memory_region_section_addr(section, addr);
6c2934db 1933 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1934 /* 32 bit read access */
37ec01d4 1935 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1936 stl_p(buf, val);
13eb76e0 1937 l = 4;
6c2934db 1938 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1939 /* 16 bit read access */
37ec01d4 1940 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1941 stw_p(buf, val);
13eb76e0
FB
1942 l = 2;
1943 } else {
1c213d19 1944 /* 8 bit read access */
37ec01d4 1945 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1946 stb_p(buf, val);
13eb76e0
FB
1947 l = 1;
1948 }
1949 } else {
1950 /* RAM case */
0a1b357f 1951 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
1952 + memory_region_section_addr(section,
1953 addr));
f3705d53 1954 memcpy(buf, ptr, l);
13eb76e0
FB
1955 }
1956 }
1957 len -= l;
1958 buf += l;
1959 addr += l;
1960 }
1961}
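/*
 * Usage sketch, assuming a hypothetical guest-physical address GUEST_ADDR:
 * MMIO-backed ranges are split by the loop above into 4/2/1 byte device
 * accesses, while RAM-backed ranges are simply memcpy'd.
 */
#if 0 /* illustration only */
    uint32_t v = cpu_to_le32(0xdeadbeef);

    address_space_rw(&address_space_memory, GUEST_ADDR,
                     (uint8_t *)&v, sizeof(v), true /* is_write */);
#endif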
8df1cd07 1962
a8170e5e 1963void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1964 const uint8_t *buf, int len)
1965{
1966 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1967}
1968
1969/**
1970 * address_space_read: read from an address space.
1971 *
1972 * @as: #AddressSpace to be accessed
1973 * @addr: address within that address space
 1974 * @buf: buffer that receives the data; @len gives the number of bytes to read
1975 */
a8170e5e 1976void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
1977{
1978 address_space_rw(as, addr, buf, len, false);
1979}
1980
1981
a8170e5e 1982void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1983 int len, int is_write)
1984{
1985 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1986}
1987
d0ecd2aa 1988/* used for ROM loading : can write in RAM and ROM */
a8170e5e 1989void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1990 const uint8_t *buf, int len)
1991{
ac1970fb 1992 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
1993 int l;
1994 uint8_t *ptr;
a8170e5e 1995 hwaddr page;
f3705d53 1996 MemoryRegionSection *section;
3b46e624 1997
d0ecd2aa
FB
1998 while (len > 0) {
1999 page = addr & TARGET_PAGE_MASK;
2000 l = (page + TARGET_PAGE_SIZE) - addr;
2001 if (l > len)
2002 l = len;
ac1970fb 2003 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 2004
cc5bea60
BS
2005 if (!(memory_region_is_ram(section->mr) ||
2006 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
2007 /* do nothing */
2008 } else {
2009 unsigned long addr1;
f3705d53 2010 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 2011 + memory_region_section_addr(section, addr);
d0ecd2aa 2012 /* ROM/RAM case */
5579c7f3 2013 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2014 memcpy(ptr, buf, l);
51d7a9eb 2015 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2016 }
2017 len -= l;
2018 buf += l;
2019 addr += l;
2020 }
2021}
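/*
 * Usage sketch: firmware loading is the typical caller.  BIOS_BASE,
 * "bios_blob" and "bios_size" are illustrative names; real callers usually
 * go through the rom loader (rom_add_file() and friends), which reaches
 * this function when ROMs are (re)written at reset.
 */
#if 0 /* illustration only */
    cpu_physical_memory_write_rom(BIOS_BASE, bios_blob, bios_size);
#endif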
2022
6d16c2f8
AL
2023typedef struct {
2024 void *buffer;
a8170e5e
AK
2025 hwaddr addr;
2026 hwaddr len;
6d16c2f8
AL
2027} BounceBuffer;
2028
2029static BounceBuffer bounce;
2030
ba223c29
AL
2031typedef struct MapClient {
2032 void *opaque;
2033 void (*callback)(void *opaque);
72cf2d4f 2034 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2035} MapClient;
2036
72cf2d4f
BS
2037static QLIST_HEAD(map_client_list, MapClient) map_client_list
2038 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2039
2040void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2041{
7267c094 2042 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2043
2044 client->opaque = opaque;
2045 client->callback = callback;
72cf2d4f 2046 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2047 return client;
2048}
2049
8b9c99d9 2050static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2051{
2052 MapClient *client = (MapClient *)_client;
2053
72cf2d4f 2054 QLIST_REMOVE(client, link);
7267c094 2055 g_free(client);
ba223c29
AL
2056}
2057
2058static void cpu_notify_map_clients(void)
2059{
2060 MapClient *client;
2061
72cf2d4f
BS
2062 while (!QLIST_EMPTY(&map_client_list)) {
2063 client = QLIST_FIRST(&map_client_list);
ba223c29 2064 client->callback(client->opaque);
34d5e948 2065 cpu_unregister_map_client(client);
ba223c29
AL
2066 }
2067}
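/*
 * Sketch of the retry protocol implemented above, with hypothetical
 * MyDevice/my_device_start_dma names: a device whose map attempt fails
 * registers a callback and is kicked again once the bounce buffer is freed
 * by cpu_notify_map_clients().
 */
#if 0 /* illustration only */
static void my_device_start_dma(void *opaque)
{
    MyDevice *dev = opaque;
    hwaddr len = dev->dma_len;
    void *p = cpu_physical_memory_map(dev->dma_addr, &len, dev->is_write);

    if (!p) {
        /* bounce buffer busy: ask to be called back and retry later */
        cpu_register_map_client(dev, my_device_start_dma);
        return;
    }
    /* ... transfer at most "len" bytes through p ... */
    cpu_physical_memory_unmap(p, len, dev->is_write, len);
}
#endif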
2068
6d16c2f8
AL
2069/* Map a physical memory region into a host virtual address.
2070 * May map a subset of the requested range, given by and returned in *plen.
2071 * May return NULL if resources needed to perform the mapping are exhausted.
2072 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2073 * Use cpu_register_map_client() to know when retrying the map operation is
2074 * likely to succeed.
6d16c2f8 2075 */
ac1970fb 2076void *address_space_map(AddressSpace *as,
a8170e5e
AK
2077 hwaddr addr,
2078 hwaddr *plen,
ac1970fb 2079 bool is_write)
6d16c2f8 2080{
ac1970fb 2081 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
2082 hwaddr len = *plen;
2083 hwaddr todo = 0;
6d16c2f8 2084 int l;
a8170e5e 2085 hwaddr page;
f3705d53 2086 MemoryRegionSection *section;
f15fbc4b 2087 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2088 ram_addr_t rlen;
2089 void *ret;
6d16c2f8
AL
2090
2091 while (len > 0) {
2092 page = addr & TARGET_PAGE_MASK;
2093 l = (page + TARGET_PAGE_SIZE) - addr;
2094 if (l > len)
2095 l = len;
ac1970fb 2096 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2097
f3705d53 2098 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2099 if (todo || bounce.buffer) {
6d16c2f8
AL
2100 break;
2101 }
2102 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2103 bounce.addr = addr;
2104 bounce.len = l;
2105 if (!is_write) {
ac1970fb 2106 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2107 }
38bee5dc
SS
2108
2109 *plen = l;
2110 return bounce.buffer;
6d16c2f8 2111 }
8ab934f9 2112 if (!todo) {
f3705d53 2113 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2114 + memory_region_section_addr(section, addr);
8ab934f9 2115 }
6d16c2f8
AL
2116
2117 len -= l;
2118 addr += l;
38bee5dc 2119 todo += l;
6d16c2f8 2120 }
8ab934f9
SS
2121 rlen = todo;
2122 ret = qemu_ram_ptr_length(raddr, &rlen);
2123 *plen = rlen;
2124 return ret;
6d16c2f8
AL
2125}
2126
ac1970fb 2127/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2128 * Will also mark the memory as dirty if is_write == 1. access_len gives
2129 * the amount of memory that was actually read or written by the caller.
2130 */
a8170e5e
AK
2131void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2132 int is_write, hwaddr access_len)
6d16c2f8
AL
2133{
2134 if (buffer != bounce.buffer) {
2135 if (is_write) {
e890261f 2136 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2137 while (access_len) {
2138 unsigned l;
2139 l = TARGET_PAGE_SIZE;
2140 if (l > access_len)
2141 l = access_len;
51d7a9eb 2142 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2143 addr1 += l;
2144 access_len -= l;
2145 }
2146 }
868bb33f 2147 if (xen_enabled()) {
e41d7c69 2148 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2149 }
6d16c2f8
AL
2150 return;
2151 }
2152 if (is_write) {
ac1970fb 2153 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2154 }
f8a83245 2155 qemu_vfree(bounce.buffer);
6d16c2f8 2156 bounce.buffer = NULL;
ba223c29 2157 cpu_notify_map_clients();
6d16c2f8 2158}
d0ecd2aa 2159
a8170e5e
AK
2160void *cpu_physical_memory_map(hwaddr addr,
2161 hwaddr *plen,
ac1970fb
AK
2162 int is_write)
2163{
2164 return address_space_map(&address_space_memory, addr, plen, is_write);
2165}
2166
a8170e5e
AK
2167void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2168 int is_write, hwaddr access_len)
ac1970fb
AK
2169{
2170 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2171}
2172
8df1cd07 2173/* warning: addr must be aligned */
a8170e5e 2174static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2175 enum device_endian endian)
8df1cd07 2176{
8df1cd07
FB
2177 uint8_t *ptr;
2178 uint32_t val;
f3705d53 2179 MemoryRegionSection *section;
8df1cd07 2180
ac1970fb 2181 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2182
cc5bea60
BS
2183 if (!(memory_region_is_ram(section->mr) ||
2184 memory_region_is_romd(section->mr))) {
8df1cd07 2185 /* I/O case */
cc5bea60 2186 addr = memory_region_section_addr(section, addr);
37ec01d4 2187 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
2188#if defined(TARGET_WORDS_BIGENDIAN)
2189 if (endian == DEVICE_LITTLE_ENDIAN) {
2190 val = bswap32(val);
2191 }
2192#else
2193 if (endian == DEVICE_BIG_ENDIAN) {
2194 val = bswap32(val);
2195 }
2196#endif
8df1cd07
FB
2197 } else {
2198 /* RAM case */
f3705d53 2199 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2200 & TARGET_PAGE_MASK)
cc5bea60 2201 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2202 switch (endian) {
2203 case DEVICE_LITTLE_ENDIAN:
2204 val = ldl_le_p(ptr);
2205 break;
2206 case DEVICE_BIG_ENDIAN:
2207 val = ldl_be_p(ptr);
2208 break;
2209 default:
2210 val = ldl_p(ptr);
2211 break;
2212 }
8df1cd07
FB
2213 }
2214 return val;
2215}
2216
a8170e5e 2217uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2218{
2219 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2220}
2221
a8170e5e 2222uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2223{
2224 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2225}
2226
a8170e5e 2227uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2228{
2229 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2230}
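/*
 * Usage sketch for the fixed-endian variants above (addresses are
 * illustrative): device models read guest structures in the byte order the
 * device defines, independent of TARGET_WORDS_BIGENDIAN.
 */
#if 0 /* illustration only */
    uint32_t desc_flags = ldl_le_phys(desc_addr);   /* little-endian layout */
    uint32_t fw_token   = ldl_be_phys(table_addr);  /* big-endian layout    */
#endif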
2231
84b7b8e7 2232/* warning: addr must be aligned */
a8170e5e 2233static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2234 enum device_endian endian)
84b7b8e7 2235{
84b7b8e7
FB
2236 uint8_t *ptr;
2237 uint64_t val;
f3705d53 2238 MemoryRegionSection *section;
84b7b8e7 2239
ac1970fb 2240 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2241
cc5bea60
BS
2242 if (!(memory_region_is_ram(section->mr) ||
2243 memory_region_is_romd(section->mr))) {
84b7b8e7 2244 /* I/O case */
cc5bea60 2245 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
2246
2247 /* XXX This is broken when device endian != cpu endian.
2248 Fix and add "endian" variable check */
84b7b8e7 2249#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2250 val = io_mem_read(section->mr, addr, 4) << 32;
2251 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2252#else
37ec01d4
AK
2253 val = io_mem_read(section->mr, addr, 4);
2254 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
2255#endif
2256 } else {
2257 /* RAM case */
f3705d53 2258 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2259 & TARGET_PAGE_MASK)
cc5bea60 2260 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2261 switch (endian) {
2262 case DEVICE_LITTLE_ENDIAN:
2263 val = ldq_le_p(ptr);
2264 break;
2265 case DEVICE_BIG_ENDIAN:
2266 val = ldq_be_p(ptr);
2267 break;
2268 default:
2269 val = ldq_p(ptr);
2270 break;
2271 }
84b7b8e7
FB
2272 }
2273 return val;
2274}
2275
a8170e5e 2276uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2277{
2278 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2279}
2280
a8170e5e 2281uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2282{
2283 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2284}
2285
a8170e5e 2286uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2287{
2288 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2289}
2290
aab33094 2291/* XXX: optimize */
a8170e5e 2292uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2293{
2294 uint8_t val;
2295 cpu_physical_memory_read(addr, &val, 1);
2296 return val;
2297}
2298
733f0b02 2299/* warning: addr must be aligned */
a8170e5e 2300static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2301 enum device_endian endian)
aab33094 2302{
733f0b02
MT
2303 uint8_t *ptr;
2304 uint64_t val;
f3705d53 2305 MemoryRegionSection *section;
733f0b02 2306
ac1970fb 2307 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2308
cc5bea60
BS
2309 if (!(memory_region_is_ram(section->mr) ||
2310 memory_region_is_romd(section->mr))) {
733f0b02 2311 /* I/O case */
cc5bea60 2312 addr = memory_region_section_addr(section, addr);
37ec01d4 2313 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
2314#if defined(TARGET_WORDS_BIGENDIAN)
2315 if (endian == DEVICE_LITTLE_ENDIAN) {
2316 val = bswap16(val);
2317 }
2318#else
2319 if (endian == DEVICE_BIG_ENDIAN) {
2320 val = bswap16(val);
2321 }
2322#endif
733f0b02
MT
2323 } else {
2324 /* RAM case */
f3705d53 2325 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2326 & TARGET_PAGE_MASK)
cc5bea60 2327 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2328 switch (endian) {
2329 case DEVICE_LITTLE_ENDIAN:
2330 val = lduw_le_p(ptr);
2331 break;
2332 case DEVICE_BIG_ENDIAN:
2333 val = lduw_be_p(ptr);
2334 break;
2335 default:
2336 val = lduw_p(ptr);
2337 break;
2338 }
733f0b02
MT
2339 }
2340 return val;
aab33094
FB
2341}
2342
a8170e5e 2343uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2344{
2345 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2346}
2347
a8170e5e 2348uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2349{
2350 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2351}
2352
a8170e5e 2353uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2354{
2355 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2356}
2357
8df1cd07
FB
2358/* warning: addr must be aligned. The ram page is not marked as dirty
2359 and the code inside is not invalidated. It is useful if the dirty
2360 bits are used to track modified PTEs */
a8170e5e 2361void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2362{
8df1cd07 2363 uint8_t *ptr;
f3705d53 2364 MemoryRegionSection *section;
8df1cd07 2365
ac1970fb 2366 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2367
f3705d53 2368 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2369 addr = memory_region_section_addr(section, addr);
f3705d53 2370 if (memory_region_is_ram(section->mr)) {
37ec01d4 2371 section = &phys_sections[phys_section_rom];
06ef3525 2372 }
37ec01d4 2373 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2374 } else {
f3705d53 2375 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2376 & TARGET_PAGE_MASK)
cc5bea60 2377 + memory_region_section_addr(section, addr);
5579c7f3 2378 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2379 stl_p(ptr, val);
74576198
AL
2380
2381 if (unlikely(in_migration)) {
2382 if (!cpu_physical_memory_is_dirty(addr1)) {
2383 /* invalidate code */
2384 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2385 /* set dirty bit */
f7c11b53
YT
2386 cpu_physical_memory_set_dirty_flags(
2387 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2388 }
2389 }
8df1cd07
FB
2390 }
2391}
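/*
 * Sketch of the intended use described in the comment above: an MMU helper
 * setting the Accessed bit of a guest PTE.  PG_ACCESSED_MASK and pte_addr
 * are illustrative here; the real callers live in the per-target code.
 */
#if 0 /* illustration only */
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PG_ACCESSED_MASK)) {
        /* update the PTE without marking the RAM page dirty or
         * invalidating translated code for it */
        stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
    }
#endif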
2392
2393/* warning: addr must be aligned */
a8170e5e 2394static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2395 enum device_endian endian)
8df1cd07 2396{
8df1cd07 2397 uint8_t *ptr;
f3705d53 2398 MemoryRegionSection *section;
8df1cd07 2399
ac1970fb 2400 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2401
f3705d53 2402 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2403 addr = memory_region_section_addr(section, addr);
f3705d53 2404 if (memory_region_is_ram(section->mr)) {
37ec01d4 2405 section = &phys_sections[phys_section_rom];
06ef3525 2406 }
1e78bcc1
AG
2407#if defined(TARGET_WORDS_BIGENDIAN)
2408 if (endian == DEVICE_LITTLE_ENDIAN) {
2409 val = bswap32(val);
2410 }
2411#else
2412 if (endian == DEVICE_BIG_ENDIAN) {
2413 val = bswap32(val);
2414 }
2415#endif
37ec01d4 2416 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
2417 } else {
2418 unsigned long addr1;
f3705d53 2419 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2420 + memory_region_section_addr(section, addr);
8df1cd07 2421 /* RAM case */
5579c7f3 2422 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2423 switch (endian) {
2424 case DEVICE_LITTLE_ENDIAN:
2425 stl_le_p(ptr, val);
2426 break;
2427 case DEVICE_BIG_ENDIAN:
2428 stl_be_p(ptr, val);
2429 break;
2430 default:
2431 stl_p(ptr, val);
2432 break;
2433 }
51d7a9eb 2434 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2435 }
2436}
2437
a8170e5e 2438void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2439{
2440 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2441}
2442
a8170e5e 2443void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2444{
2445 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2446}
2447
a8170e5e 2448void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2449{
2450 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2451}
2452
aab33094 2453/* XXX: optimize */
a8170e5e 2454void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2455{
2456 uint8_t v = val;
2457 cpu_physical_memory_write(addr, &v, 1);
2458}
2459
733f0b02 2460/* warning: addr must be aligned */
a8170e5e 2461static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2462 enum device_endian endian)
aab33094 2463{
733f0b02 2464 uint8_t *ptr;
f3705d53 2465 MemoryRegionSection *section;
733f0b02 2466
ac1970fb 2467 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2468
f3705d53 2469 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2470 addr = memory_region_section_addr(section, addr);
f3705d53 2471 if (memory_region_is_ram(section->mr)) {
37ec01d4 2472 section = &phys_sections[phys_section_rom];
06ef3525 2473 }
1e78bcc1
AG
2474#if defined(TARGET_WORDS_BIGENDIAN)
2475 if (endian == DEVICE_LITTLE_ENDIAN) {
2476 val = bswap16(val);
2477 }
2478#else
2479 if (endian == DEVICE_BIG_ENDIAN) {
2480 val = bswap16(val);
2481 }
2482#endif
37ec01d4 2483 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
2484 } else {
2485 unsigned long addr1;
f3705d53 2486 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2487 + memory_region_section_addr(section, addr);
733f0b02
MT
2488 /* RAM case */
2489 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2490 switch (endian) {
2491 case DEVICE_LITTLE_ENDIAN:
2492 stw_le_p(ptr, val);
2493 break;
2494 case DEVICE_BIG_ENDIAN:
2495 stw_be_p(ptr, val);
2496 break;
2497 default:
2498 stw_p(ptr, val);
2499 break;
2500 }
51d7a9eb 2501 invalidate_and_set_dirty(addr1, 2);
733f0b02 2502 }
aab33094
FB
2503}
2504
a8170e5e 2505void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2506{
2507 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2508}
2509
a8170e5e 2510void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2511{
2512 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2513}
2514
a8170e5e 2515void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2516{
2517 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2518}
2519
aab33094 2520/* XXX: optimize */
a8170e5e 2521void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2522{
2523 val = tswap64(val);
71d2b725 2524 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2525}
2526
a8170e5e 2527void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2528{
2529 val = cpu_to_le64(val);
2530 cpu_physical_memory_write(addr, &val, 8);
2531}
2532
a8170e5e 2533void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2534{
2535 val = cpu_to_be64(val);
2536 cpu_physical_memory_write(addr, &val, 8);
2537}
2538
5e2972fd 2539/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2540int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2541 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2542{
2543 int l;
a8170e5e 2544 hwaddr phys_addr;
9b3c35e0 2545 target_ulong page;
13eb76e0
FB
2546
2547 while (len > 0) {
2548 page = addr & TARGET_PAGE_MASK;
2549 phys_addr = cpu_get_phys_page_debug(env, page);
2550 /* if no physical page mapped, return an error */
2551 if (phys_addr == -1)
2552 return -1;
2553 l = (page + TARGET_PAGE_SIZE) - addr;
2554 if (l > len)
2555 l = len;
5e2972fd 2556 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2557 if (is_write)
2558 cpu_physical_memory_write_rom(phys_addr, buf, l);
2559 else
5e2972fd 2560 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2561 len -= l;
2562 buf += l;
2563 addr += l;
2564 }
2565 return 0;
2566}
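/*
 * Usage sketch ("vaddr" is illustrative): the gdbstub uses this helper to
 * read and write guest virtual memory, translating one page at a time.
 */
#if 0 /* illustration only */
    uint8_t buf[16];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        /* no physical page is mapped at vaddr */
    }
#endif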
a68fe89c 2567#endif
13eb76e0 2568
8e4a424b
BS
2569#if !defined(CONFIG_USER_ONLY)
2570
2571/*
2572 * A helper function for the _utterly broken_ virtio device model to find out if
2573 * it's running on a big endian machine. Don't do this at home kids!
2574 */
2575bool virtio_is_big_endian(void);
2576bool virtio_is_big_endian(void)
2577{
2578#if defined(TARGET_WORDS_BIGENDIAN)
2579 return true;
2580#else
2581 return false;
2582#endif
2583}
2584
2585#endif
2586
76f35538 2587#ifndef CONFIG_USER_ONLY
a8170e5e 2588bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2589{
2590 MemoryRegionSection *section;
2591
ac1970fb
AK
2592 section = phys_page_find(address_space_memory.dispatch,
2593 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
2594
2595 return !(memory_region_is_ram(section->mr) ||
2596 memory_region_is_romd(section->mr));
2597}
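/*
 * Usage sketch ("paddr" is illustrative): a caller can use this to tell
 * device-backed (MMIO) pages from plain RAM/ROM before touching them.
 */
#if 0 /* illustration only */
    if (!cpu_physical_memory_is_io(paddr)) {
        /* plain RAM/ROM: safe to copy the page contents out directly */
    }
#endif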
2598#endif