[qemu.git] / exec.c
/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

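/* Grow the PhysPageEntry node pool so that at least 'nodes' more nodes can
   be handed out by phys_map_node_alloc() without a further reallocation. */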
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


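/* Recursively descend the multi-level physical page map, allocating
   intermediate nodes on demand and pointing leaves at the given section.
   'index' and 'nb' are advanced as pages are consumed, so one call can
   cover a range that spans several nodes at this level. */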
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

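/* Look up the section covering a physical page index by walking the page
   map from the top level down; if the walk falls off the populated part of
   the tree, the unassigned section is returned. */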
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);

}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

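/* Compute the iotlb value stored in a TLB entry for this section: for RAM,
   a ram address tagged with the notdirty or rom section; for MMIO, the
   section index plus the offset within the section.  Pages carrying a
   watchpoint are redirected to the watch section instead. */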
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

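/* Split an incoming section into an unaligned head, a run of whole target
   pages, and an unaligned tail; heads and tails go through the subpage
   machinery, whole pages are registered directly. */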
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    filename = g_strdup_printf("%s/qemu_back_mem.XXXXXX", path);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

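/* Pick an offset for a new RAM block: scan the existing blocks and return
   the start of the smallest gap large enough to hold 'size' bytes. */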
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

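/* Allocate a new RAM block of 'size' bytes.  If 'host' is non-NULL the
   caller supplies the backing memory; otherwise it is allocated here
   (from -mem-path, Xen, KVM or plain qemu_vmalloc, as appropriate). */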
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

9fa3e853 1426{
3a7d929e 1427 int dirty_flags;
f7c11b53 1428 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1429 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1430#if !defined(CONFIG_USER_ONLY)
0e0df1e2 1431 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1432 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 1433#endif
3a7d929e 1434 }
0e0df1e2
AK
1435 switch (size) {
1436 case 1:
1437 stb_p(qemu_get_ram_ptr(ram_addr), val);
1438 break;
1439 case 2:
1440 stw_p(qemu_get_ram_ptr(ram_addr), val);
1441 break;
1442 case 4:
1443 stl_p(qemu_get_ram_ptr(ram_addr), val);
1444 break;
1445 default:
1446 abort();
3a7d929e 1447 }
f23db169 1448 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1449 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1450 /* we remove the notdirty callback only if the code has been
1451 flushed */
1452 if (dirty_flags == 0xff)
2e70f6ef 1453 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
1454}
1455
0e0df1e2
AK
1456static const MemoryRegionOps notdirty_mem_ops = {
1457 .read = error_mem_read,
1458 .write = notdirty_mem_write,
1459 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1460};
1461
0f459d16 1462/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1463static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1464{
9349b4f9 1465 CPUArchState *env = cpu_single_env;
06d55cc1 1466 target_ulong pc, cs_base;
0f459d16 1467 target_ulong vaddr;
a1d1bb31 1468 CPUWatchpoint *wp;
06d55cc1 1469 int cpu_flags;
0f459d16 1470
06d55cc1
AL
1471 if (env->watchpoint_hit) {
1472 /* We re-entered the check after replacing the TB. Now raise
1473 * the debug interrupt so that is will trigger after the
1474 * current instruction. */
1475 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1476 return;
1477 }
2e70f6ef 1478 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1479 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1480 if ((vaddr == (wp->vaddr & len_mask) ||
1481 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1482 wp->flags |= BP_WATCHPOINT_HIT;
1483 if (!env->watchpoint_hit) {
1484 env->watchpoint_hit = wp;
5a316526 1485 tb_check_watchpoint(env);
6e140f28
AL
1486 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1487 env->exception_index = EXCP_DEBUG;
488d6577 1488 cpu_loop_exit(env);
6e140f28
AL
1489 } else {
1490 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1491 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1492 cpu_resume_from_signal(env, NULL);
6e140f28 1493 }
06d55cc1 1494 }
6e140f28
AL
1495 } else {
1496 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1497 }
1498 }
1499}
1500
6658ffb8
PB
1501/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1502 so these check for a hit then pass through to the normal out-of-line
1503 phys routines. */
a8170e5e 1504static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1505 unsigned size)
6658ffb8 1506{
1ec9b909
AK
1507 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1508 switch (size) {
1509 case 1: return ldub_phys(addr);
1510 case 2: return lduw_phys(addr);
1511 case 4: return ldl_phys(addr);
1512 default: abort();
1513 }
6658ffb8
PB
1514}
1515
a8170e5e 1516static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1517 uint64_t val, unsigned size)
6658ffb8 1518{
1ec9b909
AK
1519 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1520 switch (size) {
67364150
MF
1521 case 1:
1522 stb_phys(addr, val);
1523 break;
1524 case 2:
1525 stw_phys(addr, val);
1526 break;
1527 case 4:
1528 stl_phys(addr, val);
1529 break;
1ec9b909
AK
1530 default: abort();
1531 }
6658ffb8
PB
1532}
1533
1ec9b909
AK
1534static const MemoryRegionOps watch_mem_ops = {
1535 .read = watch_mem_read,
1536 .write = watch_mem_write,
1537 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1538};
6658ffb8 1539
a8170e5e 1540static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1541 unsigned len)
db7b5426 1542{
70c68e44 1543 subpage_t *mmio = opaque;
f6405247 1544 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1545 MemoryRegionSection *section;
db7b5426
BS
1546#if defined(DEBUG_SUBPAGE)
1547 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1548 mmio, len, addr, idx);
1549#endif
db7b5426 1550
5312bd8b
AK
1551 section = &phys_sections[mmio->sub_section[idx]];
1552 addr += mmio->base;
1553 addr -= section->offset_within_address_space;
1554 addr += section->offset_within_region;
37ec01d4 1555 return io_mem_read(section->mr, addr, len);
db7b5426
BS
1556}
1557
a8170e5e 1558static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1559 uint64_t value, unsigned len)
db7b5426 1560{
70c68e44 1561 subpage_t *mmio = opaque;
f6405247 1562 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1563 MemoryRegionSection *section;
db7b5426 1564#if defined(DEBUG_SUBPAGE)
70c68e44
AK
1565 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1566 " idx %d value %"PRIx64"\n",
f6405247 1567 __func__, mmio, len, addr, idx, value);
db7b5426 1568#endif
f6405247 1569
5312bd8b
AK
1570 section = &phys_sections[mmio->sub_section[idx]];
1571 addr += mmio->base;
1572 addr -= section->offset_within_address_space;
1573 addr += section->offset_within_region;
37ec01d4 1574 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
1575}
1576
70c68e44
AK
1577static const MemoryRegionOps subpage_ops = {
1578 .read = subpage_read,
1579 .write = subpage_write,
1580 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1581};
1582
a8170e5e 1583static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
de712f94 1584 unsigned size)
56384e8b
AF
1585{
1586 ram_addr_t raddr = addr;
1587 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
1588 switch (size) {
1589 case 1: return ldub_p(ptr);
1590 case 2: return lduw_p(ptr);
1591 case 4: return ldl_p(ptr);
1592 default: abort();
1593 }
56384e8b
AF
1594}
1595
a8170e5e 1596static void subpage_ram_write(void *opaque, hwaddr addr,
de712f94 1597 uint64_t value, unsigned size)
56384e8b
AF
1598{
1599 ram_addr_t raddr = addr;
1600 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
1601 switch (size) {
1602 case 1: return stb_p(ptr, value);
1603 case 2: return stw_p(ptr, value);
1604 case 4: return stl_p(ptr, value);
1605 default: abort();
1606 }
56384e8b
AF
1607}
1608
de712f94
AK
1609static const MemoryRegionOps subpage_ram_ops = {
1610 .read = subpage_ram_read,
1611 .write = subpage_ram_write,
1612 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
1613};
1614
c227f099 1615static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1616 uint16_t section)
db7b5426
BS
1617{
1618 int idx, eidx;
1619
1620 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1621 return -1;
1622 idx = SUBPAGE_IDX(start);
1623 eidx = SUBPAGE_IDX(end);
1624#if defined(DEBUG_SUBPAGE)
0bf9e31a 1625 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
1626 mmio, start, end, idx, eidx, memory);
1627#endif
5312bd8b
AK
1628 if (memory_region_is_ram(phys_sections[section].mr)) {
1629 MemoryRegionSection new_section = phys_sections[section];
1630 new_section.mr = &io_mem_subpage_ram;
1631 section = phys_section_add(&new_section);
56384e8b 1632 }
db7b5426 1633 for (; idx <= eidx; idx++) {
5312bd8b 1634 mmio->sub_section[idx] = section;
db7b5426
BS
1635 }
1636
1637 return 0;
1638}
1639
a8170e5e 1640static subpage_t *subpage_init(hwaddr base)
db7b5426 1641{
c227f099 1642 subpage_t *mmio;
db7b5426 1643
7267c094 1644 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
1645
1646 mmio->base = base;
70c68e44
AK
1647 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1648 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1649 mmio->iomem.subpage = true;
db7b5426 1650#if defined(DEBUG_SUBPAGE)
1eec614b
AL
1651 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
1652 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 1653#endif
0f0cb164 1654 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
1655
1656 return mmio;
1657}
1658
5312bd8b
AK
1659static uint16_t dummy_section(MemoryRegion *mr)
1660{
1661 MemoryRegionSection section = {
1662 .mr = mr,
1663 .offset_within_address_space = 0,
1664 .offset_within_region = 0,
1665 .size = UINT64_MAX,
1666 };
1667
1668 return phys_section_add(&section);
1669}
1670
a8170e5e 1671MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1672{
37ec01d4 1673 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1674}
1675
e9179ce1
AK
1676static void io_mem_init(void)
1677{
0e0df1e2 1678 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
1679 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1680 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1681 "unassigned", UINT64_MAX);
1682 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1683 "notdirty", UINT64_MAX);
de712f94
AK
1684 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1685 "subpage-ram", UINT64_MAX);
1ec9b909
AK
1686 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1687 "watch", UINT64_MAX);
e9179ce1
AK
1688}
1689
ac1970fb
AK
1690static void mem_begin(MemoryListener *listener)
1691{
1692 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1693
1694 destroy_all_mappings(d);
1695 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1696}
1697
50c1e149
AK
1698static void core_begin(MemoryListener *listener)
1699{
5312bd8b
AK
1700 phys_sections_clear();
1701 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
1702 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1703 phys_section_rom = dummy_section(&io_mem_rom);
1704 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
1705}
1706
1d71148e 1707static void tcg_commit(MemoryListener *listener)
50c1e149 1708{
9349b4f9 1709 CPUArchState *env;
117712c3
AK
1710
1711 /* since each CPU stores ram addresses in its TLB cache, we must
1712 reset the modified entries */
1713 /* XXX: slow ! */
1714 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1715 tlb_flush(env, 1);
1716 }
50c1e149
AK
1717}
1718
93632747
AK
1719static void core_log_global_start(MemoryListener *listener)
1720{
1721 cpu_physical_memory_set_dirty_tracking(1);
1722}
1723
1724static void core_log_global_stop(MemoryListener *listener)
1725{
1726 cpu_physical_memory_set_dirty_tracking(0);
1727}
1728
4855d41a
AK
1729static void io_region_add(MemoryListener *listener,
1730 MemoryRegionSection *section)
1731{
a2d33521
AK
1732 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1733
1734 mrio->mr = section->mr;
1735 mrio->offset = section->offset_within_region;
1736 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 1737 section->offset_within_address_space, section->size);
a2d33521 1738 ioport_register(&mrio->iorange);
4855d41a
AK
1739}
1740
1741static void io_region_del(MemoryListener *listener,
1742 MemoryRegionSection *section)
1743{
1744 isa_unassign_ioport(section->offset_within_address_space, section->size);
1745}
1746
93632747 1747static MemoryListener core_memory_listener = {
50c1e149 1748 .begin = core_begin,
93632747
AK
1749 .log_global_start = core_log_global_start,
1750 .log_global_stop = core_log_global_stop,
ac1970fb 1751 .priority = 1,
93632747
AK
1752};
1753
4855d41a
AK
1754static MemoryListener io_memory_listener = {
1755 .region_add = io_region_add,
1756 .region_del = io_region_del,
4855d41a
AK
1757 .priority = 0,
1758};
1759
1d71148e
AK
1760static MemoryListener tcg_memory_listener = {
1761 .commit = tcg_commit,
1762};
1763
ac1970fb
AK
1764void address_space_init_dispatch(AddressSpace *as)
1765{
1766 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1767
1768 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1769 d->listener = (MemoryListener) {
1770 .begin = mem_begin,
1771 .region_add = mem_add,
1772 .region_nop = mem_add,
1773 .priority = 0,
1774 };
1775 as->dispatch = d;
1776 memory_listener_register(&d->listener, as);
1777}
1778
83f3c251
AK
1779void address_space_destroy_dispatch(AddressSpace *as)
1780{
1781 AddressSpaceDispatch *d = as->dispatch;
1782
1783 memory_listener_unregister(&d->listener);
1784 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1785 g_free(d);
1786 as->dispatch = NULL;
1787}
1788
62152b8a
AK
1789static void memory_map_init(void)
1790{
7267c094 1791 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1792 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1793 address_space_init(&address_space_memory, system_memory);
1794 address_space_memory.name = "memory";
309cb471 1795
7267c094 1796 system_io = g_malloc(sizeof(*system_io));
309cb471 1797 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1798 address_space_init(&address_space_io, system_io);
1799 address_space_io.name = "I/O";
93632747 1800
f6790af6
AK
1801 memory_listener_register(&core_memory_listener, &address_space_memory);
1802 memory_listener_register(&io_memory_listener, &address_space_io);
1803 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1804
1805 dma_context_init(&dma_context_memory, &address_space_memory,
1806 NULL, NULL, NULL);
62152b8a
AK
1807}
1808
1809MemoryRegion *get_system_memory(void)
1810{
1811 return system_memory;
1812}
1813
309cb471
AK
1814MemoryRegion *get_system_io(void)
1815{
1816 return system_io;
1817}
1818
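/* Illustrative sketch (not part of exec.c): how board code typically plugs
 * RAM into the region tree returned by get_system_memory().  The region
 * name and ram_size are made up for the example; signatures are assumed to
 * match this tree's pre-QOM-owner memory API.
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *     memory_region_init_ram(ram, "board.ram", ram_size);
 *     vmstate_register_ram_global(ram);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */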
e2eef170
PB
1819#endif /* !defined(CONFIG_USER_ONLY) */
1820
13eb76e0
FB
1821/* physical memory access (slow version, mainly for debug) */
1822#if defined(CONFIG_USER_ONLY)
9349b4f9 1823int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1824 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1825{
1826 int l, flags;
1827 target_ulong page;
53a5960a 1828 void * p;
13eb76e0
FB
1829
1830 while (len > 0) {
1831 page = addr & TARGET_PAGE_MASK;
1832 l = (page + TARGET_PAGE_SIZE) - addr;
1833 if (l > len)
1834 l = len;
1835 flags = page_get_flags(page);
1836 if (!(flags & PAGE_VALID))
a68fe89c 1837 return -1;
13eb76e0
FB
1838 if (is_write) {
1839 if (!(flags & PAGE_WRITE))
a68fe89c 1840 return -1;
579a97f7 1841 /* XXX: this code should not depend on lock_user */
72fb7daa 1842 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1843 return -1;
72fb7daa
AJ
1844 memcpy(p, buf, l);
1845 unlock_user(p, addr, l);
13eb76e0
FB
1846 } else {
1847 if (!(flags & PAGE_READ))
a68fe89c 1848 return -1;
579a97f7 1849 /* XXX: this code should not depend on lock_user */
72fb7daa 1850 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1851 return -1;
72fb7daa 1852 memcpy(buf, p, l);
5b257578 1853 unlock_user(p, addr, 0);
13eb76e0
FB
1854 }
1855 len -= l;
1856 buf += l;
1857 addr += l;
1858 }
a68fe89c 1859 return 0;
13eb76e0 1860}
8df1cd07 1861
13eb76e0 1862#else
51d7a9eb 1863
a8170e5e
AK
1864static void invalidate_and_set_dirty(hwaddr addr,
1865 hwaddr length)
51d7a9eb
AP
1866{
1867 if (!cpu_physical_memory_is_dirty(addr)) {
1868 /* invalidate code */
1869 tb_invalidate_phys_page_range(addr, addr + length, 0);
1870 /* set dirty bit */
1871 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1872 }
e226939d 1873 xen_modified_memory(addr, length);
51d7a9eb
AP
1874}
1875
a8170e5e 1876void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1877 int len, bool is_write)
13eb76e0 1878{
ac1970fb 1879 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1880 int l;
13eb76e0
FB
1881 uint8_t *ptr;
1882 uint32_t val;
a8170e5e 1883 hwaddr page;
f3705d53 1884 MemoryRegionSection *section;
3b46e624 1885
13eb76e0
FB
1886 while (len > 0) {
1887 page = addr & TARGET_PAGE_MASK;
1888 l = (page + TARGET_PAGE_SIZE) - addr;
1889 if (l > len)
1890 l = len;
ac1970fb 1891 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1892
13eb76e0 1893 if (is_write) {
f3705d53 1894 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1895 hwaddr addr1;
cc5bea60 1896 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
1897 /* XXX: could force cpu_single_env to NULL to avoid
1898 potential bugs */
6c2934db 1899 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1900 /* 32 bit write access */
c27004ec 1901 val = ldl_p(buf);
37ec01d4 1902 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1903 l = 4;
6c2934db 1904 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1905 /* 16 bit write access */
c27004ec 1906 val = lduw_p(buf);
37ec01d4 1907 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
1908 l = 2;
1909 } else {
1c213d19 1910 /* 8 bit write access */
c27004ec 1911 val = ldub_p(buf);
37ec01d4 1912 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
1913 l = 1;
1914 }
f3705d53 1915 } else if (!section->readonly) {
8ca5692d 1916 ram_addr_t addr1;
f3705d53 1917 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1918 + memory_region_section_addr(section, addr);
13eb76e0 1919 /* RAM case */
5579c7f3 1920 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1921 memcpy(ptr, buf, l);
51d7a9eb 1922 invalidate_and_set_dirty(addr1, l);
050a0ddf 1923 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1924 }
1925 } else {
cc5bea60
BS
1926 if (!(memory_region_is_ram(section->mr) ||
1927 memory_region_is_romd(section->mr))) {
a8170e5e 1928 hwaddr addr1;
13eb76e0 1929 /* I/O case */
cc5bea60 1930 addr1 = memory_region_section_addr(section, addr);
6c2934db 1931 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1932 /* 32 bit read access */
37ec01d4 1933 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1934 stl_p(buf, val);
13eb76e0 1935 l = 4;
6c2934db 1936 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1937 /* 16 bit read access */
37ec01d4 1938 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1939 stw_p(buf, val);
13eb76e0
FB
1940 l = 2;
1941 } else {
1c213d19 1942 /* 8 bit read access */
37ec01d4 1943 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1944 stb_p(buf, val);
13eb76e0
FB
1945 l = 1;
1946 }
1947 } else {
1948 /* RAM case */
0a1b357f 1949 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
1950 + memory_region_section_addr(section,
1951 addr));
f3705d53 1952 memcpy(buf, ptr, l);
050a0ddf 1953 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1954 }
1955 }
1956 len -= l;
1957 buf += l;
1958 addr += l;
1959 }
1960}
8df1cd07 1961
a8170e5e 1962void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1963 const uint8_t *buf, int len)
1964{
1965 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1966}
1967
1968/**
1969 * address_space_read: read from an address space.
1970 *
1971 * @as: #AddressSpace to be accessed
1972 * @addr: address within that address space
 1973 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 1974 */
a8170e5e 1975void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
1976{
1977 address_space_rw(as, addr, buf, len, false);
1978}
1979
1980
a8170e5e 1981void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1982 int len, int is_write)
1983{
1984 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1985}
1986
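/* Illustrative sketch (not part of exec.c): typical use of the slow-path
 * helpers above from device or machine code.  The guest physical address and
 * buffer size are made up for the example; cpu_physical_memory_read()/write()
 * are the thin wrappers around cpu_physical_memory_rw() used elsewhere in
 * this file.
 *
 *     uint8_t buf[64];
 *     hwaddr gpa = 0x1000;                               // hypothetical
 *     cpu_physical_memory_read(gpa, buf, sizeof(buf));   // is_write == 0
 *     buf[0] ^= 0xff;
 *     cpu_physical_memory_write(gpa, buf, sizeof(buf));  // is_write == 1
 */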
d0ecd2aa 1987/* used for ROM loading: can write to RAM and ROM */
a8170e5e 1988void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1989 const uint8_t *buf, int len)
1990{
ac1970fb 1991 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
1992 int l;
1993 uint8_t *ptr;
a8170e5e 1994 hwaddr page;
f3705d53 1995 MemoryRegionSection *section;
3b46e624 1996
d0ecd2aa
FB
1997 while (len > 0) {
1998 page = addr & TARGET_PAGE_MASK;
1999 l = (page + TARGET_PAGE_SIZE) - addr;
2000 if (l > len)
2001 l = len;
ac1970fb 2002 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 2003
cc5bea60
BS
2004 if (!(memory_region_is_ram(section->mr) ||
2005 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
2006 /* do nothing */
2007 } else {
2008 unsigned long addr1;
f3705d53 2009 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 2010 + memory_region_section_addr(section, addr);
d0ecd2aa 2011 /* ROM/RAM case */
5579c7f3 2012 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2013 memcpy(ptr, buf, l);
51d7a9eb 2014 invalidate_and_set_dirty(addr1, l);
050a0ddf 2015 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
2016 }
2017 len -= l;
2018 buf += l;
2019 addr += l;
2020 }
2021}
2022
6d16c2f8
AL
2023typedef struct {
2024 void *buffer;
a8170e5e
AK
2025 hwaddr addr;
2026 hwaddr len;
6d16c2f8
AL
2027} BounceBuffer;
2028
2029static BounceBuffer bounce;
2030
ba223c29
AL
2031typedef struct MapClient {
2032 void *opaque;
2033 void (*callback)(void *opaque);
72cf2d4f 2034 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2035} MapClient;
2036
72cf2d4f
BS
2037static QLIST_HEAD(map_client_list, MapClient) map_client_list
2038 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2039
2040void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2041{
7267c094 2042 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2043
2044 client->opaque = opaque;
2045 client->callback = callback;
72cf2d4f 2046 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2047 return client;
2048}
2049
8b9c99d9 2050static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2051{
2052 MapClient *client = (MapClient *)_client;
2053
72cf2d4f 2054 QLIST_REMOVE(client, link);
7267c094 2055 g_free(client);
ba223c29
AL
2056}
2057
2058static void cpu_notify_map_clients(void)
2059{
2060 MapClient *client;
2061
72cf2d4f
BS
2062 while (!QLIST_EMPTY(&map_client_list)) {
2063 client = QLIST_FIRST(&map_client_list);
ba223c29 2064 client->callback(client->opaque);
34d5e948 2065 cpu_unregister_map_client(client);
ba223c29
AL
2066 }
2067}
2068
6d16c2f8
AL
2069/* Map a physical memory region into a host virtual address.
2070 * May map a subset of the requested range, given by and returned in *plen.
2071 * May return NULL if resources needed to perform the mapping are exhausted.
2072 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2073 * Use cpu_register_map_client() to know when retrying the map operation is
2074 * likely to succeed.
6d16c2f8 2075 */
ac1970fb 2076void *address_space_map(AddressSpace *as,
a8170e5e
AK
2077 hwaddr addr,
2078 hwaddr *plen,
ac1970fb 2079 bool is_write)
6d16c2f8 2080{
ac1970fb 2081 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
2082 hwaddr len = *plen;
2083 hwaddr todo = 0;
6d16c2f8 2084 int l;
a8170e5e 2085 hwaddr page;
f3705d53 2086 MemoryRegionSection *section;
f15fbc4b 2087 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2088 ram_addr_t rlen;
2089 void *ret;
6d16c2f8
AL
2090
2091 while (len > 0) {
2092 page = addr & TARGET_PAGE_MASK;
2093 l = (page + TARGET_PAGE_SIZE) - addr;
2094 if (l > len)
2095 l = len;
ac1970fb 2096 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2097
f3705d53 2098 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2099 if (todo || bounce.buffer) {
6d16c2f8
AL
2100 break;
2101 }
2102 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2103 bounce.addr = addr;
2104 bounce.len = l;
2105 if (!is_write) {
ac1970fb 2106 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2107 }
38bee5dc
SS
2108
2109 *plen = l;
2110 return bounce.buffer;
6d16c2f8 2111 }
8ab934f9 2112 if (!todo) {
f3705d53 2113 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2114 + memory_region_section_addr(section, addr);
8ab934f9 2115 }
6d16c2f8
AL
2116
2117 len -= l;
2118 addr += l;
38bee5dc 2119 todo += l;
6d16c2f8 2120 }
8ab934f9
SS
2121 rlen = todo;
2122 ret = qemu_ram_ptr_length(raddr, &rlen);
2123 *plen = rlen;
2124 return ret;
6d16c2f8
AL
2125}
2126
ac1970fb 2127/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2128 * Will also mark the memory as dirty if is_write == 1. access_len gives
2129 * the amount of memory that was actually read or written by the caller.
2130 */
a8170e5e
AK
2131void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2132 int is_write, hwaddr access_len)
6d16c2f8
AL
2133{
2134 if (buffer != bounce.buffer) {
2135 if (is_write) {
e890261f 2136 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2137 while (access_len) {
2138 unsigned l;
2139 l = TARGET_PAGE_SIZE;
2140 if (l > access_len)
2141 l = access_len;
51d7a9eb 2142 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2143 addr1 += l;
2144 access_len -= l;
2145 }
2146 }
868bb33f 2147 if (xen_enabled()) {
e41d7c69 2148 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2149 }
6d16c2f8
AL
2150 return;
2151 }
2152 if (is_write) {
ac1970fb 2153 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2154 }
f8a83245 2155 qemu_vfree(bounce.buffer);
6d16c2f8 2156 bounce.buffer = NULL;
ba223c29 2157 cpu_notify_map_clients();
6d16c2f8 2158}
d0ecd2aa 2159
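/* Illustrative sketch (not part of exec.c): the usual pattern around
 * address_space_map()/address_space_unmap() for zero-copy access to guest
 * memory.  gpa, size, opaque, retry_cb and consume_guest_bytes() are
 * hypothetical caller state, not part of this file.
 *
 *     hwaddr len = size;
 *     void *ptr = address_space_map(&address_space_memory, gpa, &len, false);
 *     if (!ptr) {
 *         // mapping resources (e.g. the bounce buffer) are exhausted;
 *         // ask to be called back when a retry is likely to succeed
 *         cpu_register_map_client(opaque, retry_cb);
 *         return;
 *     }
 *     // len may have been reduced to the contiguously mappable subset
 *     consume_guest_bytes(ptr, len);
 *     address_space_unmap(&address_space_memory, ptr, len, false, len);
 */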
a8170e5e
AK
2160void *cpu_physical_memory_map(hwaddr addr,
2161 hwaddr *plen,
ac1970fb
AK
2162 int is_write)
2163{
2164 return address_space_map(&address_space_memory, addr, plen, is_write);
2165}
2166
a8170e5e
AK
2167void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2168 int is_write, hwaddr access_len)
ac1970fb
AK
2169{
2170 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2171}
2172
8df1cd07 2173/* warning: addr must be aligned */
a8170e5e 2174static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2175 enum device_endian endian)
8df1cd07 2176{
8df1cd07
FB
2177 uint8_t *ptr;
2178 uint32_t val;
f3705d53 2179 MemoryRegionSection *section;
8df1cd07 2180
ac1970fb 2181 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2182
cc5bea60
BS
2183 if (!(memory_region_is_ram(section->mr) ||
2184 memory_region_is_romd(section->mr))) {
8df1cd07 2185 /* I/O case */
cc5bea60 2186 addr = memory_region_section_addr(section, addr);
37ec01d4 2187 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
2188#if defined(TARGET_WORDS_BIGENDIAN)
2189 if (endian == DEVICE_LITTLE_ENDIAN) {
2190 val = bswap32(val);
2191 }
2192#else
2193 if (endian == DEVICE_BIG_ENDIAN) {
2194 val = bswap32(val);
2195 }
2196#endif
8df1cd07
FB
2197 } else {
2198 /* RAM case */
f3705d53 2199 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2200 & TARGET_PAGE_MASK)
cc5bea60 2201 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2202 switch (endian) {
2203 case DEVICE_LITTLE_ENDIAN:
2204 val = ldl_le_p(ptr);
2205 break;
2206 case DEVICE_BIG_ENDIAN:
2207 val = ldl_be_p(ptr);
2208 break;
2209 default:
2210 val = ldl_p(ptr);
2211 break;
2212 }
8df1cd07
FB
2213 }
2214 return val;
2215}
2216
a8170e5e 2217uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2218{
2219 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2220}
2221
a8170e5e 2222uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2223{
2224 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2225}
2226
a8170e5e 2227uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2228{
2229 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2230}
2231
84b7b8e7 2232/* warning: addr must be aligned */
a8170e5e 2233static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2234 enum device_endian endian)
84b7b8e7 2235{
84b7b8e7
FB
2236 uint8_t *ptr;
2237 uint64_t val;
f3705d53 2238 MemoryRegionSection *section;
84b7b8e7 2239
ac1970fb 2240 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2241
cc5bea60
BS
2242 if (!(memory_region_is_ram(section->mr) ||
2243 memory_region_is_romd(section->mr))) {
84b7b8e7 2244 /* I/O case */
cc5bea60 2245 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
2246
2247 /* XXX This is broken when device endian != cpu endian.
2248 Fix and add "endian" variable check */
84b7b8e7 2249#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2250 val = io_mem_read(section->mr, addr, 4) << 32;
2251 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2252#else
37ec01d4
AK
2253 val = io_mem_read(section->mr, addr, 4);
2254 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
2255#endif
2256 } else {
2257 /* RAM case */
f3705d53 2258 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2259 & TARGET_PAGE_MASK)
cc5bea60 2260 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2261 switch (endian) {
2262 case DEVICE_LITTLE_ENDIAN:
2263 val = ldq_le_p(ptr);
2264 break;
2265 case DEVICE_BIG_ENDIAN:
2266 val = ldq_be_p(ptr);
2267 break;
2268 default:
2269 val = ldq_p(ptr);
2270 break;
2271 }
84b7b8e7
FB
2272 }
2273 return val;
2274}
2275
a8170e5e 2276uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2277{
2278 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2279}
2280
a8170e5e 2281uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2282{
2283 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2284}
2285
a8170e5e 2286uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2287{
2288 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2289}
2290
aab33094 2291/* XXX: optimize */
a8170e5e 2292uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2293{
2294 uint8_t val;
2295 cpu_physical_memory_read(addr, &val, 1);
2296 return val;
2297}
2298
733f0b02 2299/* warning: addr must be aligned */
a8170e5e 2300static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2301 enum device_endian endian)
aab33094 2302{
733f0b02
MT
2303 uint8_t *ptr;
2304 uint64_t val;
f3705d53 2305 MemoryRegionSection *section;
733f0b02 2306
ac1970fb 2307 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2308
cc5bea60
BS
2309 if (!(memory_region_is_ram(section->mr) ||
2310 memory_region_is_romd(section->mr))) {
733f0b02 2311 /* I/O case */
cc5bea60 2312 addr = memory_region_section_addr(section, addr);
37ec01d4 2313 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
2314#if defined(TARGET_WORDS_BIGENDIAN)
2315 if (endian == DEVICE_LITTLE_ENDIAN) {
2316 val = bswap16(val);
2317 }
2318#else
2319 if (endian == DEVICE_BIG_ENDIAN) {
2320 val = bswap16(val);
2321 }
2322#endif
733f0b02
MT
2323 } else {
2324 /* RAM case */
f3705d53 2325 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2326 & TARGET_PAGE_MASK)
cc5bea60 2327 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2328 switch (endian) {
2329 case DEVICE_LITTLE_ENDIAN:
2330 val = lduw_le_p(ptr);
2331 break;
2332 case DEVICE_BIG_ENDIAN:
2333 val = lduw_be_p(ptr);
2334 break;
2335 default:
2336 val = lduw_p(ptr);
2337 break;
2338 }
733f0b02
MT
2339 }
2340 return val;
aab33094
FB
2341}
2342
a8170e5e 2343uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2344{
2345 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2346}
2347
a8170e5e 2348uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2349{
2350 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2351}
2352
a8170e5e 2353uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2354{
2355 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2356}
2357
8df1cd07
FB
 2358/* warning: addr must be aligned. The ram page is not marked as dirty
2359 and the code inside is not invalidated. It is useful if the dirty
2360 bits are used to track modified PTEs */
a8170e5e 2361void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2362{
8df1cd07 2363 uint8_t *ptr;
f3705d53 2364 MemoryRegionSection *section;
8df1cd07 2365
ac1970fb 2366 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2367
f3705d53 2368 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2369 addr = memory_region_section_addr(section, addr);
f3705d53 2370 if (memory_region_is_ram(section->mr)) {
37ec01d4 2371 section = &phys_sections[phys_section_rom];
06ef3525 2372 }
37ec01d4 2373 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2374 } else {
f3705d53 2375 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2376 & TARGET_PAGE_MASK)
cc5bea60 2377 + memory_region_section_addr(section, addr);
5579c7f3 2378 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2379 stl_p(ptr, val);
74576198
AL
2380
2381 if (unlikely(in_migration)) {
2382 if (!cpu_physical_memory_is_dirty(addr1)) {
2383 /* invalidate code */
2384 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2385 /* set dirty bit */
f7c11b53
YT
2386 cpu_physical_memory_set_dirty_flags(
2387 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2388 }
2389 }
8df1cd07
FB
2390 }
2391}
2392
a8170e5e 2393void stq_phys_notdirty(hwaddr addr, uint64_t val)
bc98a7ef 2394{
bc98a7ef 2395 uint8_t *ptr;
f3705d53 2396 MemoryRegionSection *section;
bc98a7ef 2397
ac1970fb 2398 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2399
f3705d53 2400 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2401 addr = memory_region_section_addr(section, addr);
f3705d53 2402 if (memory_region_is_ram(section->mr)) {
37ec01d4 2403 section = &phys_sections[phys_section_rom];
06ef3525 2404 }
bc98a7ef 2405#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2406 io_mem_write(section->mr, addr, val >> 32, 4);
2407 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 2408#else
37ec01d4
AK
2409 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2410 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
2411#endif
2412 } else {
f3705d53 2413 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2414 & TARGET_PAGE_MASK)
cc5bea60 2415 + memory_region_section_addr(section, addr));
bc98a7ef
JM
2416 stq_p(ptr, val);
2417 }
2418}
2419
8df1cd07 2420/* warning: addr must be aligned */
a8170e5e 2421static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2422 enum device_endian endian)
8df1cd07 2423{
8df1cd07 2424 uint8_t *ptr;
f3705d53 2425 MemoryRegionSection *section;
8df1cd07 2426
ac1970fb 2427 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2428
f3705d53 2429 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2430 addr = memory_region_section_addr(section, addr);
f3705d53 2431 if (memory_region_is_ram(section->mr)) {
37ec01d4 2432 section = &phys_sections[phys_section_rom];
06ef3525 2433 }
1e78bcc1
AG
2434#if defined(TARGET_WORDS_BIGENDIAN)
2435 if (endian == DEVICE_LITTLE_ENDIAN) {
2436 val = bswap32(val);
2437 }
2438#else
2439 if (endian == DEVICE_BIG_ENDIAN) {
2440 val = bswap32(val);
2441 }
2442#endif
37ec01d4 2443 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
2444 } else {
2445 unsigned long addr1;
f3705d53 2446 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2447 + memory_region_section_addr(section, addr);
8df1cd07 2448 /* RAM case */
5579c7f3 2449 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2450 switch (endian) {
2451 case DEVICE_LITTLE_ENDIAN:
2452 stl_le_p(ptr, val);
2453 break;
2454 case DEVICE_BIG_ENDIAN:
2455 stl_be_p(ptr, val);
2456 break;
2457 default:
2458 stl_p(ptr, val);
2459 break;
2460 }
51d7a9eb 2461 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2462 }
2463}
2464
a8170e5e 2465void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2466{
2467 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2468}
2469
a8170e5e 2470void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2471{
2472 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2473}
2474
a8170e5e 2475void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2476{
2477 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2478}
2479
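/* Illustrative sketch (not part of exec.c): a device model with a fixed
 * little-endian register layout uses the _le_ accessors above so the stored
 * byte order does not depend on TARGET_WORDS_BIGENDIAN.  The address is made
 * up for the example.
 *
 *     stl_le_phys(0xfe000000, 0x12345678);   // bytes 78 56 34 12 in guest RAM
 *     uint32_t v = ldl_le_phys(0xfe000000);  // v == 0x12345678 on any target
 */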
aab33094 2480/* XXX: optimize */
a8170e5e 2481void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2482{
2483 uint8_t v = val;
2484 cpu_physical_memory_write(addr, &v, 1);
2485}
2486
733f0b02 2487/* warning: addr must be aligned */
a8170e5e 2488static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2489 enum device_endian endian)
aab33094 2490{
733f0b02 2491 uint8_t *ptr;
f3705d53 2492 MemoryRegionSection *section;
733f0b02 2493
ac1970fb 2494 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2495
f3705d53 2496 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2497 addr = memory_region_section_addr(section, addr);
f3705d53 2498 if (memory_region_is_ram(section->mr)) {
37ec01d4 2499 section = &phys_sections[phys_section_rom];
06ef3525 2500 }
1e78bcc1
AG
2501#if defined(TARGET_WORDS_BIGENDIAN)
2502 if (endian == DEVICE_LITTLE_ENDIAN) {
2503 val = bswap16(val);
2504 }
2505#else
2506 if (endian == DEVICE_BIG_ENDIAN) {
2507 val = bswap16(val);
2508 }
2509#endif
37ec01d4 2510 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
2511 } else {
2512 unsigned long addr1;
f3705d53 2513 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2514 + memory_region_section_addr(section, addr);
733f0b02
MT
2515 /* RAM case */
2516 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2517 switch (endian) {
2518 case DEVICE_LITTLE_ENDIAN:
2519 stw_le_p(ptr, val);
2520 break;
2521 case DEVICE_BIG_ENDIAN:
2522 stw_be_p(ptr, val);
2523 break;
2524 default:
2525 stw_p(ptr, val);
2526 break;
2527 }
51d7a9eb 2528 invalidate_and_set_dirty(addr1, 2);
733f0b02 2529 }
aab33094
FB
2530}
2531
a8170e5e 2532void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2533{
2534 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2535}
2536
a8170e5e 2537void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2538{
2539 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2540}
2541
a8170e5e 2542void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2543{
2544 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2545}
2546
aab33094 2547/* XXX: optimize */
a8170e5e 2548void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2549{
2550 val = tswap64(val);
71d2b725 2551 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2552}
2553
a8170e5e 2554void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2555{
2556 val = cpu_to_le64(val);
2557 cpu_physical_memory_write(addr, &val, 8);
2558}
2559
a8170e5e 2560void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2561{
2562 val = cpu_to_be64(val);
2563 cpu_physical_memory_write(addr, &val, 8);
2564}
2565
5e2972fd 2566/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2567int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2568 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2569{
2570 int l;
a8170e5e 2571 hwaddr phys_addr;
9b3c35e0 2572 target_ulong page;
13eb76e0
FB
2573
2574 while (len > 0) {
2575 page = addr & TARGET_PAGE_MASK;
2576 phys_addr = cpu_get_phys_page_debug(env, page);
2577 /* if no physical page mapped, return an error */
2578 if (phys_addr == -1)
2579 return -1;
2580 l = (page + TARGET_PAGE_SIZE) - addr;
2581 if (l > len)
2582 l = len;
5e2972fd 2583 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2584 if (is_write)
2585 cpu_physical_memory_write_rom(phys_addr, buf, l);
2586 else
5e2972fd 2587 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2588 len -= l;
2589 buf += l;
2590 addr += l;
2591 }
2592 return 0;
2593}
a68fe89c 2594#endif
13eb76e0 2595
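/* Illustrative sketch (not part of exec.c): how a debugger front end such as
 * the gdbstub reads guest virtual memory through cpu_memory_rw_debug().
 * env and vaddr are assumed to come from the caller.
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, vaddr, insn, sizeof(insn), 0) < 0) {
 *         // no valid mapping at vaddr
 *     }
 */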
8e4a424b
BS
2596#if !defined(CONFIG_USER_ONLY)
2597
2598/*
2599 * A helper function for the _utterly broken_ virtio device model to find out if
2600 * it's running on a big endian machine. Don't do this at home kids!
2601 */
2602bool virtio_is_big_endian(void);
2603bool virtio_is_big_endian(void)
2604{
2605#if defined(TARGET_WORDS_BIGENDIAN)
2606 return true;
2607#else
2608 return false;
2609#endif
2610}
2611
2612#endif
2613
76f35538 2614#ifndef CONFIG_USER_ONLY
a8170e5e 2615bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2616{
2617 MemoryRegionSection *section;
2618
ac1970fb
AK
2619 section = phys_page_find(address_space_memory.dispatch,
2620 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
2621
2622 return !(memory_region_is_ram(section->mr) ||
2623 memory_region_is_romd(section->mr));
2624}
2625#endif