/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

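/* The physical page map is a multi-level radix tree: each interior node
 * holds L2_SIZE entries, and a leaf entry stores an index into
 * phys_sections.  phys_page_set_level() descends the tree, allocating
 * nodes on demand, and turns an entry into a leaf directly whenever a
 * whole aligned subtree is covered by the range being mapped. */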
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

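/* Look up the section covering a physical page index.  A lookup that
 * walks off the populated part of the tree resolves to
 * phys_section_unassigned rather than failing. */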
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

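/* Find the CPUState with the given index by walking the global CPU
 * list; returns NULL if no CPU with that index exists. */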
CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

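/* A minimal usage sketch for the watchpoint API above (hypothetical
 * values, not taken from this file): a gdbstub-style client watching
 * four bytes for writes would do roughly
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4,
 *                               BP_GDB | BP_MEM_WRITE, &wp) < 0) {
 *         // report failure to the debugger
 *     }
 *
 * and later drop it with cpu_watchpoint_remove(env, addr, 4,
 * BP_GDB | BP_MEM_WRITE) or cpu_watchpoint_remove_by_ref(env, wp). */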
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

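/* Compute the value stored in a TLB entry's iotlb field for one page:
 * for RAM it is the page's ram_addr_t OR'ed with a dirty-tracking
 * section index (notdirty or rom); for MMIO it is the index of the
 * section in phys_sections plus the offset within the region. */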
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

808
62a2744c
SY
809void qemu_flush_coalesced_mmio_buffer(void)
810{
811 if (kvm_enabled())
812 kvm_flush_coalesced_mmio_buffer();
813}
814
b2a8658e
UD
815void qemu_mutex_lock_ramlist(void)
816{
817 qemu_mutex_lock(&ram_list.mutex);
818}
819
820void qemu_mutex_unlock_ramlist(void)
821{
822 qemu_mutex_unlock(&ram_list.mutex);
823}
824
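/* -mem-path support: back guest RAM with a file on a hugetlbfs mount so
 * the guest is served by huge pages.  Per the #if below, this is only
 * wired up for Linux hosts and not for s390x targets. */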
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    filename = g_strdup_printf("%s/qemu_back_mem.XXXXXX", path);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

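/* Pick an offset in the ram_addr_t space for a new block: scan the gaps
 * between existing blocks and take the smallest gap that still fits
 * (best-fit), aborting if no gap is large enough. */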
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

951
652d7ec2 952ram_addr_t last_ram_offset(void)
d17b5288
AW
953{
954 RAMBlock *block;
955 ram_addr_t last = 0;
956
a3161038 957 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
958 last = MAX(last, block->offset + block->length);
959
960 return last;
961}
962
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

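/* Register a new RAM block of the given size.  The backing memory is,
 * in order of preference: the caller-provided pointer, a hugetlbfs file
 * (-mem-path), Xen's allocator, kvm_vmalloc() on hosts that need it, or
 * plain qemu_vmalloc(). */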
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

e9a1ab19 1100
c5705a77 1101ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1102{
c5705a77 1103 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1104}
1105
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

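/* Dirty-tracking write path: pages whose dirty flags are not all set
 * are mapped through io_mem_notdirty.  A write first invalidates any
 * translated blocks derived from the page, performs the store, sets the
 * dirty flags, and re-enables direct writes once the page is fully
 * dirty. */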
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

1463
0f459d16 1464/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1465static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1466{
9349b4f9 1467 CPUArchState *env = cpu_single_env;
06d55cc1 1468 target_ulong pc, cs_base;
0f459d16 1469 target_ulong vaddr;
a1d1bb31 1470 CPUWatchpoint *wp;
06d55cc1 1471 int cpu_flags;
0f459d16 1472
06d55cc1
AL
1473 if (env->watchpoint_hit) {
1474 /* We re-entered the check after replacing the TB. Now raise
1475 * the debug interrupt so that is will trigger after the
1476 * current instruction. */
1477 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1478 return;
1479 }
2e70f6ef 1480 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1481 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1482 if ((vaddr == (wp->vaddr & len_mask) ||
1483 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1484 wp->flags |= BP_WATCHPOINT_HIT;
1485 if (!env->watchpoint_hit) {
1486 env->watchpoint_hit = wp;
5a316526 1487 tb_check_watchpoint(env);
6e140f28
AL
1488 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1489 env->exception_index = EXCP_DEBUG;
488d6577 1490 cpu_loop_exit(env);
6e140f28
AL
1491 } else {
1492 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1493 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1494 cpu_resume_from_signal(env, NULL);
6e140f28 1495 }
06d55cc1 1496 }
6e140f28
AL
1497 } else {
1498 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1499 }
1500 }
1501}
1502
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

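/* Subpage dispatch: when one target page is split between several
 * memory regions, its TLB entry points at a subpage_t whose
 * sub_section[] table maps each byte offset within the page back to the
 * real section; reads and writes are forwarded accordingly. */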
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

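/* Memory listener callbacks: on every topology change the begin hooks
 * throw away the old state and region_add/region_nop repopulate the
 * dispatch tree, one section at a time. */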
static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

93632747 1749static MemoryListener core_memory_listener = {
50c1e149 1750 .begin = core_begin,
93632747
AK
1751 .log_global_start = core_log_global_start,
1752 .log_global_stop = core_log_global_stop,
ac1970fb 1753 .priority = 1,
93632747
AK
1754};
1755
4855d41a
AK
1756static MemoryListener io_memory_listener = {
1757 .region_add = io_region_add,
1758 .region_del = io_region_del,
4855d41a
AK
1759 .priority = 0,
1760};
1761
1d71148e
AK
1762static MemoryListener tcg_memory_listener = {
1763 .commit = tcg_commit,
1764};
1765
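/*
 * Example (illustrative sketch): clients outside this file hook topology
 * changes the same way -- fill in only the callbacks of interest and
 * register against an address space; 'priority' orders delivery relative
 * to the core/io/tcg listeners above. Names are hypothetical.
 */
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    /* e.g. establish host-side mappings for the new section */
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .priority = 10,
};
/* usage: memory_listener_register(&example_listener, &address_space_memory); */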
ac1970fb
AK
1766void address_space_init_dispatch(AddressSpace *as)
1767{
1768 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1769
1770 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1771 d->listener = (MemoryListener) {
1772 .begin = mem_begin,
1773 .region_add = mem_add,
1774 .region_nop = mem_add,
1775 .priority = 0,
1776 };
1777 as->dispatch = d;
1778 memory_listener_register(&d->listener, as);
1779}
1780
83f3c251
AK
1781void address_space_destroy_dispatch(AddressSpace *as)
1782{
1783 AddressSpaceDispatch *d = as->dispatch;
1784
1785 memory_listener_unregister(&d->listener);
1786 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1787 g_free(d);
1788 as->dispatch = NULL;
1789}
1790
62152b8a
AK
1791static void memory_map_init(void)
1792{
7267c094 1793 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1794 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1795 address_space_init(&address_space_memory, system_memory);
1796 address_space_memory.name = "memory";
309cb471 1797
7267c094 1798 system_io = g_malloc(sizeof(*system_io));
309cb471 1799 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1800 address_space_init(&address_space_io, system_io);
1801 address_space_io.name = "I/O";
93632747 1802
f6790af6
AK
1803 memory_listener_register(&core_memory_listener, &address_space_memory);
1804 memory_listener_register(&io_memory_listener, &address_space_io);
1805 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1806
1807 dma_context_init(&dma_context_memory, &address_space_memory,
1808 NULL, NULL, NULL);
62152b8a
AK
1809}
1810
1811MemoryRegion *get_system_memory(void)
1812{
1813 return system_memory;
1814}
1815
309cb471
AK
1816MemoryRegion *get_system_io(void)
1817{
1818 return system_io;
1819}
1820
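/*
 * Example (illustrative sketch): a board model plugging RAM beneath the
 * region returned by get_system_memory(). The region name, base address
 * and omission of vmstate registration are illustrative.
 */
static void example_add_ram(ram_addr_t size)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, "example.ram", size);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}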
e2eef170
PB
1821#endif /* !defined(CONFIG_USER_ONLY) */
1822
13eb76e0
FB
1823/* physical memory access (slow version, mainly for debug) */
1824#if defined(CONFIG_USER_ONLY)
9349b4f9 1825int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1826 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1827{
1828 int l, flags;
1829 target_ulong page;
53a5960a 1830 void *p;
13eb76e0
FB
1831
1832 while (len > 0) {
1833 page = addr & TARGET_PAGE_MASK;
1834 l = (page + TARGET_PAGE_SIZE) - addr;
1835 if (l > len)
1836 l = len;
1837 flags = page_get_flags(page);
1838 if (!(flags & PAGE_VALID))
a68fe89c 1839 return -1;
13eb76e0
FB
1840 if (is_write) {
1841 if (!(flags & PAGE_WRITE))
a68fe89c 1842 return -1;
579a97f7 1843 /* XXX: this code should not depend on lock_user */
72fb7daa 1844 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1845 return -1;
72fb7daa
AJ
1846 memcpy(p, buf, l);
1847 unlock_user(p, addr, l);
13eb76e0
FB
1848 } else {
1849 if (!(flags & PAGE_READ))
a68fe89c 1850 return -1;
579a97f7 1851 /* XXX: this code should not depend on lock_user */
72fb7daa 1852 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1853 return -1;
72fb7daa 1854 memcpy(buf, p, l);
5b257578 1855 unlock_user(p, addr, 0);
13eb76e0
FB
1856 }
1857 len -= l;
1858 buf += l;
1859 addr += l;
1860 }
a68fe89c 1861 return 0;
13eb76e0 1862}
8df1cd07 1863
13eb76e0 1864#else
51d7a9eb 1865
a8170e5e
AK
1866static void invalidate_and_set_dirty(hwaddr addr,
1867 hwaddr length)
51d7a9eb
AP
1868{
1869 if (!cpu_physical_memory_is_dirty(addr)) {
1870 /* invalidate code */
1871 tb_invalidate_phys_page_range(addr, addr + length, 0);
1872 /* set dirty bit */
1873 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1874 }
e226939d 1875 xen_modified_memory(addr, length);
51d7a9eb
AP
1876}
1877
a8170e5e 1878void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1879 int len, bool is_write)
13eb76e0 1880{
ac1970fb 1881 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1882 int l;
13eb76e0
FB
1883 uint8_t *ptr;
1884 uint32_t val;
a8170e5e 1885 hwaddr page;
f3705d53 1886 MemoryRegionSection *section;
3b46e624 1887
13eb76e0
FB
1888 while (len > 0) {
1889 page = addr & TARGET_PAGE_MASK;
1890 l = (page + TARGET_PAGE_SIZE) - addr;
1891 if (l > len)
1892 l = len;
ac1970fb 1893 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1894
13eb76e0 1895 if (is_write) {
f3705d53 1896 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1897 hwaddr addr1;
cc5bea60 1898 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
1899 /* XXX: could force cpu_single_env to NULL to avoid
1900 potential bugs */
6c2934db 1901 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1902 /* 32 bit write access */
c27004ec 1903 val = ldl_p(buf);
37ec01d4 1904 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1905 l = 4;
6c2934db 1906 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1907 /* 16 bit write access */
c27004ec 1908 val = lduw_p(buf);
37ec01d4 1909 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
1910 l = 2;
1911 } else {
1c213d19 1912 /* 8 bit write access */
c27004ec 1913 val = ldub_p(buf);
37ec01d4 1914 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
1915 l = 1;
1916 }
f3705d53 1917 } else if (!section->readonly) {
8ca5692d 1918 ram_addr_t addr1;
f3705d53 1919 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1920 + memory_region_section_addr(section, addr);
13eb76e0 1921 /* RAM case */
5579c7f3 1922 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1923 memcpy(ptr, buf, l);
51d7a9eb 1924 invalidate_and_set_dirty(addr1, l);
050a0ddf 1925 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1926 }
1927 } else {
cc5bea60
BS
1928 if (!(memory_region_is_ram(section->mr) ||
1929 memory_region_is_romd(section->mr))) {
a8170e5e 1930 hwaddr addr1;
13eb76e0 1931 /* I/O case */
cc5bea60 1932 addr1 = memory_region_section_addr(section, addr);
6c2934db 1933 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1934 /* 32 bit read access */
37ec01d4 1935 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1936 stl_p(buf, val);
13eb76e0 1937 l = 4;
6c2934db 1938 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1939 /* 16 bit read access */
37ec01d4 1940 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1941 stw_p(buf, val);
13eb76e0
FB
1942 l = 2;
1943 } else {
1c213d19 1944 /* 8 bit read access */
37ec01d4 1945 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1946 stb_p(buf, val);
13eb76e0
FB
1947 l = 1;
1948 }
1949 } else {
1950 /* RAM case */
0a1b357f 1951 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
1952 + memory_region_section_addr(section,
1953 addr));
f3705d53 1954 memcpy(buf, ptr, l);
050a0ddf 1955 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1956 }
1957 }
1958 len -= l;
1959 buf += l;
1960 addr += l;
1961 }
1962}
8df1cd07 1963
a8170e5e 1964void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1965 const uint8_t *buf, int len)
1966{
1967 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1968}
1969
1970/**
1971 * address_space_read: read from an address space.
1972 *
1973 * @as: #AddressSpace to be accessed
1974 * @addr: address within that address space
1975 * @buf: buffer with the data transferred
 * @len: length in bytes of the data transferred
1976 */
a8170e5e 1977void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
1978{
1979 address_space_rw(as, addr, buf, len, false);
1980}
1981
1982
a8170e5e 1983void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1984 int len, int is_write)
1985{
1986 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1987}
1988
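/*
 * Example (illustrative sketch): the rw helpers above are the safe way to
 * touch guest memory that may straddle pages or sit behind MMIO; the
 * aligned word helpers further below avoid the per-page loop.
 */
static void example_copy_in(hwaddr dest, const uint8_t *src, int len)
{
    cpu_physical_memory_write(dest, src, len);
}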
d0ecd2aa 1989/* used for ROM loading : can write in RAM and ROM */
a8170e5e 1990void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1991 const uint8_t *buf, int len)
1992{
ac1970fb 1993 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
1994 int l;
1995 uint8_t *ptr;
a8170e5e 1996 hwaddr page;
f3705d53 1997 MemoryRegionSection *section;
3b46e624 1998
d0ecd2aa
FB
1999 while (len > 0) {
2000 page = addr & TARGET_PAGE_MASK;
2001 l = (page + TARGET_PAGE_SIZE) - addr;
2002 if (l > len)
2003 l = len;
ac1970fb 2004 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 2005
cc5bea60
BS
2006 if (!(memory_region_is_ram(section->mr) ||
2007 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
2008 /* do nothing */
2009 } else {
2010 unsigned long addr1;
f3705d53 2011 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 2012 + memory_region_section_addr(section, addr);
d0ecd2aa 2013 /* ROM/RAM case */
5579c7f3 2014 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2015 memcpy(ptr, buf, l);
51d7a9eb 2016 invalidate_and_set_dirty(addr1, l);
050a0ddf 2017 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
2018 }
2019 len -= l;
2020 buf += l;
2021 addr += l;
2022 }
2023}
2024
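/*
 * Example (illustrative sketch): a firmware loader uses the ROM-capable
 * writer, which unlike cpu_physical_memory_write() also stores into
 * readonly regions. The base address is hypothetical.
 */
static void example_load_firmware(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, size);
}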
6d16c2f8
AL
2025typedef struct {
2026 void *buffer;
a8170e5e
AK
2027 hwaddr addr;
2028 hwaddr len;
6d16c2f8
AL
2029} BounceBuffer;
2030
2031static BounceBuffer bounce;
2032
ba223c29
AL
2033typedef struct MapClient {
2034 void *opaque;
2035 void (*callback)(void *opaque);
72cf2d4f 2036 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2037} MapClient;
2038
72cf2d4f
BS
2039static QLIST_HEAD(map_client_list, MapClient) map_client_list
2040 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2041
2042void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2043{
7267c094 2044 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2045
2046 client->opaque = opaque;
2047 client->callback = callback;
72cf2d4f 2048 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2049 return client;
2050}
2051
8b9c99d9 2052static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2053{
2054 MapClient *client = (MapClient *)_client;
2055
72cf2d4f 2056 QLIST_REMOVE(client, link);
7267c094 2057 g_free(client);
ba223c29
AL
2058}
2059
2060static void cpu_notify_map_clients(void)
2061{
2062 MapClient *client;
2063
72cf2d4f
BS
2064 while (!QLIST_EMPTY(&map_client_list)) {
2065 client = QLIST_FIRST(&map_client_list);
ba223c29 2066 client->callback(client->opaque);
34d5e948 2067 cpu_unregister_map_client(client);
ba223c29
AL
2068 }
2069}
2070
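/*
 * Example (illustrative sketch): a DMA user that lost the race for the
 * bounce buffer registers a callback and retries its mapping when
 * cpu_notify_map_clients() fires.
 */
static void example_map_retry(void *opaque)
{
    /* re-issue the cpu_physical_memory_map() that previously failed */
}
/* usage: cpu_register_map_client(state, example_map_retry); */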
6d16c2f8
AL
2071/* Map a physical memory region into a host virtual address.
2072 * May map a subset of the requested range, given by and returned in *plen.
2073 * May return NULL if resources needed to perform the mapping are exhausted.
2074 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2075 * Use cpu_register_map_client() to know when retrying the map operation is
2076 * likely to succeed.
6d16c2f8 2077 */
ac1970fb 2078void *address_space_map(AddressSpace *as,
a8170e5e
AK
2079 hwaddr addr,
2080 hwaddr *plen,
ac1970fb 2081 bool is_write)
6d16c2f8 2082{
ac1970fb 2083 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
2084 hwaddr len = *plen;
2085 hwaddr todo = 0;
6d16c2f8 2086 int l;
a8170e5e 2087 hwaddr page;
f3705d53 2088 MemoryRegionSection *section;
f15fbc4b 2089 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2090 ram_addr_t rlen;
2091 void *ret;
6d16c2f8
AL
2092
2093 while (len > 0) {
2094 page = addr & TARGET_PAGE_MASK;
2095 l = (page + TARGET_PAGE_SIZE) - addr;
2096 if (l > len)
2097 l = len;
ac1970fb 2098 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2099
f3705d53 2100 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2101 if (todo || bounce.buffer) {
6d16c2f8
AL
2102 break;
2103 }
2104 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2105 bounce.addr = addr;
2106 bounce.len = l;
2107 if (!is_write) {
ac1970fb 2108 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2109 }
38bee5dc
SS
2110
2111 *plen = l;
2112 return bounce.buffer;
6d16c2f8 2113 }
8ab934f9 2114 if (!todo) {
f3705d53 2115 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2116 + memory_region_section_addr(section, addr);
8ab934f9 2117 }
6d16c2f8
AL
2118
2119 len -= l;
2120 addr += l;
38bee5dc 2121 todo += l;
6d16c2f8 2122 }
8ab934f9
SS
2123 rlen = todo;
2124 ret = qemu_ram_ptr_length(raddr, &rlen);
2125 *plen = rlen;
2126 return ret;
6d16c2f8
AL
2127}
2128
ac1970fb 2129/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2130 * Will also mark the memory as dirty if is_write == 1. access_len gives
2131 * the amount of memory that was actually read or written by the caller.
2132 */
a8170e5e
AK
2133void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2134 int is_write, hwaddr access_len)
6d16c2f8
AL
2135{
2136 if (buffer != bounce.buffer) {
2137 if (is_write) {
e890261f 2138 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2139 while (access_len) {
2140 unsigned l;
2141 l = TARGET_PAGE_SIZE;
2142 if (l > access_len)
2143 l = access_len;
51d7a9eb 2144 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2145 addr1 += l;
2146 access_len -= l;
2147 }
2148 }
868bb33f 2149 if (xen_enabled()) {
e41d7c69 2150 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2151 }
6d16c2f8
AL
2152 return;
2153 }
2154 if (is_write) {
ac1970fb 2155 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2156 }
f8a83245 2157 qemu_vfree(bounce.buffer);
6d16c2f8 2158 bounce.buffer = NULL;
ba223c29 2159 cpu_notify_map_clients();
6d16c2f8 2160}
d0ecd2aa 2161
a8170e5e
AK
2162void *cpu_physical_memory_map(hwaddr addr,
2163 hwaddr *plen,
ac1970fb
AK
2164 int is_write)
2165{
2166 return address_space_map(&address_space_memory, addr, plen, is_write);
2167}
2168
a8170e5e
AK
2169void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2170 int is_write, hwaddr access_len)
ac1970fb
AK
2171{
2172 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2173}
2174
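/*
 * Example (illustrative sketch): the canonical zero-copy pattern around
 * map/unmap. The mapping may come back shorter than requested, so a real
 * caller loops or falls back to cpu_physical_memory_write().
 */
static bool example_dma_write(hwaddr addr, const uint8_t *buf, hwaddr len)
{
    hwaddr plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1);

    if (!host) {
        return false;
    }
    if (plen < len) {
        /* partial mapping: give it back untouched and let the caller retry */
        cpu_physical_memory_unmap(host, plen, 1, 0);
        return false;
    }
    memcpy(host, buf, len);
    cpu_physical_memory_unmap(host, plen, 1, len);
    return true;
}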
8df1cd07 2175/* warning: addr must be aligned */
a8170e5e 2176static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2177 enum device_endian endian)
8df1cd07 2178{
8df1cd07
FB
2179 uint8_t *ptr;
2180 uint32_t val;
f3705d53 2181 MemoryRegionSection *section;
8df1cd07 2182
ac1970fb 2183 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2184
cc5bea60
BS
2185 if (!(memory_region_is_ram(section->mr) ||
2186 memory_region_is_romd(section->mr))) {
8df1cd07 2187 /* I/O case */
cc5bea60 2188 addr = memory_region_section_addr(section, addr);
37ec01d4 2189 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
2190#if defined(TARGET_WORDS_BIGENDIAN)
2191 if (endian == DEVICE_LITTLE_ENDIAN) {
2192 val = bswap32(val);
2193 }
2194#else
2195 if (endian == DEVICE_BIG_ENDIAN) {
2196 val = bswap32(val);
2197 }
2198#endif
8df1cd07
FB
2199 } else {
2200 /* RAM case */
f3705d53 2201 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2202 & TARGET_PAGE_MASK)
cc5bea60 2203 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2204 switch (endian) {
2205 case DEVICE_LITTLE_ENDIAN:
2206 val = ldl_le_p(ptr);
2207 break;
2208 case DEVICE_BIG_ENDIAN:
2209 val = ldl_be_p(ptr);
2210 break;
2211 default:
2212 val = ldl_p(ptr);
2213 break;
2214 }
8df1cd07
FB
2215 }
2216 return val;
2217}
2218
a8170e5e 2219uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2220{
2221 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2222}
2223
a8170e5e 2224uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2225{
2226 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2227}
2228
a8170e5e 2229uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2230{
2231 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2232}
2233
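/*
 * Example (illustrative sketch): reading a 32-bit little-endian device
 * register; the _le/_be variants byte-swap as needed independent of the
 * target's native order. The register offset is hypothetical.
 */
static uint32_t example_read_status(hwaddr regs_base)
{
    return ldl_le_phys(regs_base + 0x04);
}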
84b7b8e7 2234/* warning: addr must be aligned */
a8170e5e 2235static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2236 enum device_endian endian)
84b7b8e7 2237{
84b7b8e7
FB
2238 uint8_t *ptr;
2239 uint64_t val;
f3705d53 2240 MemoryRegionSection *section;
84b7b8e7 2241
ac1970fb 2242 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2243
cc5bea60
BS
2244 if (!(memory_region_is_ram(section->mr) ||
2245 memory_region_is_romd(section->mr))) {
84b7b8e7 2246 /* I/O case */
cc5bea60 2247 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
2248
2249 /* XXX This is broken when device endian != cpu endian.
2250 Fix and add "endian" variable check */
84b7b8e7 2251#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2252 val = io_mem_read(section->mr, addr, 4) << 32;
2253 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2254#else
37ec01d4
AK
2255 val = io_mem_read(section->mr, addr, 4);
2256 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
2257#endif
2258 } else {
2259 /* RAM case */
f3705d53 2260 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2261 & TARGET_PAGE_MASK)
cc5bea60 2262 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2263 switch (endian) {
2264 case DEVICE_LITTLE_ENDIAN:
2265 val = ldq_le_p(ptr);
2266 break;
2267 case DEVICE_BIG_ENDIAN:
2268 val = ldq_be_p(ptr);
2269 break;
2270 default:
2271 val = ldq_p(ptr);
2272 break;
2273 }
84b7b8e7
FB
2274 }
2275 return val;
2276}
2277
a8170e5e 2278uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2279{
2280 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2281}
2282
a8170e5e 2283uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2284{
2285 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2286}
2287
a8170e5e 2288uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2289{
2290 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2291}
2292
aab33094 2293/* XXX: optimize */
a8170e5e 2294uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2295{
2296 uint8_t val;
2297 cpu_physical_memory_read(addr, &val, 1);
2298 return val;
2299}
2300
733f0b02 2301/* warning: addr must be aligned */
a8170e5e 2302static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2303 enum device_endian endian)
aab33094 2304{
733f0b02
MT
2305 uint8_t *ptr;
2306 uint64_t val;
f3705d53 2307 MemoryRegionSection *section;
733f0b02 2308
ac1970fb 2309 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2310
cc5bea60
BS
2311 if (!(memory_region_is_ram(section->mr) ||
2312 memory_region_is_romd(section->mr))) {
733f0b02 2313 /* I/O case */
cc5bea60 2314 addr = memory_region_section_addr(section, addr);
37ec01d4 2315 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
2316#if defined(TARGET_WORDS_BIGENDIAN)
2317 if (endian == DEVICE_LITTLE_ENDIAN) {
2318 val = bswap16(val);
2319 }
2320#else
2321 if (endian == DEVICE_BIG_ENDIAN) {
2322 val = bswap16(val);
2323 }
2324#endif
733f0b02
MT
2325 } else {
2326 /* RAM case */
f3705d53 2327 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2328 & TARGET_PAGE_MASK)
cc5bea60 2329 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2330 switch (endian) {
2331 case DEVICE_LITTLE_ENDIAN:
2332 val = lduw_le_p(ptr);
2333 break;
2334 case DEVICE_BIG_ENDIAN:
2335 val = lduw_be_p(ptr);
2336 break;
2337 default:
2338 val = lduw_p(ptr);
2339 break;
2340 }
733f0b02
MT
2341 }
2342 return val;
aab33094
FB
2343}
2344
a8170e5e 2345uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2346{
2347 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2348}
2349
a8170e5e 2350uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2351{
2352 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2353}
2354
a8170e5e 2355uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2356{
2357 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2358}
2359
8df1cd07
FB
2360/* warning: addr must be aligned. The ram page is not marked as dirty
2361 and the code inside is not invalidated. It is useful if the dirty
2362 bits are used to track modified PTEs */
a8170e5e 2363void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2364{
8df1cd07 2365 uint8_t *ptr;
f3705d53 2366 MemoryRegionSection *section;
8df1cd07 2367
ac1970fb 2368 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2369
f3705d53 2370 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2371 addr = memory_region_section_addr(section, addr);
f3705d53 2372 if (memory_region_is_ram(section->mr)) {
37ec01d4 2373 section = &phys_sections[phys_section_rom];
06ef3525 2374 }
37ec01d4 2375 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2376 } else {
f3705d53 2377 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2378 & TARGET_PAGE_MASK)
cc5bea60 2379 + memory_region_section_addr(section, addr);
5579c7f3 2380 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2381 stl_p(ptr, val);
74576198
AL
2382
2383 if (unlikely(in_migration)) {
2384 if (!cpu_physical_memory_is_dirty(addr1)) {
2385 /* invalidate code */
2386 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2387 /* set dirty bit */
f7c11b53
YT
2388 cpu_physical_memory_set_dirty_flags(
2389 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2390 }
2391 }
8df1cd07
FB
2392 }
2393}
2394
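/*
 * Example (illustrative sketch): the intended use of the _notdirty store --
 * a target MMU helper setting an accessed bit in a guest PTE without
 * flagging the page dirty, so dirty tracking keeps reflecting only guest
 * stores. The bit position is hypothetical.
 */
static void example_pte_set_accessed(hwaddr pte_addr)
{
    stl_phys_notdirty(pte_addr, ldl_phys(pte_addr) | 0x20);
}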
a8170e5e 2395void stq_phys_notdirty(hwaddr addr, uint64_t val)
bc98a7ef 2396{
bc98a7ef 2397 uint8_t *ptr;
f3705d53 2398 MemoryRegionSection *section;
bc98a7ef 2399
ac1970fb 2400 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2401
f3705d53 2402 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2403 addr = memory_region_section_addr(section, addr);
f3705d53 2404 if (memory_region_is_ram(section->mr)) {
37ec01d4 2405 section = &phys_sections[phys_section_rom];
06ef3525 2406 }
bc98a7ef 2407#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2408 io_mem_write(section->mr, addr, val >> 32, 4);
2409 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 2410#else
37ec01d4
AK
2411 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2412 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
2413#endif
2414 } else {
f3705d53 2415 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2416 & TARGET_PAGE_MASK)
cc5bea60 2417 + memory_region_section_addr(section, addr));
bc98a7ef
JM
2418 stq_p(ptr, val);
2419 }
2420}
2421
8df1cd07 2422/* warning: addr must be aligned */
a8170e5e 2423static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2424 enum device_endian endian)
8df1cd07 2425{
8df1cd07 2426 uint8_t *ptr;
f3705d53 2427 MemoryRegionSection *section;
8df1cd07 2428
ac1970fb 2429 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2430
f3705d53 2431 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2432 addr = memory_region_section_addr(section, addr);
f3705d53 2433 if (memory_region_is_ram(section->mr)) {
37ec01d4 2434 section = &phys_sections[phys_section_rom];
06ef3525 2435 }
1e78bcc1
AG
2436#if defined(TARGET_WORDS_BIGENDIAN)
2437 if (endian == DEVICE_LITTLE_ENDIAN) {
2438 val = bswap32(val);
2439 }
2440#else
2441 if (endian == DEVICE_BIG_ENDIAN) {
2442 val = bswap32(val);
2443 }
2444#endif
37ec01d4 2445 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
2446 } else {
2447 unsigned long addr1;
f3705d53 2448 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2449 + memory_region_section_addr(section, addr);
8df1cd07 2450 /* RAM case */
5579c7f3 2451 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2452 switch (endian) {
2453 case DEVICE_LITTLE_ENDIAN:
2454 stl_le_p(ptr, val);
2455 break;
2456 case DEVICE_BIG_ENDIAN:
2457 stl_be_p(ptr, val);
2458 break;
2459 default:
2460 stl_p(ptr, val);
2461 break;
2462 }
51d7a9eb 2463 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2464 }
2465}
2466
a8170e5e 2467void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2468{
2469 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2470}
2471
a8170e5e 2472void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2473{
2474 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2475}
2476
a8170e5e 2477void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2478{
2479 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2480}
2481
aab33094 2482/* XXX: optimize */
a8170e5e 2483void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2484{
2485 uint8_t v = val;
2486 cpu_physical_memory_write(addr, &v, 1);
2487}
2488
733f0b02 2489/* warning: addr must be aligned */
a8170e5e 2490static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2491 enum device_endian endian)
aab33094 2492{
733f0b02 2493 uint8_t *ptr;
f3705d53 2494 MemoryRegionSection *section;
733f0b02 2495
ac1970fb 2496 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2497
f3705d53 2498 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2499 addr = memory_region_section_addr(section, addr);
f3705d53 2500 if (memory_region_is_ram(section->mr)) {
37ec01d4 2501 section = &phys_sections[phys_section_rom];
06ef3525 2502 }
1e78bcc1
AG
2503#if defined(TARGET_WORDS_BIGENDIAN)
2504 if (endian == DEVICE_LITTLE_ENDIAN) {
2505 val = bswap16(val);
2506 }
2507#else
2508 if (endian == DEVICE_BIG_ENDIAN) {
2509 val = bswap16(val);
2510 }
2511#endif
37ec01d4 2512 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
2513 } else {
2514 unsigned long addr1;
f3705d53 2515 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2516 + memory_region_section_addr(section, addr);
733f0b02
MT
2517 /* RAM case */
2518 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2519 switch (endian) {
2520 case DEVICE_LITTLE_ENDIAN:
2521 stw_le_p(ptr, val);
2522 break;
2523 case DEVICE_BIG_ENDIAN:
2524 stw_be_p(ptr, val);
2525 break;
2526 default:
2527 stw_p(ptr, val);
2528 break;
2529 }
51d7a9eb 2530 invalidate_and_set_dirty(addr1, 2);
733f0b02 2531 }
aab33094
FB
2532}
2533
a8170e5e 2534void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2535{
2536 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2537}
2538
a8170e5e 2539void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2540{
2541 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2542}
2543
a8170e5e 2544void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2545{
2546 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2547}
2548
aab33094 2549/* XXX: optimize */
a8170e5e 2550void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2551{
2552 val = tswap64(val);
71d2b725 2553 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2554}
2555
a8170e5e 2556void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2557{
2558 val = cpu_to_le64(val);
2559 cpu_physical_memory_write(addr, &val, 8);
2560}
2561
a8170e5e 2562void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2563{
2564 val = cpu_to_be64(val);
2565 cpu_physical_memory_write(addr, &val, 8);
2566}
2567
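/*
 * Example (illustrative sketch): posting a 64-bit value to a big-endian
 * device doorbell; stq_phys would use the target's native order instead.
 * The doorbell address and value are hypothetical.
 */
static void example_ring_doorbell(hwaddr doorbell, uint64_t cookie)
{
    stq_be_phys(doorbell, cookie);
}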
5e2972fd 2568/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2569int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2570 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2571{
2572 int l;
a8170e5e 2573 hwaddr phys_addr;
9b3c35e0 2574 target_ulong page;
13eb76e0
FB
2575
2576 while (len > 0) {
2577 page = addr & TARGET_PAGE_MASK;
2578 phys_addr = cpu_get_phys_page_debug(env, page);
2579 /* if no physical page mapped, return an error */
2580 if (phys_addr == -1)
2581 return -1;
2582 l = (page + TARGET_PAGE_SIZE) - addr;
2583 if (l > len)
2584 l = len;
5e2972fd 2585 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2586 if (is_write)
2587 cpu_physical_memory_write_rom(phys_addr, buf, l);
2588 else
5e2972fd 2589 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2590 len -= l;
2591 buf += l;
2592 addr += l;
2593 }
2594 return 0;
2595}
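/*
 * Example (illustrative sketch): a debugger stub reading guest virtual
 * memory; the helper walks the guest page tables via
 * cpu_get_phys_page_debug(), so it works for pages the TLB has never seen.
 */
static int example_debug_peek(CPUArchState *env, target_ulong va,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, va, buf, len, 0);
}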
a68fe89c 2596#endif
13eb76e0 2597
8e4a424b
BS
2598#if !defined(CONFIG_USER_ONLY)
2599
2600/*
2601 * A helper function for the _utterly broken_ virtio device model to find out if
2602 * it's running on a big endian machine. Don't do this at home kids!
2603 */
2604bool virtio_is_big_endian(void);
2605bool virtio_is_big_endian(void)
2606{
2607#if defined(TARGET_WORDS_BIGENDIAN)
2608 return true;
2609#else
2610 return false;
2611#endif
2612}
2613
2614#endif
2615
76f35538 2616#ifndef CONFIG_USER_ONLY
a8170e5e 2617bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2618{
2619 MemoryRegionSection *section;
2620
ac1970fb
AK
2621 section = phys_page_find(address_space_memory.dispatch,
2622 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
2623
2624 return !(memory_region_is_ram(section->mr) ||
2625 memory_region_is_romd(section->mr));
2626}
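/*
 * Example (illustrative sketch): a memory-dump helper skipping pages that
 * are backed by device MMIO rather than plain RAM/ROM.
 */
static bool example_page_is_dumpable(hwaddr addr)
{
    return !cpu_physical_memory_is_io(addr);
}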
2627#endif