Merge remote-tracking branch 'riku/linux-user-for-upstream' into staging
[qemu.git] / exec.c
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
432d268c 34#include "hw/xen.h"
1de7afc9
PB
35#include "qemu/timer.h"
36#include "qemu/config-file.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
67d95c15 52
67d3b957 53//#define DEBUG_UNASSIGNED
db7b5426 54//#define DEBUG_SUBPAGE
1196be37 55
e2eef170 56#if !defined(CONFIG_USER_ONLY)
9fa3e853 57int phys_ram_fd;
74576198 58static int in_migration;
94a6b54f 59
a3161038 60RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
61
62static MemoryRegion *system_memory;
309cb471 63static MemoryRegion *system_io;
62152b8a 64
f6790af6
AK
65AddressSpace address_space_io;
66AddressSpace address_space_memory;
9e11908f 67DMAContext dma_context_memory;
2673a5da 68
0e0df1e2 69MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
de712f94 70static MemoryRegion io_mem_subpage_ram;
0e0df1e2 71
e2eef170 72#endif
9fa3e853 73
9349b4f9 74CPUArchState *first_cpu;
6a00d601
FB
75/* current CPU in the current thread. It is only valid inside
76 cpu_exec() */
9349b4f9 77DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 78/* 0 = Do not count executed instructions.
bf20dc07 79 1 = Precise instruction counting.
2e70f6ef 80 2 = Adaptive rate instruction counting. */
5708fc66 81int use_icount;
6a00d601 82
e2eef170 83#if !defined(CONFIG_USER_ONLY)
4346ae3e 84
5312bd8b
AK
85static MemoryRegionSection *phys_sections;
86static unsigned phys_sections_nb, phys_sections_nb_alloc;
87static uint16_t phys_section_unassigned;
aa102231
AK
88static uint16_t phys_section_notdirty;
89static uint16_t phys_section_rom;
90static uint16_t phys_section_watch;
5312bd8b 91
d6f2ea22
AK
92/* Simple allocator for PhysPageEntry nodes */
93static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
94static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
95
07f07b31 96#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 97
e2eef170 98static void io_mem_init(void);
62152b8a 99static void memory_map_init(void);
8b9c99d9 100static void *qemu_safe_ram_ptr(ram_addr_t addr);
e2eef170 101
1ec9b909 102static MemoryRegion io_mem_watch;
6658ffb8 103#endif
fd6ce8f6 104
6d9a1304 105#if !defined(CONFIG_USER_ONLY)
d6f2ea22 106
f7bf5461 107static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 108{
f7bf5461 109 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
d6f2ea22
AK
110 typedef PhysPageEntry Node[L2_SIZE];
111 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
f7bf5461
AK
112 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
113 phys_map_nodes_nb + nodes);
d6f2ea22
AK
114 phys_map_nodes = g_renew(Node, phys_map_nodes,
115 phys_map_nodes_nb_alloc);
116 }
f7bf5461
AK
117}
118
119static uint16_t phys_map_node_alloc(void)
120{
121 unsigned i;
122 uint16_t ret;
123
124 ret = phys_map_nodes_nb++;
125 assert(ret != PHYS_MAP_NODE_NIL);
126 assert(ret != phys_map_nodes_nb_alloc);
d6f2ea22 127 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 128 phys_map_nodes[ret][i].is_leaf = 0;
c19e8800 129 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 130 }
f7bf5461 131 return ret;
d6f2ea22
AK
132}
133
134static void phys_map_nodes_reset(void)
135{
136 phys_map_nodes_nb = 0;
137}
138
92e873b9 139
a8170e5e
AK
140static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
141 hwaddr *nb, uint16_t leaf,
2999097b 142 int level)
f7bf5461
AK
143{
144 PhysPageEntry *p;
145 int i;
a8170e5e 146 hwaddr step = (hwaddr)1 << (level * L2_BITS);
108c49b8 147
07f07b31 148 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800
AK
149 lp->ptr = phys_map_node_alloc();
150 p = phys_map_nodes[lp->ptr];
f7bf5461
AK
151 if (level == 0) {
152 for (i = 0; i < L2_SIZE; i++) {
07f07b31 153 p[i].is_leaf = 1;
c19e8800 154 p[i].ptr = phys_section_unassigned;
4346ae3e 155 }
67c4d23c 156 }
f7bf5461 157 } else {
c19e8800 158 p = phys_map_nodes[lp->ptr];
92e873b9 159 }
2999097b 160 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 161
2999097b 162 while (*nb && lp < &p[L2_SIZE]) {
07f07b31
AK
163 if ((*index & (step - 1)) == 0 && *nb >= step) {
164 lp->is_leaf = true;
c19e8800 165 lp->ptr = leaf;
07f07b31
AK
166 *index += step;
167 *nb -= step;
2999097b
AK
168 } else {
169 phys_page_set_level(lp, index, nb, leaf, level - 1);
170 }
171 ++lp;
f7bf5461
AK
172 }
173}
174
ac1970fb 175static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 176 hwaddr index, hwaddr nb,
2999097b 177 uint16_t leaf)
f7bf5461 178{
2999097b 179 /* Wildly overreserve - it doesn't matter much. */
07f07b31 180 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 181
ac1970fb 182 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
183}
184
a8170e5e 185MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
92e873b9 186{
ac1970fb 187 PhysPageEntry lp = d->phys_map;
31ab2b4a
AK
188 PhysPageEntry *p;
189 int i;
31ab2b4a 190 uint16_t s_index = phys_section_unassigned;
f1f6e3b8 191
07f07b31 192 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 193 if (lp.ptr == PHYS_MAP_NODE_NIL) {
31ab2b4a
AK
194 goto not_found;
195 }
c19e8800 196 p = phys_map_nodes[lp.ptr];
31ab2b4a 197 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 198 }
31ab2b4a 199
c19e8800 200 s_index = lp.ptr;
31ab2b4a 201not_found:
f3705d53
AK
202 return &phys_sections[s_index];
203}
204
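/* Usage sketch for the phys map above, assuming a populated
   AddressSpaceDispatch *d and a section index obtained from
   phys_section_add():

       uint16_t sec = phys_section_add(&section);
       phys_page_set(d, guest_addr >> TARGET_PAGE_BITS, nb_pages, sec);
       MemoryRegionSection *s = phys_page_find(d, guest_addr >> TARGET_PAGE_BITS);

   Page indexes that were never set resolve to
   &phys_sections[phys_section_unassigned]. */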
e5548617
BS
205bool memory_region_is_unassigned(MemoryRegion *mr)
206{
207 return mr != &io_mem_ram && mr != &io_mem_rom
208 && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 209 && mr != &io_mem_watch;
fd6ce8f6 210}
5b6dd868 211#endif
fd6ce8f6 212
5b6dd868 213void cpu_exec_init_all(void)
fdbb84d1 214{
5b6dd868 215#if !defined(CONFIG_USER_ONLY)
b2a8658e 216 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
217 memory_map_init();
218 io_mem_init();
fdbb84d1 219#endif
5b6dd868 220}
fdbb84d1 221
5b6dd868
BS
222#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
223
224static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 225{
5b6dd868 226 CPUArchState *env = opaque;
a513fe19 227
5b6dd868
BS
228 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
229 version_id is increased. */
230 env->interrupt_request &= ~0x01;
231 tlb_flush(env, 1);
232
233 return 0;
a513fe19 234}
7501267e 235
5b6dd868
BS
236static const VMStateDescription vmstate_cpu_common = {
237 .name = "cpu_common",
238 .version_id = 1,
239 .minimum_version_id = 1,
240 .minimum_version_id_old = 1,
241 .post_load = cpu_common_post_load,
242 .fields = (VMStateField []) {
243 VMSTATE_UINT32(halted, CPUArchState),
244 VMSTATE_UINT32(interrupt_request, CPUArchState),
245 VMSTATE_END_OF_LIST()
246 }
247};
248#endif
ea041c0e 249
38d8f5c8 250CPUState *qemu_get_cpu(int index)
ea041c0e 251{
5b6dd868 252 CPUArchState *env = first_cpu;
38d8f5c8 253 CPUState *cpu = NULL;
ea041c0e 254
5b6dd868 255 while (env) {
55e5c285
AF
256 cpu = ENV_GET_CPU(env);
257 if (cpu->cpu_index == index) {
5b6dd868 258 break;
55e5c285 259 }
5b6dd868 260 env = env->next_cpu;
ea041c0e 261 }
5b6dd868 262
38d8f5c8 263 return cpu;
ea041c0e
FB
264}
265
5b6dd868 266void cpu_exec_init(CPUArchState *env)
ea041c0e 267{
5b6dd868 268 CPUState *cpu = ENV_GET_CPU(env);
5b6dd868
BS
269 CPUArchState **penv;
270 int cpu_index;
271
272#if defined(CONFIG_USER_ONLY)
273 cpu_list_lock();
274#endif
275 env->next_cpu = NULL;
276 penv = &first_cpu;
277 cpu_index = 0;
278 while (*penv != NULL) {
279 penv = &(*penv)->next_cpu;
280 cpu_index++;
281 }
55e5c285 282 cpu->cpu_index = cpu_index;
1b1ed8dc 283 cpu->numa_node = 0;
5b6dd868
BS
284 QTAILQ_INIT(&env->breakpoints);
285 QTAILQ_INIT(&env->watchpoints);
286#ifndef CONFIG_USER_ONLY
287 cpu->thread_id = qemu_get_thread_id();
288#endif
289 *penv = env;
290#if defined(CONFIG_USER_ONLY)
291 cpu_list_unlock();
292#endif
293#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
294 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
295 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
296 cpu_save, cpu_load, env);
297#endif
ea041c0e
FB
298}
299
1fddef4b 300#if defined(TARGET_HAS_ICE)
94df27fd 301#if defined(CONFIG_USER_ONLY)
9349b4f9 302static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
94df27fd
PB
303{
304 tb_invalidate_phys_page_range(pc, pc + 1, 0);
305}
306#else
1e7855a5
MF
307static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
308{
9d70c4b7
MF
309 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
310 (pc & ~TARGET_PAGE_MASK));
1e7855a5 311}
c27004ec 312#endif
94df27fd 313#endif /* TARGET_HAS_ICE */
d720b93d 314
c527ee8f 315#if defined(CONFIG_USER_ONLY)
9349b4f9 316void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f
PB
317
318{
319}
320
9349b4f9 321int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f
PB
322 int flags, CPUWatchpoint **watchpoint)
323{
324 return -ENOSYS;
325}
326#else
6658ffb8 327/* Add a watchpoint. */
9349b4f9 328int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 329 int flags, CPUWatchpoint **watchpoint)
6658ffb8 330{
b4051334 331 target_ulong len_mask = ~(len - 1);
c0ce998e 332 CPUWatchpoint *wp;
6658ffb8 333
b4051334 334 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
335 if ((len & (len - 1)) || (addr & ~len_mask) ||
336 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
337 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
338 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
339 return -EINVAL;
340 }
7267c094 341 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
342
343 wp->vaddr = addr;
b4051334 344 wp->len_mask = len_mask;
a1d1bb31
AL
345 wp->flags = flags;
346
2dc9f411 347 /* keep all GDB-injected watchpoints in front */
c0ce998e 348 if (flags & BP_GDB)
72cf2d4f 349 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 350 else
72cf2d4f 351 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 352
6658ffb8 353 tlb_flush_page(env, addr);
a1d1bb31
AL
354
355 if (watchpoint)
356 *watchpoint = wp;
357 return 0;
6658ffb8
PB
358}
359
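/* Usage sketch, assuming a valid CPUArchState *env: watch four bytes at
   'addr' for writes, then drop the watchpoint again.  Per the sanity
   check above, the length must be a power of two and 'addr' must be
   aligned to it, otherwise -EINVAL is returned.

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE, &wp) == 0) {
           ...
           cpu_watchpoint_remove_by_ref(env, wp);
       }
 */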
a1d1bb31 360/* Remove a specific watchpoint. */
9349b4f9 361int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 362 int flags)
6658ffb8 363{
b4051334 364 target_ulong len_mask = ~(len - 1);
a1d1bb31 365 CPUWatchpoint *wp;
6658ffb8 366
72cf2d4f 367 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 368 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 369 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 370 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
371 return 0;
372 }
373 }
a1d1bb31 374 return -ENOENT;
6658ffb8
PB
375}
376
a1d1bb31 377/* Remove a specific watchpoint by reference. */
9349b4f9 378void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 379{
72cf2d4f 380 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 381
a1d1bb31
AL
382 tlb_flush_page(env, watchpoint->vaddr);
383
7267c094 384 g_free(watchpoint);
a1d1bb31
AL
385}
386
387/* Remove all matching watchpoints. */
9349b4f9 388void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 389{
c0ce998e 390 CPUWatchpoint *wp, *next;
a1d1bb31 391
72cf2d4f 392 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
393 if (wp->flags & mask)
394 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 395 }
7d03f82f 396}
c527ee8f 397#endif
7d03f82f 398
a1d1bb31 399/* Add a breakpoint. */
9349b4f9 400int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 401 CPUBreakpoint **breakpoint)
4c3a88a2 402{
1fddef4b 403#if defined(TARGET_HAS_ICE)
c0ce998e 404 CPUBreakpoint *bp;
3b46e624 405
7267c094 406 bp = g_malloc(sizeof(*bp));
4c3a88a2 407
a1d1bb31
AL
408 bp->pc = pc;
409 bp->flags = flags;
410
2dc9f411 411 /* keep all GDB-injected breakpoints in front */
c0ce998e 412 if (flags & BP_GDB)
72cf2d4f 413 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 414 else
72cf2d4f 415 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 416
d720b93d 417 breakpoint_invalidate(env, pc);
a1d1bb31
AL
418
419 if (breakpoint)
420 *breakpoint = bp;
4c3a88a2
FB
421 return 0;
422#else
a1d1bb31 423 return -ENOSYS;
4c3a88a2
FB
424#endif
425}
426
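/* Sketch of the usual (gdbstub-style) pattern, assuming a valid env:

       cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
       ...
       cpu_breakpoint_remove(env, pc, BP_GDB);
 */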
a1d1bb31 427/* Remove a specific breakpoint. */
9349b4f9 428int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 429{
7d03f82f 430#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
431 CPUBreakpoint *bp;
432
72cf2d4f 433 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
434 if (bp->pc == pc && bp->flags == flags) {
435 cpu_breakpoint_remove_by_ref(env, bp);
436 return 0;
437 }
7d03f82f 438 }
a1d1bb31
AL
439 return -ENOENT;
440#else
441 return -ENOSYS;
7d03f82f
EI
442#endif
443}
444
a1d1bb31 445/* Remove a specific breakpoint by reference. */
9349b4f9 446void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 447{
1fddef4b 448#if defined(TARGET_HAS_ICE)
72cf2d4f 449 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 450
a1d1bb31
AL
451 breakpoint_invalidate(env, breakpoint->pc);
452
7267c094 453 g_free(breakpoint);
a1d1bb31
AL
454#endif
455}
456
457/* Remove all matching breakpoints. */
9349b4f9 458void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
459{
460#if defined(TARGET_HAS_ICE)
c0ce998e 461 CPUBreakpoint *bp, *next;
a1d1bb31 462
72cf2d4f 463 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
464 if (bp->flags & mask)
465 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 466 }
4c3a88a2
FB
467#endif
468}
469
c33a346e
FB
470/* enable or disable single step mode. EXCP_DEBUG is returned by the
471 CPU loop after each instruction */
9349b4f9 472void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 473{
1fddef4b 474#if defined(TARGET_HAS_ICE)
c33a346e
FB
475 if (env->singlestep_enabled != enabled) {
476 env->singlestep_enabled = enabled;
e22a25c9
AL
477 if (kvm_enabled())
478 kvm_update_guest_debug(env, 0);
479 else {
ccbb4d44 480 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
481 /* XXX: only flush what is necessary */
482 tb_flush(env);
483 }
c33a346e
FB
484 }
485#endif
486}
487
9349b4f9 488void cpu_reset_interrupt(CPUArchState *env, int mask)
b54ad049
FB
489{
490 env->interrupt_request &= ~mask;
491}
492
9349b4f9 493void cpu_exit(CPUArchState *env)
3098dba0 494{
fcd7d003
AF
495 CPUState *cpu = ENV_GET_CPU(env);
496
497 cpu->exit_request = 1;
378df4b2 498 cpu->tcg_exit_req = 1;
3098dba0
AJ
499}
500
9349b4f9 501void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
502{
503 va_list ap;
493ae1f0 504 va_list ap2;
7501267e
FB
505
506 va_start(ap, fmt);
493ae1f0 507 va_copy(ap2, ap);
7501267e
FB
508 fprintf(stderr, "qemu: fatal: ");
509 vfprintf(stderr, fmt, ap);
510 fprintf(stderr, "\n");
6fd2a026 511 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
512 if (qemu_log_enabled()) {
513 qemu_log("qemu: fatal: ");
514 qemu_log_vprintf(fmt, ap2);
515 qemu_log("\n");
6fd2a026 516 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 517 qemu_log_flush();
93fcfe39 518 qemu_log_close();
924edcae 519 }
493ae1f0 520 va_end(ap2);
f9373291 521 va_end(ap);
fd052bf6
RV
522#if defined(CONFIG_USER_ONLY)
523 {
524 struct sigaction act;
525 sigfillset(&act.sa_mask);
526 act.sa_handler = SIG_DFL;
527 sigaction(SIGABRT, &act, NULL);
528 }
529#endif
7501267e
FB
530 abort();
531}
532
9349b4f9 533CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 534{
9349b4f9
AF
535 CPUArchState *new_env = cpu_init(env->cpu_model_str);
536 CPUArchState *next_cpu = new_env->next_cpu;
5a38f081
AL
537#if defined(TARGET_HAS_ICE)
538 CPUBreakpoint *bp;
539 CPUWatchpoint *wp;
540#endif
541
9349b4f9 542 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081 543
55e5c285 544 /* Preserve chaining. */
c5be9f08 545 new_env->next_cpu = next_cpu;
5a38f081
AL
546
547 /* Clone all break/watchpoints.
548 Note: Once we support ptrace with hw-debug register access, make sure
549 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
550 QTAILQ_INIT(&env->breakpoints);
551 QTAILQ_INIT(&env->watchpoints);
5a38f081 552#if defined(TARGET_HAS_ICE)
72cf2d4f 553 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
554 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
555 }
72cf2d4f 556 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
557 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
558 wp->flags, NULL);
559 }
560#endif
561
c5be9f08
TS
562 return new_env;
563}
564
0124311e 565#if !defined(CONFIG_USER_ONLY)
d24981d3
JQ
566static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
567 uintptr_t length)
568{
569 uintptr_t start1;
570
571 /* we modify the TLB cache so that the dirty bit will be set again
572 when accessing the range */
573 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
574 /* Check that we don't span multiple blocks - this breaks the
575 address comparisons below. */
576 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
577 != (end - 1) - start) {
578 abort();
579 }
580 cpu_tlb_reset_dirty_all(start1, length);
581
582}
583
5579c7f3 584/* Note: start and end must be within the same ram block. */
c227f099 585void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 586 int dirty_flags)
1ccde1cb 587{
d24981d3 588 uintptr_t length;
1ccde1cb
FB
589
590 start &= TARGET_PAGE_MASK;
591 end = TARGET_PAGE_ALIGN(end);
592
593 length = end - start;
594 if (length == 0)
595 return;
f7c11b53 596 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 597
d24981d3
JQ
598 if (tcg_enabled()) {
599 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 600 }
1ccde1cb
FB
601}
602
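/* Sketch of the usual dirty-tracking pattern, assuming the
   MIGRATION_DIRTY_FLAG bit from this tree's dirty-flag definitions:
   a client such as live migration tests its bit for a page and then
   clears it for the range it has processed.

       if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
                                         MIGRATION_DIRTY_FLAG)) {
           ...
           cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                           MIGRATION_DIRTY_FLAG);
       }
 */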
8b9c99d9 603static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 604{
f6f3fbca 605 int ret = 0;
74576198 606 in_migration = enable;
f6f3fbca 607 return ret;
74576198
AL
608}
609
a8170e5e 610hwaddr memory_region_section_get_iotlb(CPUArchState *env,
e5548617
BS
611 MemoryRegionSection *section,
612 target_ulong vaddr,
a8170e5e 613 hwaddr paddr,
e5548617
BS
614 int prot,
615 target_ulong *address)
616{
a8170e5e 617 hwaddr iotlb;
e5548617
BS
618 CPUWatchpoint *wp;
619
cc5bea60 620 if (memory_region_is_ram(section->mr)) {
e5548617
BS
621 /* Normal RAM. */
622 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 623 + memory_region_section_addr(section, paddr);
e5548617
BS
624 if (!section->readonly) {
625 iotlb |= phys_section_notdirty;
626 } else {
627 iotlb |= phys_section_rom;
628 }
629 } else {
630 /* IO handlers are currently passed a physical address.
631 It would be nice to pass an offset from the base address
632 of that region. This would avoid having to special case RAM,
633 and avoid full address decoding in every device.
634 We can't use the high bits of pd for this because
635 IO_MEM_ROMD uses these as a ram address. */
636 iotlb = section - phys_sections;
cc5bea60 637 iotlb += memory_region_section_addr(section, paddr);
e5548617
BS
638 }
639
640 /* Make accesses to pages with watchpoints go via the
641 watchpoint trap routines. */
642 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
643 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
644 /* Avoid trapping reads of pages with a write breakpoint. */
645 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
646 iotlb = phys_section_watch + paddr;
647 *address |= TLB_MMIO;
648 break;
649 }
650 }
651 }
652
653 return iotlb;
654}
9fa3e853
FB
 655#endif /* !defined(CONFIG_USER_ONLY) */
656
e2eef170 657#if !defined(CONFIG_USER_ONLY)
8da3ff18 658
c04b2b78
PB
659#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
660typedef struct subpage_t {
70c68e44 661 MemoryRegion iomem;
a8170e5e 662 hwaddr base;
5312bd8b 663 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
664} subpage_t;
665
c227f099 666static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 667 uint16_t section);
a8170e5e 668static subpage_t *subpage_init(hwaddr base);
5312bd8b 669static void destroy_page_desc(uint16_t section_index)
54688b1e 670{
5312bd8b
AK
671 MemoryRegionSection *section = &phys_sections[section_index];
672 MemoryRegion *mr = section->mr;
54688b1e
AK
673
674 if (mr->subpage) {
675 subpage_t *subpage = container_of(mr, subpage_t, iomem);
676 memory_region_destroy(&subpage->iomem);
677 g_free(subpage);
678 }
679}
680
4346ae3e 681static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
682{
683 unsigned i;
d6f2ea22 684 PhysPageEntry *p;
54688b1e 685
c19e8800 686 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
687 return;
688 }
689
c19e8800 690 p = phys_map_nodes[lp->ptr];
4346ae3e 691 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 692 if (!p[i].is_leaf) {
54688b1e 693 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 694 } else {
c19e8800 695 destroy_page_desc(p[i].ptr);
54688b1e 696 }
54688b1e 697 }
07f07b31 698 lp->is_leaf = 0;
c19e8800 699 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
700}
701
ac1970fb 702static void destroy_all_mappings(AddressSpaceDispatch *d)
54688b1e 703{
ac1970fb 704 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
d6f2ea22 705 phys_map_nodes_reset();
54688b1e
AK
706}
707
5312bd8b
AK
708static uint16_t phys_section_add(MemoryRegionSection *section)
709{
710 if (phys_sections_nb == phys_sections_nb_alloc) {
711 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
712 phys_sections = g_renew(MemoryRegionSection, phys_sections,
713 phys_sections_nb_alloc);
714 }
715 phys_sections[phys_sections_nb] = *section;
716 return phys_sections_nb++;
717}
718
719static void phys_sections_clear(void)
720{
721 phys_sections_nb = 0;
722}
723
ac1970fb 724static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
725{
726 subpage_t *subpage;
a8170e5e 727 hwaddr base = section->offset_within_address_space
0f0cb164 728 & TARGET_PAGE_MASK;
ac1970fb 729 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
0f0cb164
AK
730 MemoryRegionSection subsection = {
731 .offset_within_address_space = base,
732 .size = TARGET_PAGE_SIZE,
733 };
a8170e5e 734 hwaddr start, end;
0f0cb164 735
f3705d53 736 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 737
f3705d53 738 if (!(existing->mr->subpage)) {
0f0cb164
AK
739 subpage = subpage_init(base);
740 subsection.mr = &subpage->iomem;
ac1970fb 741 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 742 phys_section_add(&subsection));
0f0cb164 743 } else {
f3705d53 744 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
745 }
746 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
adb2a9b5 747 end = start + section->size - 1;
0f0cb164
AK
748 subpage_register(subpage, start, end, phys_section_add(section));
749}
750
751
ac1970fb 752static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
33417e70 753{
a8170e5e 754 hwaddr start_addr = section->offset_within_address_space;
dd81124b 755 ram_addr_t size = section->size;
a8170e5e 756 hwaddr addr;
5312bd8b 757 uint16_t section_index = phys_section_add(section);
dd81124b 758
3b8e6a2d 759 assert(size);
f6f3fbca 760
3b8e6a2d 761 addr = start_addr;
ac1970fb 762 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2999097b 763 section_index);
33417e70
FB
764}
765
ac1970fb 766static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 767{
ac1970fb 768 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
0f0cb164
AK
769 MemoryRegionSection now = *section, remain = *section;
770
771 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
772 || (now.size < TARGET_PAGE_SIZE)) {
773 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
774 - now.offset_within_address_space,
775 now.size);
ac1970fb 776 register_subpage(d, &now);
0f0cb164
AK
777 remain.size -= now.size;
778 remain.offset_within_address_space += now.size;
779 remain.offset_within_region += now.size;
780 }
69b67646
TH
781 while (remain.size >= TARGET_PAGE_SIZE) {
782 now = remain;
783 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
784 now.size = TARGET_PAGE_SIZE;
ac1970fb 785 register_subpage(d, &now);
69b67646
TH
786 } else {
787 now.size &= TARGET_PAGE_MASK;
ac1970fb 788 register_multipage(d, &now);
69b67646 789 }
0f0cb164
AK
790 remain.size -= now.size;
791 remain.offset_within_address_space += now.size;
792 remain.offset_within_region += now.size;
793 }
794 now = remain;
795 if (now.size) {
ac1970fb 796 register_subpage(d, &now);
0f0cb164
AK
797 }
798}
799
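/* Worked example of the split above, assuming 4 KiB target pages: a
   section covering guest-physical 0x2000..0x47ff (two and a half pages)
   with a page-aligned offset_within_region is registered as one
   full-page mapping for 0x2000..0x3fff via register_multipage() plus a
   subpage for the 0x800-byte tail at 0x4000.  Unaligned heads, and
   sections whose offset_within_region is not page aligned, are routed
   through register_subpage() a page at a time instead. */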
62a2744c
SY
800void qemu_flush_coalesced_mmio_buffer(void)
801{
802 if (kvm_enabled())
803 kvm_flush_coalesced_mmio_buffer();
804}
805
b2a8658e
UD
806void qemu_mutex_lock_ramlist(void)
807{
808 qemu_mutex_lock(&ram_list.mutex);
809}
810
811void qemu_mutex_unlock_ramlist(void)
812{
813 qemu_mutex_unlock(&ram_list.mutex);
814}
815
c902760f
MT
816#if defined(__linux__) && !defined(TARGET_S390X)
817
818#include <sys/vfs.h>
819
820#define HUGETLBFS_MAGIC 0x958458f6
821
822static long gethugepagesize(const char *path)
823{
824 struct statfs fs;
825 int ret;
826
827 do {
9742bf26 828 ret = statfs(path, &fs);
c902760f
MT
829 } while (ret != 0 && errno == EINTR);
830
831 if (ret != 0) {
9742bf26
YT
832 perror(path);
833 return 0;
c902760f
MT
834 }
835
836 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 837 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
838
839 return fs.f_bsize;
840}
841
04b16653
AW
842static void *file_ram_alloc(RAMBlock *block,
843 ram_addr_t memory,
844 const char *path)
c902760f
MT
845{
846 char *filename;
8ca761f6
PF
847 char *sanitized_name;
848 char *c;
c902760f
MT
849 void *area;
850 int fd;
851#ifdef MAP_POPULATE
852 int flags;
853#endif
854 unsigned long hpagesize;
855
856 hpagesize = gethugepagesize(path);
857 if (!hpagesize) {
9742bf26 858 return NULL;
c902760f
MT
859 }
860
861 if (memory < hpagesize) {
862 return NULL;
863 }
864
865 if (kvm_enabled() && !kvm_has_sync_mmu()) {
866 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
867 return NULL;
868 }
869
8ca761f6
PF
870 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
871 sanitized_name = g_strdup(block->mr->name);
872 for (c = sanitized_name; *c != '\0'; c++) {
873 if (*c == '/')
874 *c = '_';
875 }
876
877 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
878 sanitized_name);
879 g_free(sanitized_name);
c902760f
MT
880
881 fd = mkstemp(filename);
882 if (fd < 0) {
9742bf26 883 perror("unable to create backing store for hugepages");
e4ada482 884 g_free(filename);
9742bf26 885 return NULL;
c902760f
MT
886 }
887 unlink(filename);
e4ada482 888 g_free(filename);
c902760f
MT
889
890 memory = (memory+hpagesize-1) & ~(hpagesize-1);
891
892 /*
893 * ftruncate is not supported by hugetlbfs in older
894 * hosts, so don't bother bailing out on errors.
895 * If anything goes wrong with it under other filesystems,
896 * mmap will fail.
897 */
898 if (ftruncate(fd, memory))
9742bf26 899 perror("ftruncate");
c902760f
MT
900
901#ifdef MAP_POPULATE
902 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
903 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
904 * to sidestep this quirk.
905 */
906 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
907 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
908#else
909 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
910#endif
911 if (area == MAP_FAILED) {
9742bf26
YT
912 perror("file_ram_alloc: can't mmap RAM pages");
913 close(fd);
914 return (NULL);
c902760f 915 }
04b16653 916 block->fd = fd;
c902760f
MT
917 return area;
918}
919#endif
920
d17b5288 921static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
922{
923 RAMBlock *block, *next_block;
3e837b2c 924 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 925
a3161038 926 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
927 return 0;
928
a3161038 929 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 930 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
931
932 end = block->offset + block->length;
933
a3161038 934 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
935 if (next_block->offset >= end) {
936 next = MIN(next, next_block->offset);
937 }
938 }
939 if (next - end >= size && next - end < mingap) {
3e837b2c 940 offset = end;
04b16653
AW
941 mingap = next - end;
942 }
943 }
3e837b2c
AW
944
945 if (offset == RAM_ADDR_MAX) {
946 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
947 (uint64_t)size);
948 abort();
949 }
950
04b16653
AW
951 return offset;
952}
953
652d7ec2 954ram_addr_t last_ram_offset(void)
d17b5288
AW
955{
956 RAMBlock *block;
957 ram_addr_t last = 0;
958
a3161038 959 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
960 last = MAX(last, block->offset + block->length);
961
962 return last;
963}
964
ddb97f1d
JB
965static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
966{
967 int ret;
968 QemuOpts *machine_opts;
969
 970 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
971 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
972 if (machine_opts &&
973 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
974 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
975 if (ret) {
976 perror("qemu_madvise");
977 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
978 "but dump_guest_core=off specified\n");
979 }
980 }
981}
982
c5705a77 983void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
984{
985 RAMBlock *new_block, *block;
986
c5705a77 987 new_block = NULL;
a3161038 988 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77
AK
989 if (block->offset == addr) {
990 new_block = block;
991 break;
992 }
993 }
994 assert(new_block);
995 assert(!new_block->idstr[0]);
84b89d78 996
09e5ab63
AL
997 if (dev) {
998 char *id = qdev_get_dev_path(dev);
84b89d78
CM
999 if (id) {
1000 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1001 g_free(id);
84b89d78
CM
1002 }
1003 }
1004 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1005
b2a8658e
UD
1006 /* This assumes the iothread lock is taken here too. */
1007 qemu_mutex_lock_ramlist();
a3161038 1008 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1009 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1010 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1011 new_block->idstr);
1012 abort();
1013 }
1014 }
b2a8658e 1015 qemu_mutex_unlock_ramlist();
c5705a77
AK
1016}
1017
8490fc78
LC
1018static int memory_try_enable_merging(void *addr, size_t len)
1019{
1020 QemuOpts *opts;
1021
1022 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1023 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1024 /* disabled by the user */
1025 return 0;
1026 }
1027
1028 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1029}
1030
c5705a77
AK
1031ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1032 MemoryRegion *mr)
1033{
abb26d63 1034 RAMBlock *block, *new_block;
c5705a77
AK
1035
1036 size = TARGET_PAGE_ALIGN(size);
1037 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1038
b2a8658e
UD
1039 /* This assumes the iothread lock is taken here too. */
1040 qemu_mutex_lock_ramlist();
7c637366 1041 new_block->mr = mr;
432d268c 1042 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1043 if (host) {
1044 new_block->host = host;
cd19cfa2 1045 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
1046 } else {
1047 if (mem_path) {
c902760f 1048#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
1049 new_block->host = file_ram_alloc(new_block, size, mem_path);
1050 if (!new_block->host) {
1051 new_block->host = qemu_vmalloc(size);
8490fc78 1052 memory_try_enable_merging(new_block->host, size);
6977dfe6 1053 }
c902760f 1054#else
6977dfe6
YT
1055 fprintf(stderr, "-mem-path option unsupported\n");
1056 exit(1);
c902760f 1057#endif
6977dfe6 1058 } else {
868bb33f 1059 if (xen_enabled()) {
fce537d4 1060 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
1061 } else if (kvm_enabled()) {
1062 /* some s390/kvm configurations have special constraints */
1063 new_block->host = kvm_vmalloc(size);
432d268c
JN
1064 } else {
1065 new_block->host = qemu_vmalloc(size);
1066 }
8490fc78 1067 memory_try_enable_merging(new_block->host, size);
6977dfe6 1068 }
c902760f 1069 }
94a6b54f
PB
1070 new_block->length = size;
1071
abb26d63
PB
1072 /* Keep the list sorted from biggest to smallest block. */
1073 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1074 if (block->length < new_block->length) {
1075 break;
1076 }
1077 }
1078 if (block) {
1079 QTAILQ_INSERT_BEFORE(block, new_block, next);
1080 } else {
1081 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1082 }
0d6d3c87 1083 ram_list.mru_block = NULL;
94a6b54f 1084
f798b07f 1085 ram_list.version++;
b2a8658e 1086 qemu_mutex_unlock_ramlist();
f798b07f 1087
7267c094 1088 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1089 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
1090 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1091 0, size >> TARGET_PAGE_BITS);
1720aeee 1092 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1093
ddb97f1d 1094 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1095 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1096
6f0437e8
JK
1097 if (kvm_enabled())
1098 kvm_setup_guest_memory(new_block->host, size);
1099
94a6b54f
PB
1100 return new_block->offset;
1101}
e9a1ab19 1102
c5705a77 1103ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1104{
c5705a77 1105 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1106}
1107
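/* Allocation sketch for device code, assuming an initialised
   MemoryRegion *mr and a 1 MiB block (error handling elided):

       ram_addr_t offset = qemu_ram_alloc(0x100000, mr);
       uint8_t *host = qemu_get_ram_ptr(offset);
       ...
       qemu_ram_free(offset);

   Most device RAM does not call this directly but goes through
   memory_region_init_ram(), which performs the allocation internally. */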
1f2e98b6
AW
1108void qemu_ram_free_from_ptr(ram_addr_t addr)
1109{
1110 RAMBlock *block;
1111
b2a8658e
UD
1112 /* This assumes the iothread lock is taken here too. */
1113 qemu_mutex_lock_ramlist();
a3161038 1114 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1115 if (addr == block->offset) {
a3161038 1116 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1117 ram_list.mru_block = NULL;
f798b07f 1118 ram_list.version++;
7267c094 1119 g_free(block);
b2a8658e 1120 break;
1f2e98b6
AW
1121 }
1122 }
b2a8658e 1123 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1124}
1125
c227f099 1126void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1127{
04b16653
AW
1128 RAMBlock *block;
1129
b2a8658e
UD
1130 /* This assumes the iothread lock is taken here too. */
1131 qemu_mutex_lock_ramlist();
a3161038 1132 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1133 if (addr == block->offset) {
a3161038 1134 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1135 ram_list.mru_block = NULL;
f798b07f 1136 ram_list.version++;
cd19cfa2
HY
1137 if (block->flags & RAM_PREALLOC_MASK) {
1138 ;
1139 } else if (mem_path) {
04b16653
AW
1140#if defined (__linux__) && !defined(TARGET_S390X)
1141 if (block->fd) {
1142 munmap(block->host, block->length);
1143 close(block->fd);
1144 } else {
1145 qemu_vfree(block->host);
1146 }
fd28aa13
JK
1147#else
1148 abort();
04b16653
AW
1149#endif
1150 } else {
1151#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1152 munmap(block->host, block->length);
1153#else
868bb33f 1154 if (xen_enabled()) {
e41d7c69 1155 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
1156 } else {
1157 qemu_vfree(block->host);
1158 }
04b16653
AW
1159#endif
1160 }
7267c094 1161 g_free(block);
b2a8658e 1162 break;
04b16653
AW
1163 }
1164 }
b2a8658e 1165 qemu_mutex_unlock_ramlist();
04b16653 1166
e9a1ab19
FB
1167}
1168
cd19cfa2
HY
1169#ifndef _WIN32
1170void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1171{
1172 RAMBlock *block;
1173 ram_addr_t offset;
1174 int flags;
1175 void *area, *vaddr;
1176
a3161038 1177 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1178 offset = addr - block->offset;
1179 if (offset < block->length) {
1180 vaddr = block->host + offset;
1181 if (block->flags & RAM_PREALLOC_MASK) {
1182 ;
1183 } else {
1184 flags = MAP_FIXED;
1185 munmap(vaddr, length);
1186 if (mem_path) {
1187#if defined(__linux__) && !defined(TARGET_S390X)
1188 if (block->fd) {
1189#ifdef MAP_POPULATE
1190 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1191 MAP_PRIVATE;
1192#else
1193 flags |= MAP_PRIVATE;
1194#endif
1195 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1196 flags, block->fd, offset);
1197 } else {
1198 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1199 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1200 flags, -1, 0);
1201 }
fd28aa13
JK
1202#else
1203 abort();
cd19cfa2
HY
1204#endif
1205 } else {
1206#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1207 flags |= MAP_SHARED | MAP_ANONYMOUS;
1208 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1209 flags, -1, 0);
1210#else
1211 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1212 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1213 flags, -1, 0);
1214#endif
1215 }
1216 if (area != vaddr) {
f15fbc4b
AP
1217 fprintf(stderr, "Could not remap addr: "
1218 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1219 length, addr);
1220 exit(1);
1221 }
8490fc78 1222 memory_try_enable_merging(vaddr, length);
ddb97f1d 1223 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1224 }
1225 return;
1226 }
1227 }
1228}
1229#endif /* !_WIN32 */
1230
dc828ca1 1231/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
1232 With the exception of the softmmu code in this file, this should
1233 only be used for local memory (e.g. video ram) that the device owns,
1234 and knows it isn't going to access beyond the end of the block.
1235
1236 It should not be used for general purpose DMA.
1237 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1238 */
c227f099 1239void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 1240{
94a6b54f
PB
1241 RAMBlock *block;
1242
b2a8658e 1243 /* The list is protected by the iothread lock here. */
0d6d3c87
PB
1244 block = ram_list.mru_block;
1245 if (block && addr - block->offset < block->length) {
1246 goto found;
1247 }
a3161038 1248 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1249 if (addr - block->offset < block->length) {
0d6d3c87 1250 goto found;
f471a17e 1251 }
94a6b54f 1252 }
f471a17e
AW
1253
1254 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1255 abort();
1256
0d6d3c87
PB
1257found:
1258 ram_list.mru_block = block;
1259 if (xen_enabled()) {
1260 /* We need to check if the requested address is in the RAM
1261 * because we don't want to map the entire memory in QEMU.
1262 * In that case just map until the end of the page.
1263 */
1264 if (block->offset == 0) {
1265 return xen_map_cache(addr, 0, 0);
1266 } else if (block->host == NULL) {
1267 block->host =
1268 xen_map_cache(block->offset, block->length, 1);
1269 }
1270 }
1271 return block->host + (addr - block->offset);
dc828ca1
PB
1272}
1273
0d6d3c87
PB
1274/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1275 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1276 *
1277 * ??? Is this still necessary?
b2e0a138 1278 */
8b9c99d9 1279static void *qemu_safe_ram_ptr(ram_addr_t addr)
b2e0a138
MT
1280{
1281 RAMBlock *block;
1282
b2a8658e 1283 /* The list is protected by the iothread lock here. */
a3161038 1284 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1285 if (addr - block->offset < block->length) {
868bb33f 1286 if (xen_enabled()) {
432d268c
JN
1287 /* We need to check if the requested address is in the RAM
1288 * because we don't want to map the entire memory in QEMU.
712c2b41 1289 * In that case just map until the end of the page.
432d268c
JN
1290 */
1291 if (block->offset == 0) {
e41d7c69 1292 return xen_map_cache(addr, 0, 0);
432d268c 1293 } else if (block->host == NULL) {
e41d7c69
JK
1294 block->host =
1295 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
1296 }
1297 }
b2e0a138
MT
1298 return block->host + (addr - block->offset);
1299 }
1300 }
1301
1302 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1303 abort();
1304
1305 return NULL;
1306}
1307
38bee5dc
SS
1308/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1309 * but takes a size argument */
8b9c99d9 1310static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 1311{
8ab934f9
SS
1312 if (*size == 0) {
1313 return NULL;
1314 }
868bb33f 1315 if (xen_enabled()) {
e41d7c69 1316 return xen_map_cache(addr, *size, 1);
868bb33f 1317 } else {
38bee5dc
SS
1318 RAMBlock *block;
1319
a3161038 1320 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1321 if (addr - block->offset < block->length) {
1322 if (addr - block->offset + *size > block->length)
1323 *size = block->length - addr + block->offset;
1324 return block->host + (addr - block->offset);
1325 }
1326 }
1327
1328 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1329 abort();
38bee5dc
SS
1330 }
1331}
1332
050a0ddf
AP
1333void qemu_put_ram_ptr(void *addr)
1334{
1335 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
1336}
1337
e890261f 1338int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1339{
94a6b54f
PB
1340 RAMBlock *block;
1341 uint8_t *host = ptr;
1342
868bb33f 1343 if (xen_enabled()) {
e41d7c69 1344 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
1345 return 0;
1346 }
1347
a3161038 1348 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 1349 /* This happens when the block is not mapped. */
1350 if (block->host == NULL) {
1351 continue;
1352 }
f471a17e 1353 if (host - block->host < block->length) {
e890261f
MT
1354 *ram_addr = block->offset + (host - block->host);
1355 return 0;
f471a17e 1356 }
94a6b54f 1357 }
432d268c 1358
e890261f
MT
1359 return -1;
1360}
f471a17e 1361
e890261f
MT
1362/* Some of the softmmu routines need to translate from a host pointer
1363 (typically a TLB entry) back to a ram offset. */
1364ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1365{
1366 ram_addr_t ram_addr;
f471a17e 1367
e890261f
MT
1368 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1369 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1370 abort();
1371 }
1372 return ram_addr;
5579c7f3
PB
1373}
1374
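/* Round-trip sketch: for a normally mapped (non-Xen) block, a host
   pointer obtained from qemu_get_ram_ptr() translates back to the same
   ram_addr_t.

       ram_addr_t ram_addr;
       if (qemu_ram_addr_from_host(host_ptr, &ram_addr) == 0) {
           assert(qemu_get_ram_ptr(ram_addr) == host_ptr);
       }
 */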
a8170e5e 1375static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1376 unsigned size)
e18231a3
BS
1377{
1378#ifdef DEBUG_UNASSIGNED
1379 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1380#endif
5b450407 1381#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1382 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
1383#endif
1384 return 0;
1385}
1386
a8170e5e 1387static void unassigned_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1388 uint64_t val, unsigned size)
e18231a3
BS
1389{
1390#ifdef DEBUG_UNASSIGNED
0e0df1e2 1391 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 1392#endif
5b450407 1393#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 1394 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 1395#endif
33417e70
FB
1396}
1397
0e0df1e2
AK
1398static const MemoryRegionOps unassigned_mem_ops = {
1399 .read = unassigned_mem_read,
1400 .write = unassigned_mem_write,
1401 .endianness = DEVICE_NATIVE_ENDIAN,
1402};
e18231a3 1403
a8170e5e 1404static uint64_t error_mem_read(void *opaque, hwaddr addr,
0e0df1e2 1405 unsigned size)
e18231a3 1406{
0e0df1e2 1407 abort();
e18231a3
BS
1408}
1409
a8170e5e 1410static void error_mem_write(void *opaque, hwaddr addr,
0e0df1e2 1411 uint64_t value, unsigned size)
e18231a3 1412{
0e0df1e2 1413 abort();
33417e70
FB
1414}
1415
0e0df1e2
AK
1416static const MemoryRegionOps error_mem_ops = {
1417 .read = error_mem_read,
1418 .write = error_mem_write,
1419 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
1420};
1421
0e0df1e2
AK
1422static const MemoryRegionOps rom_mem_ops = {
1423 .read = error_mem_read,
1424 .write = unassigned_mem_write,
1425 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
1426};
1427
a8170e5e 1428static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1429 uint64_t val, unsigned size)
9fa3e853 1430{
3a7d929e 1431 int dirty_flags;
f7c11b53 1432 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1433 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1434#if !defined(CONFIG_USER_ONLY)
0e0df1e2 1435 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1436 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 1437#endif
3a7d929e 1438 }
0e0df1e2
AK
1439 switch (size) {
1440 case 1:
1441 stb_p(qemu_get_ram_ptr(ram_addr), val);
1442 break;
1443 case 2:
1444 stw_p(qemu_get_ram_ptr(ram_addr), val);
1445 break;
1446 case 4:
1447 stl_p(qemu_get_ram_ptr(ram_addr), val);
1448 break;
1449 default:
1450 abort();
3a7d929e 1451 }
f23db169 1452 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1453 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1454 /* we remove the notdirty callback only if the code has been
1455 flushed */
1456 if (dirty_flags == 0xff)
2e70f6ef 1457 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
1458}
1459
0e0df1e2
AK
1460static const MemoryRegionOps notdirty_mem_ops = {
1461 .read = error_mem_read,
1462 .write = notdirty_mem_write,
1463 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1464};
1465
0f459d16 1466/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1467static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1468{
9349b4f9 1469 CPUArchState *env = cpu_single_env;
06d55cc1 1470 target_ulong pc, cs_base;
0f459d16 1471 target_ulong vaddr;
a1d1bb31 1472 CPUWatchpoint *wp;
06d55cc1 1473 int cpu_flags;
0f459d16 1474
06d55cc1
AL
1475 if (env->watchpoint_hit) {
1476 /* We re-entered the check after replacing the TB. Now raise
 1477 * the debug interrupt so that it will trigger after the
1478 * current instruction. */
1479 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1480 return;
1481 }
2e70f6ef 1482 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1483 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1484 if ((vaddr == (wp->vaddr & len_mask) ||
1485 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1486 wp->flags |= BP_WATCHPOINT_HIT;
1487 if (!env->watchpoint_hit) {
1488 env->watchpoint_hit = wp;
5a316526 1489 tb_check_watchpoint(env);
6e140f28
AL
1490 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1491 env->exception_index = EXCP_DEBUG;
488d6577 1492 cpu_loop_exit(env);
6e140f28
AL
1493 } else {
1494 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1495 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1496 cpu_resume_from_signal(env, NULL);
6e140f28 1497 }
06d55cc1 1498 }
6e140f28
AL
1499 } else {
1500 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1501 }
1502 }
1503}
1504
6658ffb8
PB
1505/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1506 so these check for a hit then pass through to the normal out-of-line
1507 phys routines. */
a8170e5e 1508static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1509 unsigned size)
6658ffb8 1510{
1ec9b909
AK
1511 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1512 switch (size) {
1513 case 1: return ldub_phys(addr);
1514 case 2: return lduw_phys(addr);
1515 case 4: return ldl_phys(addr);
1516 default: abort();
1517 }
6658ffb8
PB
1518}
1519
a8170e5e 1520static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1521 uint64_t val, unsigned size)
6658ffb8 1522{
1ec9b909
AK
1523 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1524 switch (size) {
67364150
MF
1525 case 1:
1526 stb_phys(addr, val);
1527 break;
1528 case 2:
1529 stw_phys(addr, val);
1530 break;
1531 case 4:
1532 stl_phys(addr, val);
1533 break;
1ec9b909
AK
1534 default: abort();
1535 }
6658ffb8
PB
1536}
1537
1ec9b909
AK
1538static const MemoryRegionOps watch_mem_ops = {
1539 .read = watch_mem_read,
1540 .write = watch_mem_write,
1541 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1542};
6658ffb8 1543
a8170e5e 1544static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1545 unsigned len)
db7b5426 1546{
70c68e44 1547 subpage_t *mmio = opaque;
f6405247 1548 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1549 MemoryRegionSection *section;
db7b5426
BS
1550#if defined(DEBUG_SUBPAGE)
1551 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1552 mmio, len, addr, idx);
1553#endif
db7b5426 1554
5312bd8b
AK
1555 section = &phys_sections[mmio->sub_section[idx]];
1556 addr += mmio->base;
1557 addr -= section->offset_within_address_space;
1558 addr += section->offset_within_region;
37ec01d4 1559 return io_mem_read(section->mr, addr, len);
db7b5426
BS
1560}
1561
a8170e5e 1562static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1563 uint64_t value, unsigned len)
db7b5426 1564{
70c68e44 1565 subpage_t *mmio = opaque;
f6405247 1566 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 1567 MemoryRegionSection *section;
db7b5426 1568#if defined(DEBUG_SUBPAGE)
70c68e44
AK
1569 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1570 " idx %d value %"PRIx64"\n",
f6405247 1571 __func__, mmio, len, addr, idx, value);
db7b5426 1572#endif
f6405247 1573
5312bd8b
AK
1574 section = &phys_sections[mmio->sub_section[idx]];
1575 addr += mmio->base;
1576 addr -= section->offset_within_address_space;
1577 addr += section->offset_within_region;
37ec01d4 1578 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
1579}
1580
70c68e44
AK
1581static const MemoryRegionOps subpage_ops = {
1582 .read = subpage_read,
1583 .write = subpage_write,
1584 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1585};
1586
a8170e5e 1587static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
de712f94 1588 unsigned size)
56384e8b
AF
1589{
1590 ram_addr_t raddr = addr;
1591 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
1592 switch (size) {
1593 case 1: return ldub_p(ptr);
1594 case 2: return lduw_p(ptr);
1595 case 4: return ldl_p(ptr);
1596 default: abort();
1597 }
56384e8b
AF
1598}
1599
a8170e5e 1600static void subpage_ram_write(void *opaque, hwaddr addr,
de712f94 1601 uint64_t value, unsigned size)
56384e8b
AF
1602{
1603 ram_addr_t raddr = addr;
1604 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
1605 switch (size) {
1606 case 1: return stb_p(ptr, value);
1607 case 2: return stw_p(ptr, value);
1608 case 4: return stl_p(ptr, value);
1609 default: abort();
1610 }
56384e8b
AF
1611}
1612
de712f94
AK
1613static const MemoryRegionOps subpage_ram_ops = {
1614 .read = subpage_ram_read,
1615 .write = subpage_ram_write,
1616 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
1617};
1618
c227f099 1619static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1620 uint16_t section)
db7b5426
BS
1621{
1622 int idx, eidx;
1623
1624 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1625 return -1;
1626 idx = SUBPAGE_IDX(start);
1627 eidx = SUBPAGE_IDX(end);
1628#if defined(DEBUG_SUBPAGE)
0bf9e31a 1629 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
db7b5426
BS
 1630 mmio, start, end, idx, eidx, section);
1631#endif
5312bd8b
AK
1632 if (memory_region_is_ram(phys_sections[section].mr)) {
1633 MemoryRegionSection new_section = phys_sections[section];
1634 new_section.mr = &io_mem_subpage_ram;
1635 section = phys_section_add(&new_section);
56384e8b 1636 }
db7b5426 1637 for (; idx <= eidx; idx++) {
5312bd8b 1638 mmio->sub_section[idx] = section;
db7b5426
BS
1639 }
1640
1641 return 0;
1642}
1643
a8170e5e 1644static subpage_t *subpage_init(hwaddr base)
db7b5426 1645{
c227f099 1646 subpage_t *mmio;
db7b5426 1647
7267c094 1648 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
1649
1650 mmio->base = base;
70c68e44
AK
1651 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1652 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1653 mmio->iomem.subpage = true;
db7b5426 1654#if defined(DEBUG_SUBPAGE)
1eec614b
AL
 1655 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1656 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1657#endif
0f0cb164 1658 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
1659
1660 return mmio;
1661}
1662
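/* Sketch of how the subpage machinery is used (mirrors register_subpage()
   above; the offsets are made up): a device section that only covers the
   first 0x100 bytes of a page gets a subpage container, and the rest of
   the page keeps pointing at the unassigned section.

       subpage_t *sp = subpage_init(base & TARGET_PAGE_MASK);
       subpage_register(sp, 0x000, 0x0ff, phys_section_add(&section));
 */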
5312bd8b
AK
1663static uint16_t dummy_section(MemoryRegion *mr)
1664{
1665 MemoryRegionSection section = {
1666 .mr = mr,
1667 .offset_within_address_space = 0,
1668 .offset_within_region = 0,
1669 .size = UINT64_MAX,
1670 };
1671
1672 return phys_section_add(&section);
1673}
1674
a8170e5e 1675MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1676{
37ec01d4 1677 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1678}
1679
e9179ce1
AK
1680static void io_mem_init(void)
1681{
0e0df1e2 1682 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
1683 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1684 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1685 "unassigned", UINT64_MAX);
1686 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1687 "notdirty", UINT64_MAX);
de712f94
AK
1688 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1689 "subpage-ram", UINT64_MAX);
1ec9b909
AK
1690 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1691 "watch", UINT64_MAX);
e9179ce1
AK
1692}
1693
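/* Device models follow the same pattern as the fixed regions above:
   define a MemoryRegionOps and wrap it with memory_region_init_io().
   A minimal sketch for a hypothetical device with a 16-byte MMIO window
   (example_read/example_write are assumed callbacks):

       static const MemoryRegionOps example_ops = {
           .read = example_read,
           .write = example_write,
           .endianness = DEVICE_NATIVE_ENDIAN,
       };

       memory_region_init_io(&s->iomem, &example_ops, s, "example", 0x10);
 */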
ac1970fb
AK
1694static void mem_begin(MemoryListener *listener)
1695{
1696 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1697
1698 destroy_all_mappings(d);
1699 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1700}
1701
50c1e149
AK
1702static void core_begin(MemoryListener *listener)
1703{
5312bd8b
AK
1704 phys_sections_clear();
1705 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
1706 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1707 phys_section_rom = dummy_section(&io_mem_rom);
1708 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
1709}
1710
1d71148e 1711static void tcg_commit(MemoryListener *listener)
50c1e149 1712{
9349b4f9 1713 CPUArchState *env;
117712c3
AK
1714
1715 /* since each CPU stores ram addresses in its TLB cache, we must
1716 reset the modified entries */
1717 /* XXX: slow ! */
1718 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1719 tlb_flush(env, 1);
1720 }
50c1e149
AK
1721}
1722
93632747
AK
1723static void core_log_global_start(MemoryListener *listener)
1724{
1725 cpu_physical_memory_set_dirty_tracking(1);
1726}
1727
1728static void core_log_global_stop(MemoryListener *listener)
1729{
1730 cpu_physical_memory_set_dirty_tracking(0);
1731}
1732
4855d41a
AK
1733static void io_region_add(MemoryListener *listener,
1734 MemoryRegionSection *section)
1735{
a2d33521
AK
1736 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1737
1738 mrio->mr = section->mr;
1739 mrio->offset = section->offset_within_region;
1740 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 1741 section->offset_within_address_space, section->size);
a2d33521 1742 ioport_register(&mrio->iorange);
4855d41a
AK
1743}
1744
1745static void io_region_del(MemoryListener *listener,
1746 MemoryRegionSection *section)
1747{
1748 isa_unassign_ioport(section->offset_within_address_space, section->size);
1749}
1750
93632747 1751static MemoryListener core_memory_listener = {
50c1e149 1752 .begin = core_begin,
93632747
AK
1753 .log_global_start = core_log_global_start,
1754 .log_global_stop = core_log_global_stop,
ac1970fb 1755 .priority = 1,
93632747
AK
1756};
1757
4855d41a
AK
1758static MemoryListener io_memory_listener = {
1759 .region_add = io_region_add,
1760 .region_del = io_region_del,
4855d41a
AK
1761 .priority = 0,
1762};
1763
1d71148e
AK
1764static MemoryListener tcg_memory_listener = {
1765 .commit = tcg_commit,
1766};
1767
ac1970fb
AK
1768void address_space_init_dispatch(AddressSpace *as)
1769{
1770 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1771
1772 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1773 d->listener = (MemoryListener) {
1774 .begin = mem_begin,
1775 .region_add = mem_add,
1776 .region_nop = mem_add,
1777 .priority = 0,
1778 };
1779 as->dispatch = d;
1780 memory_listener_register(&d->listener, as);
1781}
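/* Editor's note: a hypothetical sketch, not part of exec.c.  It registers a
 * further listener against address_space_memory in the same way as the
 * dispatch listener above, just to log topology changes; the "demo_*" names
 * and the priority value are made up. */
static void demo_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    fprintf(stderr, "section added: offset 0x%" PRIx64 " size 0x%" PRIx64 "\n",
            (uint64_t)section->offset_within_address_space,
            (uint64_t)section->size);
}

static MemoryListener demo_memory_listener = {
    .region_add = demo_region_add,  /* unset callbacks are ignored, as with
                                       the other listeners in this file */
    .priority = 10,
};

static void demo_listener_setup(void)
{
    memory_listener_register(&demo_memory_listener, &address_space_memory);
}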
1782
83f3c251
AK
1783void address_space_destroy_dispatch(AddressSpace *as)
1784{
1785 AddressSpaceDispatch *d = as->dispatch;
1786
1787 memory_listener_unregister(&d->listener);
1788 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1789 g_free(d);
1790 as->dispatch = NULL;
1791}
1792
62152b8a
AK
1793static void memory_map_init(void)
1794{
7267c094 1795 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1796 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1797 address_space_init(&address_space_memory, system_memory);
1798 address_space_memory.name = "memory";
309cb471 1799
7267c094 1800 system_io = g_malloc(sizeof(*system_io));
309cb471 1801 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1802 address_space_init(&address_space_io, system_io);
1803 address_space_io.name = "I/O";
93632747 1804
f6790af6
AK
1805 memory_listener_register(&core_memory_listener, &address_space_memory);
1806 memory_listener_register(&io_memory_listener, &address_space_io);
1807 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1808
1809 dma_context_init(&dma_context_memory, &address_space_memory,
1810 NULL, NULL, NULL);
62152b8a
AK
1811}
1812
1813MemoryRegion *get_system_memory(void)
1814{
1815 return system_memory;
1816}
1817
309cb471
AK
1818MemoryRegion *get_system_io(void)
1819{
1820 return system_io;
1821}
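/* Editor's note: a hypothetical sketch, not part of exec.c.  Board code is
 * the usual consumer of get_system_memory(): it creates a RAM region and
 * maps it into the system address space.  The "demo" name, the 1 MiB size
 * and the base address 0 are assumptions for illustration. */
static void demo_board_ram_init(void)
{
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "demo.ram", 1024 * 1024);
    /* make the RAM visible at guest physical address 0; a real board would
       also register the region for migration */
    memory_region_add_subregion(sysmem, 0, ram);
}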
1822
e2eef170
PB
1823#endif /* !defined(CONFIG_USER_ONLY) */
1824
13eb76e0
FB
1825/* physical memory access (slow version, mainly for debug) */
1826#if defined(CONFIG_USER_ONLY)
9349b4f9 1827int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1828 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1829{
1830 int l, flags;
1831 target_ulong page;
53a5960a 1832 void * p;
13eb76e0
FB
1833
1834 while (len > 0) {
1835 page = addr & TARGET_PAGE_MASK;
1836 l = (page + TARGET_PAGE_SIZE) - addr;
1837 if (l > len)
1838 l = len;
1839 flags = page_get_flags(page);
1840 if (!(flags & PAGE_VALID))
a68fe89c 1841 return -1;
13eb76e0
FB
1842 if (is_write) {
1843 if (!(flags & PAGE_WRITE))
a68fe89c 1844 return -1;
579a97f7 1845 /* XXX: this code should not depend on lock_user */
72fb7daa 1846 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1847 return -1;
72fb7daa
AJ
1848 memcpy(p, buf, l);
1849 unlock_user(p, addr, l);
13eb76e0
FB
1850 } else {
1851 if (!(flags & PAGE_READ))
a68fe89c 1852 return -1;
579a97f7 1853 /* XXX: this code should not depend on lock_user */
72fb7daa 1854 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1855 return -1;
72fb7daa 1856 memcpy(buf, p, l);
5b257578 1857 unlock_user(p, addr, 0);
13eb76e0
FB
1858 }
1859 len -= l;
1860 buf += l;
1861 addr += l;
1862 }
a68fe89c 1863 return 0;
13eb76e0 1864}
8df1cd07 1865
13eb76e0 1866#else
51d7a9eb 1867
a8170e5e
AK
1868static void invalidate_and_set_dirty(hwaddr addr,
1869 hwaddr length)
51d7a9eb
AP
1870{
1871 if (!cpu_physical_memory_is_dirty(addr)) {
1872 /* invalidate code */
1873 tb_invalidate_phys_page_range(addr, addr + length, 0);
1874 /* set dirty bit */
1875 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1876 }
e226939d 1877 xen_modified_memory(addr, length);
51d7a9eb
AP
1878}
1879
a8170e5e 1880void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1881 int len, bool is_write)
13eb76e0 1882{
ac1970fb 1883 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1884 int l;
13eb76e0
FB
1885 uint8_t *ptr;
1886 uint32_t val;
a8170e5e 1887 hwaddr page;
f3705d53 1888 MemoryRegionSection *section;
3b46e624 1889
13eb76e0
FB
1890 while (len > 0) {
1891 page = addr & TARGET_PAGE_MASK;
1892 l = (page + TARGET_PAGE_SIZE) - addr;
1893 if (l > len)
1894 l = len;
ac1970fb 1895 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1896
13eb76e0 1897 if (is_write) {
f3705d53 1898 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1899 hwaddr addr1;
cc5bea60 1900 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
1901 /* XXX: could force cpu_single_env to NULL to avoid
1902 potential bugs */
6c2934db 1903 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1904 /* 32 bit write access */
c27004ec 1905 val = ldl_p(buf);
37ec01d4 1906 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1907 l = 4;
6c2934db 1908 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1909 /* 16 bit write access */
c27004ec 1910 val = lduw_p(buf);
37ec01d4 1911 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
1912 l = 2;
1913 } else {
1c213d19 1914 /* 8 bit write access */
c27004ec 1915 val = ldub_p(buf);
37ec01d4 1916 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
1917 l = 1;
1918 }
f3705d53 1919 } else if (!section->readonly) {
8ca5692d 1920 ram_addr_t addr1;
f3705d53 1921 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1922 + memory_region_section_addr(section, addr);
13eb76e0 1923 /* RAM case */
5579c7f3 1924 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1925 memcpy(ptr, buf, l);
51d7a9eb 1926 invalidate_and_set_dirty(addr1, l);
050a0ddf 1927 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1928 }
1929 } else {
cc5bea60
BS
1930 if (!(memory_region_is_ram(section->mr) ||
1931 memory_region_is_romd(section->mr))) {
a8170e5e 1932 hwaddr addr1;
13eb76e0 1933 /* I/O case */
cc5bea60 1934 addr1 = memory_region_section_addr(section, addr);
6c2934db 1935 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1936 /* 32 bit read access */
37ec01d4 1937 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1938 stl_p(buf, val);
13eb76e0 1939 l = 4;
6c2934db 1940 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1941 /* 16 bit read access */
37ec01d4 1942 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1943 stw_p(buf, val);
13eb76e0
FB
1944 l = 2;
1945 } else {
1c213d19 1946 /* 8 bit read access */
37ec01d4 1947 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1948 stb_p(buf, val);
13eb76e0
FB
1949 l = 1;
1950 }
1951 } else {
1952 /* RAM case */
0a1b357f 1953 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
1954 + memory_region_section_addr(section,
1955 addr));
f3705d53 1956 memcpy(buf, ptr, l);
050a0ddf 1957 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1958 }
1959 }
1960 len -= l;
1961 buf += l;
1962 addr += l;
1963 }
1964}
8df1cd07 1965
a8170e5e 1966void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1967 const uint8_t *buf, int len)
1968{
1969 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1970}
1971
1972/**
1973 * address_space_read: read from an address space.
1974 *
1975 * @as: #AddressSpace to be accessed
1976 * @addr: address within that address space
1977 * @buf: buffer into which the data is transferred (@len bytes)
1978 */
a8170e5e 1979void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
1980{
1981 address_space_rw(as, addr, buf, len, false);
1982}
1983
1984
a8170e5e 1985void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1986 int len, int is_write)
1987{
1988 address_space_rw(&address_space_memory, addr, buf, len, is_write);
1989}
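/* Editor's note: a hypothetical sketch, not part of exec.c.  It copies a
 * small block between two guest-physical addresses through the slow path
 * above; the buffer size and both addresses are placeholders. */
static void demo_phys_copy(hwaddr src, hwaddr dst, int len)
{
    uint8_t buf[64];

    assert(len > 0 && len <= (int)sizeof(buf));
    cpu_physical_memory_read(src, buf, len);    /* address_space_rw(), read  */
    cpu_physical_memory_write(dst, buf, len);   /* address_space_rw(), write */
}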
1990
d0ecd2aa 1991/* used for ROM loading : can write in RAM and ROM */
a8170e5e 1992void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1993 const uint8_t *buf, int len)
1994{
ac1970fb 1995 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
1996 int l;
1997 uint8_t *ptr;
a8170e5e 1998 hwaddr page;
f3705d53 1999 MemoryRegionSection *section;
3b46e624 2000
d0ecd2aa
FB
2001 while (len > 0) {
2002 page = addr & TARGET_PAGE_MASK;
2003 l = (page + TARGET_PAGE_SIZE) - addr;
2004 if (l > len)
2005 l = len;
ac1970fb 2006 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 2007
cc5bea60
BS
2008 if (!(memory_region_is_ram(section->mr) ||
2009 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
2010 /* do nothing */
2011 } else {
2012 unsigned long addr1;
f3705d53 2013 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 2014 + memory_region_section_addr(section, addr);
d0ecd2aa 2015 /* ROM/RAM case */
5579c7f3 2016 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2017 memcpy(ptr, buf, l);
51d7a9eb 2018 invalidate_and_set_dirty(addr1, l);
050a0ddf 2019 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
2020 }
2021 len -= l;
2022 buf += l;
2023 addr += l;
2024 }
2025}
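/* Editor's note: a hypothetical sketch, not part of exec.c.  A firmware
 * loader uses the helper above because an ordinary cpu_physical_memory_write()
 * is silently dropped for ROM sections; the blob contents and the base
 * address are made up. */
static void demo_load_firmware(void)
{
    static const uint8_t blob[] = { 0xde, 0xad, 0xbe, 0xef };

    cpu_physical_memory_write_rom(0xfffc0000, blob, sizeof(blob));
}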
2026
6d16c2f8
AL
2027typedef struct {
2028 void *buffer;
a8170e5e
AK
2029 hwaddr addr;
2030 hwaddr len;
6d16c2f8
AL
2031} BounceBuffer;
2032
2033static BounceBuffer bounce;
2034
ba223c29
AL
2035typedef struct MapClient {
2036 void *opaque;
2037 void (*callback)(void *opaque);
72cf2d4f 2038 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2039} MapClient;
2040
72cf2d4f
BS
2041static QLIST_HEAD(map_client_list, MapClient) map_client_list
2042 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2043
2044void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2045{
7267c094 2046 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2047
2048 client->opaque = opaque;
2049 client->callback = callback;
72cf2d4f 2050 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2051 return client;
2052}
2053
8b9c99d9 2054static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2055{
2056 MapClient *client = (MapClient *)_client;
2057
72cf2d4f 2058 QLIST_REMOVE(client, link);
7267c094 2059 g_free(client);
ba223c29
AL
2060}
2061
2062static void cpu_notify_map_clients(void)
2063{
2064 MapClient *client;
2065
72cf2d4f
BS
2066 while (!QLIST_EMPTY(&map_client_list)) {
2067 client = QLIST_FIRST(&map_client_list);
ba223c29 2068 client->callback(client->opaque);
34d5e948 2069 cpu_unregister_map_client(client);
ba223c29
AL
2070 }
2071}
2072
6d16c2f8
AL
2073/* Map a physical memory region into a host virtual address.
2074 * May map a subset of the requested range, given by and returned in *plen.
2075 * May return NULL if resources needed to perform the mapping are exhausted.
2076 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2077 * Use cpu_register_map_client() to know when retrying the map operation is
2078 * likely to succeed.
6d16c2f8 2079 */
ac1970fb 2080void *address_space_map(AddressSpace *as,
a8170e5e
AK
2081 hwaddr addr,
2082 hwaddr *plen,
ac1970fb 2083 bool is_write)
6d16c2f8 2084{
ac1970fb 2085 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
2086 hwaddr len = *plen;
2087 hwaddr todo = 0;
6d16c2f8 2088 int l;
a8170e5e 2089 hwaddr page;
f3705d53 2090 MemoryRegionSection *section;
f15fbc4b 2091 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2092 ram_addr_t rlen;
2093 void *ret;
6d16c2f8
AL
2094
2095 while (len > 0) {
2096 page = addr & TARGET_PAGE_MASK;
2097 l = (page + TARGET_PAGE_SIZE) - addr;
2098 if (l > len)
2099 l = len;
ac1970fb 2100 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2101
f3705d53 2102 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2103 if (todo || bounce.buffer) {
6d16c2f8
AL
2104 break;
2105 }
2106 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2107 bounce.addr = addr;
2108 bounce.len = l;
2109 if (!is_write) {
ac1970fb 2110 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2111 }
38bee5dc
SS
2112
2113 *plen = l;
2114 return bounce.buffer;
6d16c2f8 2115 }
8ab934f9 2116 if (!todo) {
f3705d53 2117 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2118 + memory_region_section_addr(section, addr);
8ab934f9 2119 }
6d16c2f8
AL
2120
2121 len -= l;
2122 addr += l;
38bee5dc 2123 todo += l;
6d16c2f8 2124 }
8ab934f9
SS
2125 rlen = todo;
2126 ret = qemu_ram_ptr_length(raddr, &rlen);
2127 *plen = rlen;
2128 return ret;
6d16c2f8
AL
2129}
2130
ac1970fb 2131/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2132 * Will also mark the memory as dirty if is_write == 1. access_len gives
2133 * the amount of memory that was actually read or written by the caller.
2134 */
a8170e5e
AK
2135void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2136 int is_write, hwaddr access_len)
6d16c2f8
AL
2137{
2138 if (buffer != bounce.buffer) {
2139 if (is_write) {
e890261f 2140 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2141 while (access_len) {
2142 unsigned l;
2143 l = TARGET_PAGE_SIZE;
2144 if (l > access_len)
2145 l = access_len;
51d7a9eb 2146 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2147 addr1 += l;
2148 access_len -= l;
2149 }
2150 }
868bb33f 2151 if (xen_enabled()) {
e41d7c69 2152 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2153 }
6d16c2f8
AL
2154 return;
2155 }
2156 if (is_write) {
ac1970fb 2157 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2158 }
f8a83245 2159 qemu_vfree(bounce.buffer);
6d16c2f8 2160 bounce.buffer = NULL;
ba223c29 2161 cpu_notify_map_clients();
6d16c2f8 2162}
d0ecd2aa 2163
a8170e5e
AK
2164void *cpu_physical_memory_map(hwaddr addr,
2165 hwaddr *plen,
ac1970fb
AK
2166 int is_write)
2167{
2168 return address_space_map(&address_space_memory, addr, plen, is_write);
2169}
2170
a8170e5e
AK
2171void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2172 int is_write, hwaddr access_len)
ac1970fb
AK
2173{
2174 address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2175}
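/* Editor's note: a hypothetical sketch, not part of exec.c.  It shows the
 * usual zero-copy DMA pattern on top of the map/unmap pair above: map, use
 * the host pointer, unmap with the length actually touched, and fall back to
 * a map-client callback when the single bounce buffer is busy.  The "demo_*"
 * names are made up. */
static void demo_dma_retry(void *opaque)
{
    /* invoked from cpu_notify_map_clients() once the bounce buffer is free;
       a real device would restart its pending transfer here */
}

static void demo_dma_write(hwaddr addr, const uint8_t *data, hwaddr size)
{
    hwaddr plen = size;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        cpu_register_map_client(NULL, demo_dma_retry);
        return;
    }
    memcpy(host, data, plen);                 /* plen may be less than size */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}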
2176
8df1cd07 2177/* warning: addr must be aligned */
a8170e5e 2178static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2179 enum device_endian endian)
8df1cd07 2180{
8df1cd07
FB
2181 uint8_t *ptr;
2182 uint32_t val;
f3705d53 2183 MemoryRegionSection *section;
8df1cd07 2184
ac1970fb 2185 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2186
cc5bea60
BS
2187 if (!(memory_region_is_ram(section->mr) ||
2188 memory_region_is_romd(section->mr))) {
8df1cd07 2189 /* I/O case */
cc5bea60 2190 addr = memory_region_section_addr(section, addr);
37ec01d4 2191 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
2192#if defined(TARGET_WORDS_BIGENDIAN)
2193 if (endian == DEVICE_LITTLE_ENDIAN) {
2194 val = bswap32(val);
2195 }
2196#else
2197 if (endian == DEVICE_BIG_ENDIAN) {
2198 val = bswap32(val);
2199 }
2200#endif
8df1cd07
FB
2201 } else {
2202 /* RAM case */
f3705d53 2203 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2204 & TARGET_PAGE_MASK)
cc5bea60 2205 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2206 switch (endian) {
2207 case DEVICE_LITTLE_ENDIAN:
2208 val = ldl_le_p(ptr);
2209 break;
2210 case DEVICE_BIG_ENDIAN:
2211 val = ldl_be_p(ptr);
2212 break;
2213 default:
2214 val = ldl_p(ptr);
2215 break;
2216 }
8df1cd07
FB
2217 }
2218 return val;
2219}
2220
a8170e5e 2221uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2222{
2223 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2224}
2225
a8170e5e 2226uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2227{
2228 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2229}
2230
a8170e5e 2231uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2232{
2233 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2234}
2235
84b7b8e7 2236/* warning: addr must be aligned */
a8170e5e 2237static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2238 enum device_endian endian)
84b7b8e7 2239{
84b7b8e7
FB
2240 uint8_t *ptr;
2241 uint64_t val;
f3705d53 2242 MemoryRegionSection *section;
84b7b8e7 2243
ac1970fb 2244 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2245
cc5bea60
BS
2246 if (!(memory_region_is_ram(section->mr) ||
2247 memory_region_is_romd(section->mr))) {
84b7b8e7 2248 /* I/O case */
cc5bea60 2249 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
2250
2251 /* XXX This is broken when device endian != cpu endian.
2252 Fix and add "endian" variable check */
84b7b8e7 2253#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2254 val = io_mem_read(section->mr, addr, 4) << 32;
2255 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2256#else
37ec01d4
AK
2257 val = io_mem_read(section->mr, addr, 4);
2258 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
2259#endif
2260 } else {
2261 /* RAM case */
f3705d53 2262 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2263 & TARGET_PAGE_MASK)
cc5bea60 2264 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2265 switch (endian) {
2266 case DEVICE_LITTLE_ENDIAN:
2267 val = ldq_le_p(ptr);
2268 break;
2269 case DEVICE_BIG_ENDIAN:
2270 val = ldq_be_p(ptr);
2271 break;
2272 default:
2273 val = ldq_p(ptr);
2274 break;
2275 }
84b7b8e7
FB
2276 }
2277 return val;
2278}
2279
a8170e5e 2280uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2281{
2282 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2283}
2284
a8170e5e 2285uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2286{
2287 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2288}
2289
a8170e5e 2290uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2291{
2292 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2293}
2294
aab33094 2295/* XXX: optimize */
a8170e5e 2296uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2297{
2298 uint8_t val;
2299 cpu_physical_memory_read(addr, &val, 1);
2300 return val;
2301}
2302
733f0b02 2303/* warning: addr must be aligned */
a8170e5e 2304static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2305 enum device_endian endian)
aab33094 2306{
733f0b02
MT
2307 uint8_t *ptr;
2308 uint64_t val;
f3705d53 2309 MemoryRegionSection *section;
733f0b02 2310
ac1970fb 2311 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2312
cc5bea60
BS
2313 if (!(memory_region_is_ram(section->mr) ||
2314 memory_region_is_romd(section->mr))) {
733f0b02 2315 /* I/O case */
cc5bea60 2316 addr = memory_region_section_addr(section, addr);
37ec01d4 2317 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
2318#if defined(TARGET_WORDS_BIGENDIAN)
2319 if (endian == DEVICE_LITTLE_ENDIAN) {
2320 val = bswap16(val);
2321 }
2322#else
2323 if (endian == DEVICE_BIG_ENDIAN) {
2324 val = bswap16(val);
2325 }
2326#endif
733f0b02
MT
2327 } else {
2328 /* RAM case */
f3705d53 2329 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2330 & TARGET_PAGE_MASK)
cc5bea60 2331 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2332 switch (endian) {
2333 case DEVICE_LITTLE_ENDIAN:
2334 val = lduw_le_p(ptr);
2335 break;
2336 case DEVICE_BIG_ENDIAN:
2337 val = lduw_be_p(ptr);
2338 break;
2339 default:
2340 val = lduw_p(ptr);
2341 break;
2342 }
733f0b02
MT
2343 }
2344 return val;
aab33094
FB
2345}
2346
a8170e5e 2347uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2348{
2349 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2350}
2351
a8170e5e 2352uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2353{
2354 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2355}
2356
a8170e5e 2357uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2358{
2359 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2360}
2361
8df1cd07
FB
2362/* warning: addr must be aligned. The RAM page is not marked as dirty
2363 and the code inside is not invalidated. This is useful if the dirty
2364 bits are used to track modified PTEs. */
a8170e5e 2365void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2366{
8df1cd07 2367 uint8_t *ptr;
f3705d53 2368 MemoryRegionSection *section;
8df1cd07 2369
ac1970fb 2370 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2371
f3705d53 2372 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2373 addr = memory_region_section_addr(section, addr);
f3705d53 2374 if (memory_region_is_ram(section->mr)) {
37ec01d4 2375 section = &phys_sections[phys_section_rom];
06ef3525 2376 }
37ec01d4 2377 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2378 } else {
f3705d53 2379 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2380 & TARGET_PAGE_MASK)
cc5bea60 2381 + memory_region_section_addr(section, addr);
5579c7f3 2382 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2383 stl_p(ptr, val);
74576198
AL
2384
2385 if (unlikely(in_migration)) {
2386 if (!cpu_physical_memory_is_dirty(addr1)) {
2387 /* invalidate code */
2388 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2389 /* set dirty bit */
f7c11b53
YT
2390 cpu_physical_memory_set_dirty_flags(
2391 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2392 }
2393 }
8df1cd07
FB
2394 }
2395}
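/* Editor's note: a hypothetical sketch, not part of exec.c.  This is the
 * kind of caller the comment above has in mind: a page-table walker that
 * sets an "accessed" bit in a guest PTE without disturbing the dirty bitmap
 * or invalidating translated code.  The PTE layout and the 0x20 flag value
 * are made up. */
static void demo_pte_set_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}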
2396
a8170e5e 2397void stq_phys_notdirty(hwaddr addr, uint64_t val)
bc98a7ef 2398{
bc98a7ef 2399 uint8_t *ptr;
f3705d53 2400 MemoryRegionSection *section;
bc98a7ef 2401
ac1970fb 2402 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2403
f3705d53 2404 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2405 addr = memory_region_section_addr(section, addr);
f3705d53 2406 if (memory_region_is_ram(section->mr)) {
37ec01d4 2407 section = &phys_sections[phys_section_rom];
06ef3525 2408 }
bc98a7ef 2409#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2410 io_mem_write(section->mr, addr, val >> 32, 4);
2411 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 2412#else
37ec01d4
AK
2413 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2414 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
2415#endif
2416 } else {
f3705d53 2417 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2418 & TARGET_PAGE_MASK)
cc5bea60 2419 + memory_region_section_addr(section, addr));
bc98a7ef
JM
2420 stq_p(ptr, val);
2421 }
2422}
2423
8df1cd07 2424/* warning: addr must be aligned */
a8170e5e 2425static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2426 enum device_endian endian)
8df1cd07 2427{
8df1cd07 2428 uint8_t *ptr;
f3705d53 2429 MemoryRegionSection *section;
8df1cd07 2430
ac1970fb 2431 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2432
f3705d53 2433 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2434 addr = memory_region_section_addr(section, addr);
f3705d53 2435 if (memory_region_is_ram(section->mr)) {
37ec01d4 2436 section = &phys_sections[phys_section_rom];
06ef3525 2437 }
1e78bcc1
AG
2438#if defined(TARGET_WORDS_BIGENDIAN)
2439 if (endian == DEVICE_LITTLE_ENDIAN) {
2440 val = bswap32(val);
2441 }
2442#else
2443 if (endian == DEVICE_BIG_ENDIAN) {
2444 val = bswap32(val);
2445 }
2446#endif
37ec01d4 2447 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
2448 } else {
2449 unsigned long addr1;
f3705d53 2450 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2451 + memory_region_section_addr(section, addr);
8df1cd07 2452 /* RAM case */
5579c7f3 2453 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2454 switch (endian) {
2455 case DEVICE_LITTLE_ENDIAN:
2456 stl_le_p(ptr, val);
2457 break;
2458 case DEVICE_BIG_ENDIAN:
2459 stl_be_p(ptr, val);
2460 break;
2461 default:
2462 stl_p(ptr, val);
2463 break;
2464 }
51d7a9eb 2465 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2466 }
2467}
2468
a8170e5e 2469void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2470{
2471 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2472}
2473
a8170e5e 2474void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2475{
2476 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2477}
2478
a8170e5e 2479void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2480{
2481 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2482}
2483
aab33094 2484/* XXX: optimize */
a8170e5e 2485void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2486{
2487 uint8_t v = val;
2488 cpu_physical_memory_write(addr, &v, 1);
2489}
2490
733f0b02 2491/* warning: addr must be aligned */
a8170e5e 2492static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2493 enum device_endian endian)
aab33094 2494{
733f0b02 2495 uint8_t *ptr;
f3705d53 2496 MemoryRegionSection *section;
733f0b02 2497
ac1970fb 2498 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2499
f3705d53 2500 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2501 addr = memory_region_section_addr(section, addr);
f3705d53 2502 if (memory_region_is_ram(section->mr)) {
37ec01d4 2503 section = &phys_sections[phys_section_rom];
06ef3525 2504 }
1e78bcc1
AG
2505#if defined(TARGET_WORDS_BIGENDIAN)
2506 if (endian == DEVICE_LITTLE_ENDIAN) {
2507 val = bswap16(val);
2508 }
2509#else
2510 if (endian == DEVICE_BIG_ENDIAN) {
2511 val = bswap16(val);
2512 }
2513#endif
37ec01d4 2514 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
2515 } else {
2516 unsigned long addr1;
f3705d53 2517 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2518 + memory_region_section_addr(section, addr);
733f0b02
MT
2519 /* RAM case */
2520 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2521 switch (endian) {
2522 case DEVICE_LITTLE_ENDIAN:
2523 stw_le_p(ptr, val);
2524 break;
2525 case DEVICE_BIG_ENDIAN:
2526 stw_be_p(ptr, val);
2527 break;
2528 default:
2529 stw_p(ptr, val);
2530 break;
2531 }
51d7a9eb 2532 invalidate_and_set_dirty(addr1, 2);
733f0b02 2533 }
aab33094
FB
2534}
2535
a8170e5e 2536void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2537{
2538 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2539}
2540
a8170e5e 2541void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2542{
2543 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2544}
2545
a8170e5e 2546void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2547{
2548 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2549}
2550
aab33094 2551/* XXX: optimize */
a8170e5e 2552void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2553{
2554 val = tswap64(val);
71d2b725 2555 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2556}
2557
a8170e5e 2558void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2559{
2560 val = cpu_to_le64(val);
2561 cpu_physical_memory_write(addr, &val, 8);
2562}
2563
a8170e5e 2564void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2565{
2566 val = cpu_to_be64(val);
2567 cpu_physical_memory_write(addr, &val, 8);
2568}
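/* Editor's note: a hypothetical sketch, not part of exec.c.  The fixed-endian
 * accessors above let a device register be updated independently of the
 * target's native byte order; the register address and bit value are
 * placeholders. */
static void demo_set_status_bit(hwaddr reg_addr, uint32_t bit)
{
    uint32_t v = ldl_le_phys(reg_addr);   /* 32-bit little-endian load  */

    stl_le_phys(reg_addr, v | bit);       /* 32-bit little-endian store */
}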
2569
5e2972fd 2570/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2571int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2572 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2573{
2574 int l;
a8170e5e 2575 hwaddr phys_addr;
9b3c35e0 2576 target_ulong page;
13eb76e0
FB
2577
2578 while (len > 0) {
2579 page = addr & TARGET_PAGE_MASK;
2580 phys_addr = cpu_get_phys_page_debug(env, page);
2581 /* if no physical page mapped, return an error */
2582 if (phys_addr == -1)
2583 return -1;
2584 l = (page + TARGET_PAGE_SIZE) - addr;
2585 if (l > len)
2586 l = len;
5e2972fd 2587 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2588 if (is_write)
2589 cpu_physical_memory_write_rom(phys_addr, buf, l);
2590 else
5e2972fd 2591 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2592 len -= l;
2593 buf += l;
2594 addr += l;
2595 }
2596 return 0;
2597}
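/* Editor's note: a hypothetical sketch, not part of exec.c.  A debugger stub
 * (gdbstub-style) reads guest virtual memory through the helper above; the
 * "demo_*" name and the caller-provided buffer are assumptions. */
static int demo_debug_read(CPUArchState *env, target_ulong vaddr,
                           uint8_t *out, int len)
{
    if (cpu_memory_rw_debug(env, vaddr, out, len, 0) < 0) {
        return -1;                /* some page in the range is unmapped */
    }
    return 0;
}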
a68fe89c 2598#endif
13eb76e0 2599
8e4a424b
BS
2600#if !defined(CONFIG_USER_ONLY)
2601
2602/*
2603 * A helper function for the _utterly broken_ virtio device model to find out if
2604 * it's running on a big endian machine. Don't do this at home kids!
2605 */
2606bool virtio_is_big_endian(void);
2607bool virtio_is_big_endian(void)
2608{
2609#if defined(TARGET_WORDS_BIGENDIAN)
2610 return true;
2611#else
2612 return false;
2613#endif
2614}
2615
2616#endif
2617
76f35538 2618#ifndef CONFIG_USER_ONLY
a8170e5e 2619bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2620{
2621 MemoryRegionSection *section;
2622
ac1970fb
AK
2623 section = phys_page_find(address_space_memory.dispatch,
2624 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
2625
2626 return !(memory_region_is_ram(section->mr) ||
2627 memory_region_is_romd(section->mr));
2628}
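/* Editor's note: a hypothetical sketch, not part of exec.c.  A dump or
 * introspection helper can use cpu_physical_memory_is_io() to skip
 * device-backed pages and only touch RAM/ROM; masking down to page
 * granularity mirrors how the dispatch above resolves addresses. */
static bool demo_page_is_dumpable(hwaddr phys_addr)
{
    return !cpu_physical_memory_is_io(phys_addr & TARGET_PAGE_MASK);
}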
2629#endif