/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
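
/*
 * Descriptive note on the structures above: d->phys_map is the root of a
 * radix tree with P_L2_LEVELS levels and L2_SIZE children per node, indexed
 * by physical page number.  phys_page_set_level() lazily allocates interior
 * nodes from the phys_map_nodes[] pool and, for ranges that are aligned and
 * at least one full step wide, stores a single leaf (is_leaf set, ptr = the
 * phys_sections[] index) instead of recursing, so large regions cost only a
 * handful of nodes.  phys_page_find() walks the same levels top-down and
 * falls back to phys_section_unassigned when it hits PHYS_MAP_NODE_NIL.
 */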
204
e5548617
BS
205bool memory_region_is_unassigned(MemoryRegion *mr)
206{
207 return mr != &io_mem_ram && mr != &io_mem_rom
208 && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 209 && mr != &io_mem_watch;
fd6ce8f6 210}
5b6dd868 211#endif
fd6ce8f6 212
5b6dd868 213void cpu_exec_init_all(void)
fdbb84d1 214{
5b6dd868 215#if !defined(CONFIG_USER_ONLY)
b2a8658e 216 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
217 memory_map_init();
218 io_mem_init();
fdbb84d1 219#endif
5b6dd868 220}
fdbb84d1 221
5b6dd868
BS
222#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
223
224static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 225{
5b6dd868 226 CPUArchState *env = opaque;
a513fe19 227
5b6dd868
BS
228 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
229 version_id is increased. */
230 env->interrupt_request &= ~0x01;
231 tlb_flush(env, 1);
232
233 return 0;
a513fe19 234}
7501267e 235
5b6dd868
BS
236static const VMStateDescription vmstate_cpu_common = {
237 .name = "cpu_common",
238 .version_id = 1,
239 .minimum_version_id = 1,
240 .minimum_version_id_old = 1,
241 .post_load = cpu_common_post_load,
242 .fields = (VMStateField []) {
243 VMSTATE_UINT32(halted, CPUArchState),
244 VMSTATE_UINT32(interrupt_request, CPUArchState),
245 VMSTATE_END_OF_LIST()
246 }
247};
248#endif
ea041c0e 249
5b6dd868 250CPUArchState *qemu_get_cpu(int cpu)
ea041c0e 251{
5b6dd868 252 CPUArchState *env = first_cpu;
ea041c0e 253
5b6dd868
BS
254 while (env) {
255 if (env->cpu_index == cpu)
256 break;
257 env = env->next_cpu;
ea041c0e 258 }
5b6dd868
BS
259
260 return env;
ea041c0e
FB
261}

void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
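
/*
 * Illustrative use of the watchpoint API above (a sketch of a typical
 * caller such as the gdbstub, not code from this file; the flag choice is
 * an assumption about typical use):
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp)) {
 *         // fails unless len is a power of two and addr is aligned to it
 *     }
 *     ...
 *     cpu_watchpoint_remove(env, addr, 4, BP_GDB | BP_MEM_WRITE);
 *
 * The alignment and length constraints come from the sanity check in
 * cpu_watchpoint_insert() above.
 */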

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);

}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
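
/*
 * Summary of the iotlb encoding produced above: for RAM-backed sections the
 * value is the page-aligned ram_addr_t OR'ed with a small section index
 * (phys_section_notdirty or phys_section_rom) in the low bits; for MMIO it
 * is the section's index into phys_sections[] plus the offset within the
 * section.  Pages covered by a watchpoint are redirected to
 * phys_section_watch and flagged TLB_MMIO so every access traps into the
 * watch_mem handlers further down in this file.
 */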
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
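
/*
 * mem_add() splits an incoming MemoryRegionSection into three parts: an
 * unaligned head and tail that are handled page-by-page through subpages
 * (register_subpage), and an aligned, page-multiple middle that is mapped
 * in one call to register_multipage().  A subpage_t is only materialised
 * for a guest page the first time something smaller than a page lands in
 * it; otherwise the page keeps a plain section leaf in the phys map.
 */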

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif
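
/*
 * The -mem-path code above backs guest RAM with an unlinked temporary file
 * on a hugetlbfs mount.  An illustrative host-side setup and invocation
 * (assumed, not part of this file) would be:
 *
 *     mount -t hugetlbfs none /dev/hugepages
 *     qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages ...
 *
 * gethugepagesize() reads the mount's block size via statfs(), and the
 * requested size is rounded up to a multiple of that page size before the
 * mmap().
 */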

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
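
/*
 * find_ram_offset() is a best-fit search over the gaps between existing
 * RAMBlocks: for each block it computes the distance to the nearest block
 * that starts after it and keeps the smallest gap that still fits the
 * request, which keeps the ram_addr_t space compact across repeated
 * allocate/free cycles.
 */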

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
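
/*
 * Illustrative caller sketch (the helpers named here live elsewhere in the
 * tree and are assumptions about typical use, not definitions from this
 * file): a board model usually ends up in qemu_ram_alloc() indirectly,
 * roughly as
 *
 *     MemoryRegion *ram = g_malloc0(sizeof(*ram));
 *     memory_region_init_ram(ram, "pc.ram", ram_size);
 *     vmstate_register_ram_global(ram);   // fills in the RAMBlock idstr
 *
 * qemu_ram_alloc_from_ptr() is the variant used when the backing host
 * memory already exists and should not be allocated here.
 */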

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
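
/*
 * qemu_get_ram_ptr() keeps a one-entry MRU cache (ram_list.mru_block) in
 * front of the linear block search.  qemu_safe_ram_ptr() below does the
 * same lookup without touching the cache; judging from its callers (the
 * TLB dirty-reset path above) this is apparently so that those paths
 * neither depend on nor disturb mru_block, though the "??? Is this still
 * necessary?" note suggests the distinction may be historical.  Under Xen
 * the host mapping is created lazily through xen_map_cache() rather than
 * being a plain host pointer.
 */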

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
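
/*
 * The notdirty handler above is how TCG notices writes to pages that hold
 * translated code or that migration is tracking: such pages are entered in
 * the TLB so that stores go through notdirty_mem_write(), which invalidates
 * any TBs on the page, performs the store on the host RAM pointer, sets the
 * dirty flags, and switches the TLB entry back to a fast RAM mapping once
 * the page is fully dirty (0xff).
 */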

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
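
/*
 * check_watchpoint() runs from the watch_mem handlers below while a TB is
 * executing.  On a hit it either stops before the access by raising
 * EXCP_DEBUG immediately (BP_STOP_BEFORE_ACCESS), or regenerates a
 * single-instruction TB and resumes so the access completes first; the
 * re-entry path at the top then raises the pending debug interrupt.
 */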

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
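
/*
 * A subpage_t covers exactly one guest page whose contents are split
 * between several MemoryRegionSections (for example a small MMIO BAR
 * sharing a page with RAM).  sub_section[] maps each byte offset within the
 * page to a phys_sections[] index; offsets that were never registered point
 * at phys_section_unassigned, and RAM-backed pieces are rewritten to use
 * subpage_ram_ops so they still reach host memory directly rather than the
 * RAM fast path that a full-page mapping would use.
 */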

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

ac1970fb
AK
1684static void mem_begin(MemoryListener *listener)
1685{
1686 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1687
1688 destroy_all_mappings(d);
1689 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1690}
1691
50c1e149
AK
1692static void core_begin(MemoryListener *listener)
1693{
5312bd8b
AK
1694 phys_sections_clear();
1695 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
1696 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1697 phys_section_rom = dummy_section(&io_mem_rom);
1698 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
1699}
1700
1d71148e 1701static void tcg_commit(MemoryListener *listener)
50c1e149 1702{
9349b4f9 1703 CPUArchState *env;
117712c3
AK
1704
1705 /* since each CPU stores ram addresses in its TLB cache, we must
1706 reset the modified entries */
1707 /* XXX: slow ! */
1708 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1709 tlb_flush(env, 1);
1710 }
50c1e149
AK
1711}
1712
93632747
AK
1713static void core_log_global_start(MemoryListener *listener)
1714{
1715 cpu_physical_memory_set_dirty_tracking(1);
1716}
1717
1718static void core_log_global_stop(MemoryListener *listener)
1719{
1720 cpu_physical_memory_set_dirty_tracking(0);
1721}
1722
4855d41a
AK
1723static void io_region_add(MemoryListener *listener,
1724 MemoryRegionSection *section)
1725{
a2d33521
AK
1726 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1727
1728 mrio->mr = section->mr;
1729 mrio->offset = section->offset_within_region;
1730 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 1731 section->offset_within_address_space, section->size);
a2d33521 1732 ioport_register(&mrio->iorange);
4855d41a
AK
1733}
1734
1735static void io_region_del(MemoryListener *listener,
1736 MemoryRegionSection *section)
1737{
1738 isa_unassign_ioport(section->offset_within_address_space, section->size);
1739}
1740
93632747 1741static MemoryListener core_memory_listener = {
50c1e149 1742 .begin = core_begin,
93632747
AK
1743 .log_global_start = core_log_global_start,
1744 .log_global_stop = core_log_global_stop,
ac1970fb 1745 .priority = 1,
93632747
AK
1746};
1747
4855d41a
AK
1748static MemoryListener io_memory_listener = {
1749 .region_add = io_region_add,
1750 .region_del = io_region_del,
4855d41a
AK
1751 .priority = 0,
1752};
1753
1d71148e
AK
1754static MemoryListener tcg_memory_listener = {
1755 .commit = tcg_commit,
1756};
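/* Illustrative sketch (not part of exec.c): a minimal MemoryListener that only
 * logs topology changes, registered the same way as the core/io/tcg listeners
 * in memory_map_init() below. The names "log_region_add" and
 * "log_memory_listener" are hypothetical. */
#if 0 /* example only */
static void log_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    printf("region added at " TARGET_FMT_plx ", size %" PRIu64 "\n",
           section->offset_within_address_space,
           (uint64_t)section->size);
}

static MemoryListener log_memory_listener = {
    .region_add = log_region_add,
    .priority = 10,
};

/* somewhere during machine init: */
/*     memory_listener_register(&log_memory_listener, &address_space_memory); */
#endif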
1757
ac1970fb
AK
1758void address_space_init_dispatch(AddressSpace *as)
1759{
1760 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1761
1762 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1763 d->listener = (MemoryListener) {
1764 .begin = mem_begin,
1765 .region_add = mem_add,
1766 .region_nop = mem_add,
1767 .priority = 0,
1768 };
1769 as->dispatch = d;
1770 memory_listener_register(&d->listener, as);
1771}
1772
83f3c251
AK
1773void address_space_destroy_dispatch(AddressSpace *as)
1774{
1775 AddressSpaceDispatch *d = as->dispatch;
1776
1777 memory_listener_unregister(&d->listener);
1778 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1779 g_free(d);
1780 as->dispatch = NULL;
1781}
1782
62152b8a
AK
1783static void memory_map_init(void)
1784{
7267c094 1785 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1786 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1787 address_space_init(&address_space_memory, system_memory);
1788 address_space_memory.name = "memory";
309cb471 1789
7267c094 1790 system_io = g_malloc(sizeof(*system_io));
309cb471 1791 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1792 address_space_init(&address_space_io, system_io);
1793 address_space_io.name = "I/O";
93632747 1794
f6790af6
AK
1795 memory_listener_register(&core_memory_listener, &address_space_memory);
1796 memory_listener_register(&io_memory_listener, &address_space_io);
1797 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1798
1799 dma_context_init(&dma_context_memory, &address_space_memory,
1800 NULL, NULL, NULL);
62152b8a
AK
1801}
1802
1803MemoryRegion *get_system_memory(void)
1804{
1805 return system_memory;
1806}
1807
309cb471
AK
1808MemoryRegion *get_system_io(void)
1809{
1810 return system_io;
1811}
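/* Illustrative sketch (not part of exec.c): how board code typically plugs RAM
 * into the tree returned by get_system_memory(). The region name, size and the
 * "example_board_ram_init" function are made up for the example. */
#if 0 /* example only */
static void example_board_ram_init(void)
{
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, "example.ram", 128 * 1024 * 1024);
    vmstate_register_ram_global(ram);
    memory_region_add_subregion(sysmem, 0, ram);
}
#endif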
1812
e2eef170
PB
1813#endif /* !defined(CONFIG_USER_ONLY) */
1814
13eb76e0
FB
1815/* physical memory access (slow version, mainly for debug) */
1816#if defined(CONFIG_USER_ONLY)
9349b4f9 1817int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1818 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1819{
1820 int l, flags;
1821 target_ulong page;
53a5960a 1822 void * p;
13eb76e0
FB
1823
1824 while (len > 0) {
1825 page = addr & TARGET_PAGE_MASK;
1826 l = (page + TARGET_PAGE_SIZE) - addr;
1827 if (l > len)
1828 l = len;
1829 flags = page_get_flags(page);
1830 if (!(flags & PAGE_VALID))
a68fe89c 1831 return -1;
13eb76e0
FB
1832 if (is_write) {
1833 if (!(flags & PAGE_WRITE))
a68fe89c 1834 return -1;
579a97f7 1835 /* XXX: this code should not depend on lock_user */
72fb7daa 1836 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1837 return -1;
72fb7daa
AJ
1838 memcpy(p, buf, l);
1839 unlock_user(p, addr, l);
13eb76e0
FB
1840 } else {
1841 if (!(flags & PAGE_READ))
a68fe89c 1842 return -1;
579a97f7 1843 /* XXX: this code should not depend on lock_user */
72fb7daa 1844 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1845 return -1;
72fb7daa 1846 memcpy(buf, p, l);
5b257578 1847 unlock_user(p, addr, 0);
13eb76e0
FB
1848 }
1849 len -= l;
1850 buf += l;
1851 addr += l;
1852 }
a68fe89c 1853 return 0;
13eb76e0 1854}
8df1cd07 1855
13eb76e0 1856#else
51d7a9eb 1857
a8170e5e
AK
1858static void invalidate_and_set_dirty(hwaddr addr,
1859 hwaddr length)
51d7a9eb
AP
1860{
1861 if (!cpu_physical_memory_is_dirty(addr)) {
1862 /* invalidate code */
1863 tb_invalidate_phys_page_range(addr, addr + length, 0);
1864 /* set dirty bit */
1865 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1866 }
e226939d 1867 xen_modified_memory(addr, length);
51d7a9eb
AP
1868}
1869
a8170e5e 1870void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1871 int len, bool is_write)
13eb76e0 1872{
ac1970fb 1873 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 1874 int l;
13eb76e0
FB
1875 uint8_t *ptr;
1876 uint32_t val;
a8170e5e 1877 hwaddr page;
f3705d53 1878 MemoryRegionSection *section;
3b46e624 1879
13eb76e0
FB
1880 while (len > 0) {
1881 page = addr & TARGET_PAGE_MASK;
1882 l = (page + TARGET_PAGE_SIZE) - addr;
1883 if (l > len)
1884 l = len;
ac1970fb 1885 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1886
13eb76e0 1887 if (is_write) {
f3705d53 1888 if (!memory_region_is_ram(section->mr)) {
a8170e5e 1889 hwaddr addr1;
cc5bea60 1890 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
1891 /* XXX: could force cpu_single_env to NULL to avoid
1892 potential bugs */
6c2934db 1893 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 1894 /* 32 bit write access */
c27004ec 1895 val = ldl_p(buf);
37ec01d4 1896 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 1897 l = 4;
6c2934db 1898 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 1899 /* 16 bit write access */
c27004ec 1900 val = lduw_p(buf);
37ec01d4 1901 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
1902 l = 2;
1903 } else {
1c213d19 1904 /* 8 bit write access */
c27004ec 1905 val = ldub_p(buf);
37ec01d4 1906 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
1907 l = 1;
1908 }
f3705d53 1909 } else if (!section->readonly) {
8ca5692d 1910 ram_addr_t addr1;
f3705d53 1911 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 1912 + memory_region_section_addr(section, addr);
13eb76e0 1913 /* RAM case */
5579c7f3 1914 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1915 memcpy(ptr, buf, l);
51d7a9eb 1916 invalidate_and_set_dirty(addr1, l);
050a0ddf 1917 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1918 }
1919 } else {
cc5bea60
BS
1920 if (!(memory_region_is_ram(section->mr) ||
1921 memory_region_is_romd(section->mr))) {
a8170e5e 1922 hwaddr addr1;
13eb76e0 1923 /* I/O case */
cc5bea60 1924 addr1 = memory_region_section_addr(section, addr);
6c2934db 1925 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 1926 /* 32 bit read access */
37ec01d4 1927 val = io_mem_read(section->mr, addr1, 4);
c27004ec 1928 stl_p(buf, val);
13eb76e0 1929 l = 4;
6c2934db 1930 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 1931 /* 16 bit read access */
37ec01d4 1932 val = io_mem_read(section->mr, addr1, 2);
c27004ec 1933 stw_p(buf, val);
13eb76e0
FB
1934 l = 2;
1935 } else {
1c213d19 1936 /* 8 bit read access */
37ec01d4 1937 val = io_mem_read(section->mr, addr1, 1);
c27004ec 1938 stb_p(buf, val);
13eb76e0
FB
1939 l = 1;
1940 }
1941 } else {
1942 /* RAM case */
0a1b357f 1943 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
1944 + memory_region_section_addr(section,
1945 addr));
f3705d53 1946 memcpy(buf, ptr, l);
050a0ddf 1947 qemu_put_ram_ptr(ptr);
13eb76e0
FB
1948 }
1949 }
1950 len -= l;
1951 buf += l;
1952 addr += l;
1953 }
1954}
8df1cd07 1955
a8170e5e 1956void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1957 const uint8_t *buf, int len)
1958{
1959 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1960}
1961
1962/**
1963 * address_space_read: read from an address space.
1964 *
1965 * @as: #AddressSpace to be accessed
1966 * @addr: address within that address space
1967 * @buf: buffer with the data transferred
 * @len: length of the data transferred, in bytes
1968 */
a8170e5e 1969void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
1970{
1971 address_space_rw(as, addr, buf, len, false);
1972}
1973
1974
a8170e5e 1975void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1976 int len, int is_write)
1977{
1978 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1979}
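/* Illustrative sketch (not part of exec.c): callers normally go through the
 * cpu_physical_memory_read()/write() wrappers (declared in cpu-common.h), as
 * ldub_phys() and stb_phys() below do, rather than calling address_space_rw()
 * directly. The guest address and "example_poke_guest_ram" are placeholders. */
#if 0 /* example only */
static void example_poke_guest_ram(void)
{
    uint32_t magic;

    cpu_physical_memory_read(0x1000, &magic, sizeof(magic));
    magic |= 1;
    cpu_physical_memory_write(0x1000, &magic, sizeof(magic));
}
#endif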
1980
d0ecd2aa 1981/* used for ROM loading : can write in RAM and ROM */
a8170e5e 1982void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1983 const uint8_t *buf, int len)
1984{
ac1970fb 1985 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
1986 int l;
1987 uint8_t *ptr;
a8170e5e 1988 hwaddr page;
f3705d53 1989 MemoryRegionSection *section;
3b46e624 1990
d0ecd2aa
FB
1991 while (len > 0) {
1992 page = addr & TARGET_PAGE_MASK;
1993 l = (page + TARGET_PAGE_SIZE) - addr;
1994 if (l > len)
1995 l = len;
ac1970fb 1996 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 1997
cc5bea60
BS
1998 if (!(memory_region_is_ram(section->mr) ||
1999 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
2000 /* do nothing */
2001 } else {
2002 unsigned long addr1;
f3705d53 2003 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 2004 + memory_region_section_addr(section, addr);
d0ecd2aa 2005 /* ROM/RAM case */
5579c7f3 2006 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2007 memcpy(ptr, buf, l);
51d7a9eb 2008 invalidate_and_set_dirty(addr1, l);
050a0ddf 2009 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
2010 }
2011 len -= l;
2012 buf += l;
2013 addr += l;
2014 }
2015}
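/* Illustrative sketch (not part of exec.c): firmware loaders use
 * cpu_physical_memory_write_rom() so the blob also lands in ROM-backed
 * regions, which cpu_physical_memory_write() would skip. The address,
 * "bios_blob" and "bios_len" are hypothetical. */
#if 0 /* example only */
    cpu_physical_memory_write_rom(0xfffc0000, bios_blob, bios_len);
#endif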
2016
6d16c2f8
AL
2017typedef struct {
2018 void *buffer;
a8170e5e
AK
2019 hwaddr addr;
2020 hwaddr len;
6d16c2f8
AL
2021} BounceBuffer;
2022
2023static BounceBuffer bounce;
2024
ba223c29
AL
2025typedef struct MapClient {
2026 void *opaque;
2027 void (*callback)(void *opaque);
72cf2d4f 2028 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2029} MapClient;
2030
72cf2d4f
BS
2031static QLIST_HEAD(map_client_list, MapClient) map_client_list
2032 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2033
2034void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2035{
7267c094 2036 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2037
2038 client->opaque = opaque;
2039 client->callback = callback;
72cf2d4f 2040 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2041 return client;
2042}
2043
8b9c99d9 2044static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2045{
2046 MapClient *client = (MapClient *)_client;
2047
72cf2d4f 2048 QLIST_REMOVE(client, link);
7267c094 2049 g_free(client);
ba223c29
AL
2050}
2051
2052static void cpu_notify_map_clients(void)
2053{
2054 MapClient *client;
2055
72cf2d4f
BS
2056 while (!QLIST_EMPTY(&map_client_list)) {
2057 client = QLIST_FIRST(&map_client_list);
ba223c29 2058 client->callback(client->opaque);
34d5e948 2059 cpu_unregister_map_client(client);
ba223c29
AL
2060 }
2061}
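/* Illustrative sketch (not part of exec.c): a caller whose map attempt failed
 * (NULL return, typically because the single bounce buffer is busy) can
 * register a callback to learn when retrying is likely to succeed.
 * "ExampleDMAState", "example_restart_transfer" and "example_dma_retry" are
 * hypothetical. */
#if 0 /* example only */
static void example_dma_retry(void *opaque)
{
    ExampleDMAState *s = opaque;        /* hypothetical device state */

    example_restart_transfer(s);        /* hypothetical: redo map + transfer */
}

/* after address_space_map()/cpu_physical_memory_map() returned NULL: */
/*     cpu_register_map_client(s, example_dma_retry); */
#endif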
2062
6d16c2f8
AL
2063/* Map a physical memory region into a host virtual address.
2064 * May map a subset of the requested range, given by and returned in *plen.
2065 * May return NULL if resources needed to perform the mapping are exhausted.
2066 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2067 * Use cpu_register_map_client() to know when retrying the map operation is
2068 * likely to succeed.
6d16c2f8 2069 */
ac1970fb 2070void *address_space_map(AddressSpace *as,
a8170e5e
AK
2071 hwaddr addr,
2072 hwaddr *plen,
ac1970fb 2073 bool is_write)
6d16c2f8 2074{
ac1970fb 2075 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
2076 hwaddr len = *plen;
2077 hwaddr todo = 0;
6d16c2f8 2078 int l;
a8170e5e 2079 hwaddr page;
f3705d53 2080 MemoryRegionSection *section;
f15fbc4b 2081 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2082 ram_addr_t rlen;
2083 void *ret;
6d16c2f8
AL
2084
2085 while (len > 0) {
2086 page = addr & TARGET_PAGE_MASK;
2087 l = (page + TARGET_PAGE_SIZE) - addr;
2088 if (l > len)
2089 l = len;
ac1970fb 2090 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 2091
f3705d53 2092 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 2093 if (todo || bounce.buffer) {
6d16c2f8
AL
2094 break;
2095 }
2096 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2097 bounce.addr = addr;
2098 bounce.len = l;
2099 if (!is_write) {
ac1970fb 2100 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2101 }
38bee5dc
SS
2102
2103 *plen = l;
2104 return bounce.buffer;
6d16c2f8 2105 }
8ab934f9 2106 if (!todo) {
f3705d53 2107 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 2108 + memory_region_section_addr(section, addr);
8ab934f9 2109 }
6d16c2f8
AL
2110
2111 len -= l;
2112 addr += l;
38bee5dc 2113 todo += l;
6d16c2f8 2114 }
8ab934f9
SS
2115 rlen = todo;
2116 ret = qemu_ram_ptr_length(raddr, &rlen);
2117 *plen = rlen;
2118 return ret;
6d16c2f8
AL
2119}
2120
ac1970fb 2121/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2122 * Will also mark the memory as dirty if is_write == 1. access_len gives
2123 * the amount of memory that was actually read or written by the caller.
2124 */
a8170e5e
AK
2125void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2126 int is_write, hwaddr access_len)
6d16c2f8
AL
2127{
2128 if (buffer != bounce.buffer) {
2129 if (is_write) {
e890261f 2130 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2131 while (access_len) {
2132 unsigned l;
2133 l = TARGET_PAGE_SIZE;
2134 if (l > access_len)
2135 l = access_len;
51d7a9eb 2136 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2137 addr1 += l;
2138 access_len -= l;
2139 }
2140 }
868bb33f 2141 if (xen_enabled()) {
e41d7c69 2142 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2143 }
6d16c2f8
AL
2144 return;
2145 }
2146 if (is_write) {
ac1970fb 2147 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2148 }
f8a83245 2149 qemu_vfree(bounce.buffer);
6d16c2f8 2150 bounce.buffer = NULL;
ba223c29 2151 cpu_notify_map_clients();
6d16c2f8 2152}
d0ecd2aa 2153
a8170e5e
AK
2154void *cpu_physical_memory_map(hwaddr addr,
2155 hwaddr *plen,
ac1970fb
AK
2156 int is_write)
2157{
2158 return address_space_map(&address_space_memory, addr, plen, is_write);
2159}
2160
a8170e5e
AK
2161void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2162 int is_write, hwaddr access_len)
ac1970fb
AK
2163{
2164 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2165}
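/* Illustrative sketch (not part of exec.c): the usual map/access/unmap pattern
 * for zero-copy access to guest memory, per the comments above address_space_map().
 * Note that *plen may come back smaller than requested and that a NULL return
 * means "retry later" via cpu_register_map_client(). The address, length and
 * "example_dma_to_guest" are made up. */
#if 0 /* example only */
static void example_dma_to_guest(void)
{
    hwaddr addr = 0x100000;
    hwaddr len = 4096;
    void *ptr = cpu_physical_memory_map(addr, &len, 1 /* is_write */);

    if (!ptr) {
        return;    /* bounce buffer busy: register a map client and retry */
    }
    memset(ptr, 0, len);                      /* touch at most 'len' bytes */
    cpu_physical_memory_unmap(ptr, len, 1, len);
}
#endif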
2166
8df1cd07 2167/* warning: addr must be aligned */
a8170e5e 2168static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2169 enum device_endian endian)
8df1cd07 2170{
8df1cd07
FB
2171 uint8_t *ptr;
2172 uint32_t val;
f3705d53 2173 MemoryRegionSection *section;
8df1cd07 2174
ac1970fb 2175 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2176
cc5bea60
BS
2177 if (!(memory_region_is_ram(section->mr) ||
2178 memory_region_is_romd(section->mr))) {
8df1cd07 2179 /* I/O case */
cc5bea60 2180 addr = memory_region_section_addr(section, addr);
37ec01d4 2181 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
2182#if defined(TARGET_WORDS_BIGENDIAN)
2183 if (endian == DEVICE_LITTLE_ENDIAN) {
2184 val = bswap32(val);
2185 }
2186#else
2187 if (endian == DEVICE_BIG_ENDIAN) {
2188 val = bswap32(val);
2189 }
2190#endif
8df1cd07
FB
2191 } else {
2192 /* RAM case */
f3705d53 2193 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2194 & TARGET_PAGE_MASK)
cc5bea60 2195 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2196 switch (endian) {
2197 case DEVICE_LITTLE_ENDIAN:
2198 val = ldl_le_p(ptr);
2199 break;
2200 case DEVICE_BIG_ENDIAN:
2201 val = ldl_be_p(ptr);
2202 break;
2203 default:
2204 val = ldl_p(ptr);
2205 break;
2206 }
8df1cd07
FB
2207 }
2208 return val;
2209}
2210
a8170e5e 2211uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2212{
2213 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2214}
2215
a8170e5e 2216uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2217{
2218 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2219}
2220
a8170e5e 2221uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2222{
2223 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2224}
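/* Illustrative sketch (not part of exec.c): device emulation can pick the
 * accessor matching the guest-visible layout instead of byte-swapping by hand.
 * The descriptor addresses and "example_read_descriptor" are placeholders. */
#if 0 /* example only */
static void example_read_descriptor(void)
{
    uint32_t flags_le = ldl_le_phys(0x2000);   /* little-endian field */
    uint32_t flags_be = ldl_be_phys(0x2004);   /* big-endian field */
    uint32_t count    = lduw_phys(0x2008);     /* target-native order */

    (void)flags_le; (void)flags_be; (void)count;
}
#endif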
2225
84b7b8e7 2226/* warning: addr must be aligned */
a8170e5e 2227static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2228 enum device_endian endian)
84b7b8e7 2229{
84b7b8e7
FB
2230 uint8_t *ptr;
2231 uint64_t val;
f3705d53 2232 MemoryRegionSection *section;
84b7b8e7 2233
ac1970fb 2234 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2235
cc5bea60
BS
2236 if (!(memory_region_is_ram(section->mr) ||
2237 memory_region_is_romd(section->mr))) {
84b7b8e7 2238 /* I/O case */
cc5bea60 2239 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
2240
2241 /* XXX This is broken when device endian != cpu endian.
2242 Fix and add "endian" variable check */
84b7b8e7 2243#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2244 val = io_mem_read(section->mr, addr, 4) << 32;
2245 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 2246#else
37ec01d4
AK
2247 val = io_mem_read(section->mr, addr, 4);
2248 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
2249#endif
2250 } else {
2251 /* RAM case */
f3705d53 2252 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2253 & TARGET_PAGE_MASK)
cc5bea60 2254 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2255 switch (endian) {
2256 case DEVICE_LITTLE_ENDIAN:
2257 val = ldq_le_p(ptr);
2258 break;
2259 case DEVICE_BIG_ENDIAN:
2260 val = ldq_be_p(ptr);
2261 break;
2262 default:
2263 val = ldq_p(ptr);
2264 break;
2265 }
84b7b8e7
FB
2266 }
2267 return val;
2268}
2269
a8170e5e 2270uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2271{
2272 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2273}
2274
a8170e5e 2275uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2276{
2277 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2278}
2279
a8170e5e 2280uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2281{
2282 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2283}
2284
aab33094 2285/* XXX: optimize */
a8170e5e 2286uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2287{
2288 uint8_t val;
2289 cpu_physical_memory_read(addr, &val, 1);
2290 return val;
2291}
2292
733f0b02 2293/* warning: addr must be aligned */
a8170e5e 2294static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2295 enum device_endian endian)
aab33094 2296{
733f0b02
MT
2297 uint8_t *ptr;
2298 uint64_t val;
f3705d53 2299 MemoryRegionSection *section;
733f0b02 2300
ac1970fb 2301 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2302
cc5bea60
BS
2303 if (!(memory_region_is_ram(section->mr) ||
2304 memory_region_is_romd(section->mr))) {
733f0b02 2305 /* I/O case */
cc5bea60 2306 addr = memory_region_section_addr(section, addr);
37ec01d4 2307 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
2308#if defined(TARGET_WORDS_BIGENDIAN)
2309 if (endian == DEVICE_LITTLE_ENDIAN) {
2310 val = bswap16(val);
2311 }
2312#else
2313 if (endian == DEVICE_BIG_ENDIAN) {
2314 val = bswap16(val);
2315 }
2316#endif
733f0b02
MT
2317 } else {
2318 /* RAM case */
f3705d53 2319 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2320 & TARGET_PAGE_MASK)
cc5bea60 2321 + memory_region_section_addr(section, addr));
1e78bcc1
AG
2322 switch (endian) {
2323 case DEVICE_LITTLE_ENDIAN:
2324 val = lduw_le_p(ptr);
2325 break;
2326 case DEVICE_BIG_ENDIAN:
2327 val = lduw_be_p(ptr);
2328 break;
2329 default:
2330 val = lduw_p(ptr);
2331 break;
2332 }
733f0b02
MT
2333 }
2334 return val;
aab33094
FB
2335}
2336
a8170e5e 2337uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2338{
2339 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2340}
2341
a8170e5e 2342uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2343{
2344 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2345}
2346
a8170e5e 2347uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2348{
2349 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2350}
2351
8df1cd07
FB
2352/* warning: addr must be aligned. The ram page is not masked as dirty
2353 and the code inside is not invalidated. It is useful if the dirty
2354 bits are used to track modified PTEs */
a8170e5e 2355void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2356{
8df1cd07 2357 uint8_t *ptr;
f3705d53 2358 MemoryRegionSection *section;
8df1cd07 2359
ac1970fb 2360 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2361
f3705d53 2362 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2363 addr = memory_region_section_addr(section, addr);
f3705d53 2364 if (memory_region_is_ram(section->mr)) {
37ec01d4 2365 section = &phys_sections[phys_section_rom];
06ef3525 2366 }
37ec01d4 2367 io_mem_write(section->mr, addr, val, 4);
8df1cd07 2368 } else {
f3705d53 2369 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 2370 & TARGET_PAGE_MASK)
cc5bea60 2371 + memory_region_section_addr(section, addr);
5579c7f3 2372 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2373 stl_p(ptr, val);
74576198
AL
2374
2375 if (unlikely(in_migration)) {
2376 if (!cpu_physical_memory_is_dirty(addr1)) {
2377 /* invalidate code */
2378 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2379 /* set dirty bit */
f7c11b53
YT
2380 cpu_physical_memory_set_dirty_flags(
2381 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2382 }
2383 }
8df1cd07
FB
2384 }
2385}
2386
a8170e5e 2387void stq_phys_notdirty(hwaddr addr, uint64_t val)
bc98a7ef 2388{
bc98a7ef 2389 uint8_t *ptr;
f3705d53 2390 MemoryRegionSection *section;
bc98a7ef 2391
ac1970fb 2392 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2393
f3705d53 2394 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2395 addr = memory_region_section_addr(section, addr);
f3705d53 2396 if (memory_region_is_ram(section->mr)) {
37ec01d4 2397 section = &phys_sections[phys_section_rom];
06ef3525 2398 }
bc98a7ef 2399#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
2400 io_mem_write(section->mr, addr, val >> 32, 4);
2401 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 2402#else
37ec01d4
AK
2403 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2404 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
2405#endif
2406 } else {
f3705d53 2407 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2408 & TARGET_PAGE_MASK)
cc5bea60 2409 + memory_region_section_addr(section, addr));
bc98a7ef
JM
2410 stq_p(ptr, val);
2411 }
2412}
2413
8df1cd07 2414/* warning: addr must be aligned */
a8170e5e 2415static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2416 enum device_endian endian)
8df1cd07 2417{
8df1cd07 2418 uint8_t *ptr;
f3705d53 2419 MemoryRegionSection *section;
8df1cd07 2420
ac1970fb 2421 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 2422
f3705d53 2423 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2424 addr = memory_region_section_addr(section, addr);
f3705d53 2425 if (memory_region_is_ram(section->mr)) {
37ec01d4 2426 section = &phys_sections[phys_section_rom];
06ef3525 2427 }
1e78bcc1
AG
2428#if defined(TARGET_WORDS_BIGENDIAN)
2429 if (endian == DEVICE_LITTLE_ENDIAN) {
2430 val = bswap32(val);
2431 }
2432#else
2433 if (endian == DEVICE_BIG_ENDIAN) {
2434 val = bswap32(val);
2435 }
2436#endif
37ec01d4 2437 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
2438 } else {
2439 unsigned long addr1;
f3705d53 2440 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2441 + memory_region_section_addr(section, addr);
8df1cd07 2442 /* RAM case */
5579c7f3 2443 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2444 switch (endian) {
2445 case DEVICE_LITTLE_ENDIAN:
2446 stl_le_p(ptr, val);
2447 break;
2448 case DEVICE_BIG_ENDIAN:
2449 stl_be_p(ptr, val);
2450 break;
2451 default:
2452 stl_p(ptr, val);
2453 break;
2454 }
51d7a9eb 2455 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2456 }
2457}
2458
a8170e5e 2459void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2460{
2461 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2462}
2463
a8170e5e 2464void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2465{
2466 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2467}
2468
a8170e5e 2469void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2470{
2471 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2472}
2473
aab33094 2474/* XXX: optimize */
a8170e5e 2475void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2476{
2477 uint8_t v = val;
2478 cpu_physical_memory_write(addr, &v, 1);
2479}
2480
733f0b02 2481/* warning: addr must be aligned */
a8170e5e 2482static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2483 enum device_endian endian)
aab33094 2484{
733f0b02 2485 uint8_t *ptr;
f3705d53 2486 MemoryRegionSection *section;
733f0b02 2487
ac1970fb 2488 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 2489
f3705d53 2490 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 2491 addr = memory_region_section_addr(section, addr);
f3705d53 2492 if (memory_region_is_ram(section->mr)) {
37ec01d4 2493 section = &phys_sections[phys_section_rom];
06ef3525 2494 }
1e78bcc1
AG
2495#if defined(TARGET_WORDS_BIGENDIAN)
2496 if (endian == DEVICE_LITTLE_ENDIAN) {
2497 val = bswap16(val);
2498 }
2499#else
2500 if (endian == DEVICE_BIG_ENDIAN) {
2501 val = bswap16(val);
2502 }
2503#endif
37ec01d4 2504 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
2505 } else {
2506 unsigned long addr1;
f3705d53 2507 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 2508 + memory_region_section_addr(section, addr);
733f0b02
MT
2509 /* RAM case */
2510 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2511 switch (endian) {
2512 case DEVICE_LITTLE_ENDIAN:
2513 stw_le_p(ptr, val);
2514 break;
2515 case DEVICE_BIG_ENDIAN:
2516 stw_be_p(ptr, val);
2517 break;
2518 default:
2519 stw_p(ptr, val);
2520 break;
2521 }
51d7a9eb 2522 invalidate_and_set_dirty(addr1, 2);
733f0b02 2523 }
aab33094
FB
2524}
2525
a8170e5e 2526void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2527{
2528 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2529}
2530
a8170e5e 2531void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2532{
2533 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2534}
2535
a8170e5e 2536void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2537{
2538 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2539}
2540
aab33094 2541/* XXX: optimize */
a8170e5e 2542void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2543{
2544 val = tswap64(val);
71d2b725 2545 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2546}
2547
a8170e5e 2548void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2549{
2550 val = cpu_to_le64(val);
2551 cpu_physical_memory_write(addr, &val, 8);
2552}
2553
a8170e5e 2554void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2555{
2556 val = cpu_to_be64(val);
2557 cpu_physical_memory_write(addr, &val, 8);
2558}
2559
5e2972fd 2560/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2561int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2562 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2563{
2564 int l;
a8170e5e 2565 hwaddr phys_addr;
9b3c35e0 2566 target_ulong page;
13eb76e0
FB
2567
2568 while (len > 0) {
2569 page = addr & TARGET_PAGE_MASK;
2570 phys_addr = cpu_get_phys_page_debug(env, page);
2571 /* if no physical page mapped, return an error */
2572 if (phys_addr == -1)
2573 return -1;
2574 l = (page + TARGET_PAGE_SIZE) - addr;
2575 if (l > len)
2576 l = len;
5e2972fd 2577 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2578 if (is_write)
2579 cpu_physical_memory_write_rom(phys_addr, buf, l);
2580 else
5e2972fd 2581 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2582 len -= l;
2583 buf += l;
2584 addr += l;
2585 }
2586 return 0;
2587}
a68fe89c 2588#endif
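/* Illustrative sketch (not part of exec.c): debugger-style code such as the
 * gdbstub reads guest memory by virtual address with cpu_memory_rw_debug();
 * writes go through cpu_physical_memory_write_rom() above, so breakpoints can
 * be planted even in ROM. "example_read_guest_virt" and the error handling
 * shown are hypothetical. */
#if 0 /* example only */
static int example_read_guest_virt(CPUArchState *env, target_ulong vaddr)
{
    uint8_t insn[4];

    if (cpu_memory_rw_debug(env, vaddr, insn, sizeof(insn), 0) < 0) {
        return -1;               /* no physical page mapped at vaddr */
    }
    return 0;
}
#endif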
13eb76e0 2589
76f35538 2590#ifndef CONFIG_USER_ONLY
a8170e5e 2591bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2592{
2593 MemoryRegionSection *section;
2594
ac1970fb
AK
2595 section = phys_page_find(address_space_memory.dispatch,
2596 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
2597
2598 return !(memory_region_is_ram(section->mr) ||
2599 memory_region_is_romd(section->mr));
2600}
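/* Illustrative sketch (not part of exec.c): callers that want to access guest
 * memory directly (e.g. dump or migration code) can use this predicate to skip
 * MMIO-backed addresses; "phys_addr" here is just a placeholder variable. */
#if 0 /* example only */
    if (cpu_physical_memory_is_io(phys_addr)) {
        /* not plain RAM/ROM: fall back to cpu_physical_memory_rw() */
    }
#endif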
2601#endif