git.proxmox.com Git - mirror_qemu.git / blame - exec.c
exec: Implement subpage_read/write via address_space_rw
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
0d09e41a 34#include "hw/xen/xen.h"
1de7afc9
PB
35#include "qemu/timer.h"
36#include "qemu/config-file.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
67d95c15 52
db7b5426 53//#define DEBUG_SUBPAGE
1196be37 54
e2eef170 55#if !defined(CONFIG_USER_ONLY)
9fa3e853 56int phys_ram_fd;
74576198 57static int in_migration;
94a6b54f 58
a3161038 59RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
60
61static MemoryRegion *system_memory;
309cb471 62static MemoryRegion *system_io;
62152b8a 63
f6790af6
AK
64AddressSpace address_space_io;
65AddressSpace address_space_memory;
9e11908f 66DMAContext dma_context_memory;
2673a5da 67
0844e007 68MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 69static MemoryRegion io_mem_unassigned;
0e0df1e2 70
e2eef170 71#endif
9fa3e853 72
9349b4f9 73CPUArchState *first_cpu;
6a00d601
FB
74/* current CPU in the current thread. It is only valid inside
75 cpu_exec() */
9349b4f9 76DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 77/* 0 = Do not count executed instructions.
bf20dc07 78 1 = Precise instruction counting.
2e70f6ef 79 2 = Adaptive rate instruction counting. */
5708fc66 80int use_icount;
6a00d601 81
e2eef170 82#if !defined(CONFIG_USER_ONLY)
4346ae3e 83
1db8abb1
PB
84typedef struct PhysPageEntry PhysPageEntry;
85
86struct PhysPageEntry {
87 uint16_t is_leaf : 1;
88 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
89 uint16_t ptr : 15;
90};
91
92struct AddressSpaceDispatch {
93 /* This is a multi-level map on the physical address space.
94 * The bottom level has pointers to MemoryRegionSections.
95 */
96 PhysPageEntry phys_map;
97 MemoryListener listener;
acc9d80b 98 AddressSpace *as;
1db8abb1
PB
99};
100
90260c6c
JK
101#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
102typedef struct subpage_t {
103 MemoryRegion iomem;
acc9d80b 104 AddressSpace *as;
90260c6c
JK
105 hwaddr base;
106 uint16_t sub_section[TARGET_PAGE_SIZE];
107} subpage_t;
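
Illustrative sketch, not part of exec.c: a subpage carves one target page into byte-granular sub-sections, and SUBPAGE_IDX() is simply the offset inside the page, used to index sub_section[]. The standalone miniature below assumes a 4 KiB page and a hypothetical section id to show the register/lookup round trip.

/* Hypothetical standalone sketch of the subpage indexing idea. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u                    /* assumed TARGET_PAGE_SIZE */
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define IDX(addr) ((addr) & ~PAGE_MASK)    /* mirrors SUBPAGE_IDX() */

int main(void)
{
    uint16_t sub_section[PAGE_SIZE];       /* one section id per byte offset */
    uint64_t base = 0x10000;               /* page-aligned base of the subpage */

    /* register section id 7 for offsets 0x100..0x1ff, as subpage_register() would */
    for (unsigned i = IDX(0x100); i <= IDX(0x1ff); i++) {
        sub_section[i] = 7;
    }
    /* a lookup at guest address base + 0x180 lands in section 7 */
    assert(sub_section[IDX(base + 0x180)] == 7);
    printf("offset 0x180 -> section %d\n", sub_section[IDX(base + 0x180)]);
    return 0;
}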
108
5312bd8b
AK
109static MemoryRegionSection *phys_sections;
110static unsigned phys_sections_nb, phys_sections_nb_alloc;
111static uint16_t phys_section_unassigned;
aa102231
AK
112static uint16_t phys_section_notdirty;
113static uint16_t phys_section_rom;
114static uint16_t phys_section_watch;
5312bd8b 115
d6f2ea22
AK
116/* Simple allocator for PhysPageEntry nodes */
117static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
118static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
119
07f07b31 120#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 121
e2eef170 122static void io_mem_init(void);
62152b8a 123static void memory_map_init(void);
8b9c99d9 124static void *qemu_safe_ram_ptr(ram_addr_t addr);
e2eef170 125
1ec9b909 126static MemoryRegion io_mem_watch;
6658ffb8 127#endif
fd6ce8f6 128
6d9a1304 129#if !defined(CONFIG_USER_ONLY)
d6f2ea22 130
f7bf5461 131static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 132{
f7bf5461 133 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
d6f2ea22
AK
134 typedef PhysPageEntry Node[L2_SIZE];
135 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
f7bf5461
AK
136 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
137 phys_map_nodes_nb + nodes);
d6f2ea22
AK
138 phys_map_nodes = g_renew(Node, phys_map_nodes,
139 phys_map_nodes_nb_alloc);
140 }
f7bf5461
AK
141}
142
143static uint16_t phys_map_node_alloc(void)
144{
145 unsigned i;
146 uint16_t ret;
147
148 ret = phys_map_nodes_nb++;
149 assert(ret != PHYS_MAP_NODE_NIL);
150 assert(ret != phys_map_nodes_nb_alloc);
d6f2ea22 151 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 152 phys_map_nodes[ret][i].is_leaf = 0;
c19e8800 153 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 154 }
f7bf5461 155 return ret;
d6f2ea22
AK
156}
157
158static void phys_map_nodes_reset(void)
159{
160 phys_map_nodes_nb = 0;
161}
162
92e873b9 163
a8170e5e
AK
164static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
165 hwaddr *nb, uint16_t leaf,
2999097b 166 int level)
f7bf5461
AK
167{
168 PhysPageEntry *p;
169 int i;
a8170e5e 170 hwaddr step = (hwaddr)1 << (level * L2_BITS);
108c49b8 171
07f07b31 172 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800
AK
173 lp->ptr = phys_map_node_alloc();
174 p = phys_map_nodes[lp->ptr];
f7bf5461
AK
175 if (level == 0) {
176 for (i = 0; i < L2_SIZE; i++) {
07f07b31 177 p[i].is_leaf = 1;
c19e8800 178 p[i].ptr = phys_section_unassigned;
4346ae3e 179 }
67c4d23c 180 }
f7bf5461 181 } else {
c19e8800 182 p = phys_map_nodes[lp->ptr];
92e873b9 183 }
2999097b 184 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 185
2999097b 186 while (*nb && lp < &p[L2_SIZE]) {
07f07b31
AK
187 if ((*index & (step - 1)) == 0 && *nb >= step) {
188 lp->is_leaf = true;
c19e8800 189 lp->ptr = leaf;
07f07b31
AK
190 *index += step;
191 *nb -= step;
2999097b
AK
192 } else {
193 phys_page_set_level(lp, index, nb, leaf, level - 1);
194 }
195 ++lp;
f7bf5461
AK
196 }
197}
198
ac1970fb 199static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 200 hwaddr index, hwaddr nb,
2999097b 201 uint16_t leaf)
f7bf5461 202{
2999097b 203 /* Wildly overreserve - it doesn't matter much. */
07f07b31 204 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 205
ac1970fb 206 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
207}
208
149f54b5 209static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
92e873b9 210{
ac1970fb 211 PhysPageEntry lp = d->phys_map;
31ab2b4a
AK
212 PhysPageEntry *p;
213 int i;
f1f6e3b8 214
07f07b31 215 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 216 if (lp.ptr == PHYS_MAP_NODE_NIL) {
fd298934 217 return &phys_sections[phys_section_unassigned];
31ab2b4a 218 }
c19e8800 219 p = phys_map_nodes[lp.ptr];
31ab2b4a 220 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 221 }
fd298934 222 return &phys_sections[lp.ptr];
f3705d53
AK
223}
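
Illustrative sketch, not part of exec.c: phys_page_set_level() and phys_page_find() walk a radix tree in which every level consumes L2_BITS of the page index, interior entries name a slot in phys_map_nodes[], and leaves carry a phys_sections index. The miniature below assumes two levels of 4 bits each and a fixed node pool; the real code derives L2_BITS and P_L2_LEVELS from the target and also sets whole runs of pages at once.

/* Hypothetical miniature of the phys_map radix tree (2 levels x 4 bits). */
#include <assert.h>
#include <stdint.h>

#define BITS   4
#define SIZE   (1u << BITS)
#define LEVELS 2
#define NIL    0xffffu

typedef struct { uint16_t is_leaf; uint16_t ptr; } Entry;

static Entry nodes[64][SIZE];   /* fixed pool standing in for phys_map_nodes */
static unsigned nodes_nb;       /* no overflow check in this toy */

static uint16_t node_alloc(void)
{
    uint16_t ret = nodes_nb++;
    for (unsigned i = 0; i < SIZE; i++) {
        nodes[ret][i] = (Entry){ .is_leaf = 0, .ptr = NIL };
    }
    return ret;
}

/* Map one page index to a leaf value, allocating interior nodes on demand. */
static void page_set(Entry *lp, uint32_t index, uint16_t leaf, int level)
{
    if (lp->ptr == NIL) {
        lp->ptr = node_alloc();
    }
    Entry *p = &nodes[lp->ptr][(index >> (level * BITS)) & (SIZE - 1)];
    if (level == 0) {
        p->is_leaf = 1;
        p->ptr = leaf;
    } else {
        page_set(p, index, leaf, level - 1);
    }
}

static uint16_t page_find(Entry root, uint32_t index)
{
    Entry lp = root;
    for (int i = LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == NIL) {
            return NIL;                       /* unassigned */
        }
        lp = nodes[lp.ptr][(index >> (i * BITS)) & (SIZE - 1)];
    }
    return lp.ptr;
}

int main(void)
{
    Entry root = { .is_leaf = 0, .ptr = NIL };
    page_set(&root, 0x37, 5, LEVELS - 1);
    assert(page_find(root, 0x37) == 5);
    assert(page_find(root, 0x38) == NIL);
    return 0;
}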
224
e5548617
BS
225bool memory_region_is_unassigned(MemoryRegion *mr)
226{
2a8e7499 227 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 228 && mr != &io_mem_watch;
fd6ce8f6 229}
149f54b5 230
9f029603 231static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
90260c6c
JK
232 hwaddr addr,
233 bool resolve_subpage)
9f029603 234{
90260c6c
JK
235 MemoryRegionSection *section;
236 subpage_t *subpage;
237
238 section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
239 if (resolve_subpage && section->mr->subpage) {
240 subpage = container_of(section->mr, subpage_t, iomem);
241 section = &phys_sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
242 }
243 return section;
9f029603
JK
244}
245
90260c6c
JK
246static MemoryRegionSection *
247address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
248 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
249{
250 MemoryRegionSection *section;
251 Int128 diff;
252
90260c6c 253 section = address_space_lookup_region(as, addr, resolve_subpage);
149f54b5
PB
254 /* Compute offset within MemoryRegionSection */
255 addr -= section->offset_within_address_space;
256
257 /* Compute offset within MemoryRegion */
258 *xlat = addr + section->offset_within_region;
259
260 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 261 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
262 return section;
263}
90260c6c
JK
264
265MemoryRegionSection *address_space_translate(AddressSpace *as, hwaddr addr,
266 hwaddr *xlat, hwaddr *plen,
267 bool is_write)
268{
269 return address_space_translate_internal(as, addr, xlat, plen, true);
270}
271
272MemoryRegionSection *
273address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
274 hwaddr *plen)
275{
276 return address_space_translate_internal(as, addr, xlat, plen, false);
277}
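
Illustrative sketch, not part of exec.c: the arithmetic in address_space_translate_internal() rebases the address from address-space coordinates into MemoryRegion coordinates and clamps the requested length to what remains of the section. The sketch below uses plain 64-bit values instead of Int128 and a cut-down Section struct.

/* Hypothetical sketch of the translate/clamp arithmetic, 64-bit only. */
#include <assert.h>
#include <stdint.h>

typedef struct {
    uint64_t offset_within_address_space;   /* where the section starts in the AS */
    uint64_t offset_within_region;          /* where it starts inside its MemoryRegion */
    uint64_t size;                          /* section size in bytes */
} Section;

static uint64_t translate(const Section *s, uint64_t addr, uint64_t *xlat, uint64_t *plen)
{
    addr -= s->offset_within_address_space;      /* offset within the section */
    *xlat = addr + s->offset_within_region;      /* offset within the MemoryRegion */
    uint64_t diff = s->size - addr;              /* bytes left in the section */
    if (*plen > diff) {
        *plen = diff;                            /* clamp, as int128_min() does */
    }
    return *xlat;
}

int main(void)
{
    Section s = { .offset_within_address_space = 0x4000,
                  .offset_within_region = 0x100, .size = 0x1000 };
    uint64_t xlat, len = 0x2000;
    translate(&s, 0x4800, &xlat, &len);
    assert(xlat == 0x900);      /* 0x800 into the section + region offset 0x100 */
    assert(len == 0x800);       /* clamped to what remains of the 0x1000 section */
    return 0;
}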
5b6dd868 278#endif
fd6ce8f6 279
5b6dd868 280void cpu_exec_init_all(void)
fdbb84d1 281{
5b6dd868 282#if !defined(CONFIG_USER_ONLY)
b2a8658e 283 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
284 memory_map_init();
285 io_mem_init();
fdbb84d1 286#endif
5b6dd868 287}
fdbb84d1 288
b170fce3 289#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
290
291static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 292{
259186a7 293 CPUState *cpu = opaque;
a513fe19 294
5b6dd868
BS
295 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
296 version_id is increased. */
259186a7
AF
297 cpu->interrupt_request &= ~0x01;
298 tlb_flush(cpu->env_ptr, 1);
5b6dd868
BS
299
300 return 0;
a513fe19 301}
7501267e 302
5b6dd868
BS
303static const VMStateDescription vmstate_cpu_common = {
304 .name = "cpu_common",
305 .version_id = 1,
306 .minimum_version_id = 1,
307 .minimum_version_id_old = 1,
308 .post_load = cpu_common_post_load,
309 .fields = (VMStateField []) {
259186a7
AF
310 VMSTATE_UINT32(halted, CPUState),
311 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868
BS
312 VMSTATE_END_OF_LIST()
313 }
314};
b170fce3
AF
315#else
316#define vmstate_cpu_common vmstate_dummy
5b6dd868 317#endif
ea041c0e 318
38d8f5c8 319CPUState *qemu_get_cpu(int index)
ea041c0e 320{
5b6dd868 321 CPUArchState *env = first_cpu;
38d8f5c8 322 CPUState *cpu = NULL;
ea041c0e 323
5b6dd868 324 while (env) {
55e5c285
AF
325 cpu = ENV_GET_CPU(env);
326 if (cpu->cpu_index == index) {
5b6dd868 327 break;
55e5c285 328 }
5b6dd868 329 env = env->next_cpu;
ea041c0e 330 }
5b6dd868 331
d76fddae 332 return env ? cpu : NULL;
ea041c0e
FB
333}
334
d6b9e0d6
MT
335void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
336{
337 CPUArchState *env = first_cpu;
338
339 while (env) {
340 func(ENV_GET_CPU(env), data);
341 env = env->next_cpu;
342 }
343}
344
5b6dd868 345void cpu_exec_init(CPUArchState *env)
ea041c0e 346{
5b6dd868 347 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 348 CPUClass *cc = CPU_GET_CLASS(cpu);
5b6dd868
BS
349 CPUArchState **penv;
350 int cpu_index;
351
352#if defined(CONFIG_USER_ONLY)
353 cpu_list_lock();
354#endif
355 env->next_cpu = NULL;
356 penv = &first_cpu;
357 cpu_index = 0;
358 while (*penv != NULL) {
359 penv = &(*penv)->next_cpu;
360 cpu_index++;
361 }
55e5c285 362 cpu->cpu_index = cpu_index;
1b1ed8dc 363 cpu->numa_node = 0;
5b6dd868
BS
364 QTAILQ_INIT(&env->breakpoints);
365 QTAILQ_INIT(&env->watchpoints);
366#ifndef CONFIG_USER_ONLY
367 cpu->thread_id = qemu_get_thread_id();
368#endif
369 *penv = env;
370#if defined(CONFIG_USER_ONLY)
371 cpu_list_unlock();
372#endif
259186a7 373 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
5b6dd868 374#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
375 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
376 cpu_save, cpu_load, env);
b170fce3 377 assert(cc->vmsd == NULL);
5b6dd868 378#endif
b170fce3
AF
379 if (cc->vmsd != NULL) {
380 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
381 }
ea041c0e
FB
382}
383
1fddef4b 384#if defined(TARGET_HAS_ICE)
94df27fd 385#if defined(CONFIG_USER_ONLY)
9349b4f9 386static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
94df27fd
PB
387{
388 tb_invalidate_phys_page_range(pc, pc + 1, 0);
389}
390#else
1e7855a5
MF
391static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
392{
9d70c4b7
MF
393 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
394 (pc & ~TARGET_PAGE_MASK));
1e7855a5 395}
c27004ec 396#endif
94df27fd 397#endif /* TARGET_HAS_ICE */
d720b93d 398
c527ee8f 399#if defined(CONFIG_USER_ONLY)
9349b4f9 400void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f
PB
401
402{
403}
404
9349b4f9 405int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f
PB
406 int flags, CPUWatchpoint **watchpoint)
407{
408 return -ENOSYS;
409}
410#else
6658ffb8 411/* Add a watchpoint. */
9349b4f9 412int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 413 int flags, CPUWatchpoint **watchpoint)
6658ffb8 414{
b4051334 415 target_ulong len_mask = ~(len - 1);
c0ce998e 416 CPUWatchpoint *wp;
6658ffb8 417
b4051334 418 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
419 if ((len & (len - 1)) || (addr & ~len_mask) ||
420 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
421 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
422 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
423 return -EINVAL;
424 }
7267c094 425 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
426
427 wp->vaddr = addr;
b4051334 428 wp->len_mask = len_mask;
a1d1bb31
AL
429 wp->flags = flags;
430
2dc9f411 431 /* keep all GDB-injected watchpoints in front */
c0ce998e 432 if (flags & BP_GDB)
72cf2d4f 433 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 434 else
72cf2d4f 435 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 436
6658ffb8 437 tlb_flush_page(env, addr);
a1d1bb31
AL
438
439 if (watchpoint)
440 *watchpoint = wp;
441 return 0;
6658ffb8
PB
442}
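
Illustrative sketch, not part of exec.c: the sanity check above only accepts power-of-two watchpoint lengths up to a page, aligned to their own size, and len_mask = ~(len - 1) is what the hit test later uses to compare whole aligned windows. The standalone check below assumes a 4 KiB page.

/* Hypothetical sketch of the watchpoint argument check and mask. */
#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096u   /* assumed TARGET_PAGE_SIZE */

static int watchpoint_args_valid(uint64_t addr, uint64_t len, uint64_t *len_mask)
{
    *len_mask = ~(len - 1);
    if ((len & (len - 1)) ||      /* not a power of two */
        (addr & ~*len_mask) ||    /* not aligned to its own length */
        len == 0 || len > PAGE_SIZE) {
        return 0;
    }
    return 1;
}

int main(void)
{
    uint64_t mask;
    assert(watchpoint_args_valid(0x1000, 4, &mask) && mask == ~UINT64_C(3));
    assert(!watchpoint_args_valid(0x1001, 4, &mask));   /* misaligned */
    assert(!watchpoint_args_valid(0x1000, 3, &mask));   /* not a power of two */
    return 0;
}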
443
a1d1bb31 444/* Remove a specific watchpoint. */
9349b4f9 445int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 446 int flags)
6658ffb8 447{
b4051334 448 target_ulong len_mask = ~(len - 1);
a1d1bb31 449 CPUWatchpoint *wp;
6658ffb8 450
72cf2d4f 451 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 452 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 453 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 454 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
455 return 0;
456 }
457 }
a1d1bb31 458 return -ENOENT;
6658ffb8
PB
459}
460
a1d1bb31 461/* Remove a specific watchpoint by reference. */
9349b4f9 462void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 463{
72cf2d4f 464 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 465
a1d1bb31
AL
466 tlb_flush_page(env, watchpoint->vaddr);
467
7267c094 468 g_free(watchpoint);
a1d1bb31
AL
469}
470
471/* Remove all matching watchpoints. */
9349b4f9 472void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 473{
c0ce998e 474 CPUWatchpoint *wp, *next;
a1d1bb31 475
72cf2d4f 476 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
477 if (wp->flags & mask)
478 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 479 }
7d03f82f 480}
c527ee8f 481#endif
7d03f82f 482
a1d1bb31 483/* Add a breakpoint. */
9349b4f9 484int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 485 CPUBreakpoint **breakpoint)
4c3a88a2 486{
1fddef4b 487#if defined(TARGET_HAS_ICE)
c0ce998e 488 CPUBreakpoint *bp;
3b46e624 489
7267c094 490 bp = g_malloc(sizeof(*bp));
4c3a88a2 491
a1d1bb31
AL
492 bp->pc = pc;
493 bp->flags = flags;
494
2dc9f411 495 /* keep all GDB-injected breakpoints in front */
c0ce998e 496 if (flags & BP_GDB)
72cf2d4f 497 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 498 else
72cf2d4f 499 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 500
d720b93d 501 breakpoint_invalidate(env, pc);
a1d1bb31
AL
502
503 if (breakpoint)
504 *breakpoint = bp;
4c3a88a2
FB
505 return 0;
506#else
a1d1bb31 507 return -ENOSYS;
4c3a88a2
FB
508#endif
509}
510
a1d1bb31 511/* Remove a specific breakpoint. */
9349b4f9 512int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 513{
7d03f82f 514#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
515 CPUBreakpoint *bp;
516
72cf2d4f 517 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
518 if (bp->pc == pc && bp->flags == flags) {
519 cpu_breakpoint_remove_by_ref(env, bp);
520 return 0;
521 }
7d03f82f 522 }
a1d1bb31
AL
523 return -ENOENT;
524#else
525 return -ENOSYS;
7d03f82f
EI
526#endif
527}
528
a1d1bb31 529/* Remove a specific breakpoint by reference. */
9349b4f9 530void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 531{
1fddef4b 532#if defined(TARGET_HAS_ICE)
72cf2d4f 533 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 534
a1d1bb31
AL
535 breakpoint_invalidate(env, breakpoint->pc);
536
7267c094 537 g_free(breakpoint);
a1d1bb31
AL
538#endif
539}
540
541/* Remove all matching breakpoints. */
9349b4f9 542void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
543{
544#if defined(TARGET_HAS_ICE)
c0ce998e 545 CPUBreakpoint *bp, *next;
a1d1bb31 546
72cf2d4f 547 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
548 if (bp->flags & mask)
549 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 550 }
4c3a88a2
FB
551#endif
552}
553
c33a346e
FB
554/* enable or disable single step mode. EXCP_DEBUG is returned by the
555 CPU loop after each instruction */
9349b4f9 556void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 557{
1fddef4b 558#if defined(TARGET_HAS_ICE)
c33a346e
FB
559 if (env->singlestep_enabled != enabled) {
560 env->singlestep_enabled = enabled;
e22a25c9
AL
561 if (kvm_enabled())
562 kvm_update_guest_debug(env, 0);
563 else {
ccbb4d44 564 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
565 /* XXX: only flush what is necessary */
566 tb_flush(env);
567 }
c33a346e
FB
568 }
569#endif
570}
571
9349b4f9 572void cpu_exit(CPUArchState *env)
3098dba0 573{
fcd7d003
AF
574 CPUState *cpu = ENV_GET_CPU(env);
575
576 cpu->exit_request = 1;
378df4b2 577 cpu->tcg_exit_req = 1;
3098dba0
AJ
578}
579
9349b4f9 580void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
581{
582 va_list ap;
493ae1f0 583 va_list ap2;
7501267e
FB
584
585 va_start(ap, fmt);
493ae1f0 586 va_copy(ap2, ap);
7501267e
FB
587 fprintf(stderr, "qemu: fatal: ");
588 vfprintf(stderr, fmt, ap);
589 fprintf(stderr, "\n");
6fd2a026 590 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
591 if (qemu_log_enabled()) {
592 qemu_log("qemu: fatal: ");
593 qemu_log_vprintf(fmt, ap2);
594 qemu_log("\n");
6fd2a026 595 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 596 qemu_log_flush();
93fcfe39 597 qemu_log_close();
924edcae 598 }
493ae1f0 599 va_end(ap2);
f9373291 600 va_end(ap);
fd052bf6
RV
601#if defined(CONFIG_USER_ONLY)
602 {
603 struct sigaction act;
604 sigfillset(&act.sa_mask);
605 act.sa_handler = SIG_DFL;
606 sigaction(SIGABRT, &act, NULL);
607 }
608#endif
7501267e
FB
609 abort();
610}
611
9349b4f9 612CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 613{
9349b4f9
AF
614 CPUArchState *new_env = cpu_init(env->cpu_model_str);
615 CPUArchState *next_cpu = new_env->next_cpu;
5a38f081
AL
616#if defined(TARGET_HAS_ICE)
617 CPUBreakpoint *bp;
618 CPUWatchpoint *wp;
619#endif
620
9349b4f9 621 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081 622
55e5c285 623 /* Preserve chaining. */
c5be9f08 624 new_env->next_cpu = next_cpu;
5a38f081
AL
625
626 /* Clone all break/watchpoints.
627 Note: Once we support ptrace with hw-debug register access, make sure
628 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
629 QTAILQ_INIT(&env->breakpoints);
630 QTAILQ_INIT(&env->watchpoints);
5a38f081 631#if defined(TARGET_HAS_ICE)
72cf2d4f 632 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
633 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
634 }
72cf2d4f 635 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
636 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
637 wp->flags, NULL);
638 }
639#endif
640
c5be9f08
TS
641 return new_env;
642}
643
0124311e 644#if !defined(CONFIG_USER_ONLY)
d24981d3
JQ
645static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
646 uintptr_t length)
647{
648 uintptr_t start1;
649
650 /* we modify the TLB cache so that the dirty bit will be set again
651 when accessing the range */
652 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
653 /* Check that we don't span multiple blocks - this breaks the
654 address comparisons below. */
655 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
656 != (end - 1) - start) {
657 abort();
658 }
659 cpu_tlb_reset_dirty_all(start1, length);
660
661}
662
5579c7f3 663/* Note: start and end must be within the same ram block. */
c227f099 664void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 665 int dirty_flags)
1ccde1cb 666{
d24981d3 667 uintptr_t length;
1ccde1cb
FB
668
669 start &= TARGET_PAGE_MASK;
670 end = TARGET_PAGE_ALIGN(end);
671
672 length = end - start;
673 if (length == 0)
674 return;
f7c11b53 675 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 676
d24981d3
JQ
677 if (tcg_enabled()) {
678 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 679 }
1ccde1cb
FB
680}
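
Illustrative sketch, not part of exec.c: cpu_physical_memory_reset_dirty() first widens [start, end) to whole target pages, rounding start down with TARGET_PAGE_MASK and end up with TARGET_PAGE_ALIGN(). The same rounding, assuming 4 KiB pages:

/* Hypothetical sketch of the page rounding used for dirty-range handling. */
#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE  4096u
#define PAGE_MASK  (~(uint64_t)(PAGE_SIZE - 1))
#define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)   /* round up */

int main(void)
{
    uint64_t start = 0x1234, end = 0x5678;
    start &= PAGE_MASK;          /* round down: 0x1000 */
    end = PAGE_ALIGN(end);       /* round up:   0x6000 */
    assert(start == 0x1000 && end == 0x6000);
    assert((end - start) % PAGE_SIZE == 0);   /* whole pages only */
    return 0;
}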
681
8b9c99d9 682static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 683{
f6f3fbca 684 int ret = 0;
74576198 685 in_migration = enable;
f6f3fbca 686 return ret;
74576198
AL
687}
688
a8170e5e 689hwaddr memory_region_section_get_iotlb(CPUArchState *env,
149f54b5
PB
690 MemoryRegionSection *section,
691 target_ulong vaddr,
692 hwaddr paddr, hwaddr xlat,
693 int prot,
694 target_ulong *address)
e5548617 695{
a8170e5e 696 hwaddr iotlb;
e5548617
BS
697 CPUWatchpoint *wp;
698
cc5bea60 699 if (memory_region_is_ram(section->mr)) {
e5548617
BS
700 /* Normal RAM. */
701 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 702 + xlat;
e5548617
BS
703 if (!section->readonly) {
704 iotlb |= phys_section_notdirty;
705 } else {
706 iotlb |= phys_section_rom;
707 }
708 } else {
e5548617 709 iotlb = section - phys_sections;
149f54b5 710 iotlb += xlat;
e5548617
BS
711 }
712
713 /* Make accesses to pages with watchpoints go via the
714 watchpoint trap routines. */
715 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
716 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
717 /* Avoid trapping reads of pages with a write breakpoint. */
718 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
719 iotlb = phys_section_watch + paddr;
720 *address |= TLB_MMIO;
721 break;
722 }
723 }
724 }
725
726 return iotlb;
727}
9fa3e853
FB
728#endif /* defined(CONFIG_USER_ONLY) */
729
e2eef170 730#if !defined(CONFIG_USER_ONLY)
8da3ff18 731
c227f099 732static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 733 uint16_t section);
acc9d80b 734static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
5312bd8b 735static void destroy_page_desc(uint16_t section_index)
54688b1e 736{
5312bd8b
AK
737 MemoryRegionSection *section = &phys_sections[section_index];
738 MemoryRegion *mr = section->mr;
54688b1e
AK
739
740 if (mr->subpage) {
741 subpage_t *subpage = container_of(mr, subpage_t, iomem);
742 memory_region_destroy(&subpage->iomem);
743 g_free(subpage);
744 }
745}
746
4346ae3e 747static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
748{
749 unsigned i;
d6f2ea22 750 PhysPageEntry *p;
54688b1e 751
c19e8800 752 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
753 return;
754 }
755
c19e8800 756 p = phys_map_nodes[lp->ptr];
4346ae3e 757 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 758 if (!p[i].is_leaf) {
54688b1e 759 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 760 } else {
c19e8800 761 destroy_page_desc(p[i].ptr);
54688b1e 762 }
54688b1e 763 }
07f07b31 764 lp->is_leaf = 0;
c19e8800 765 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
766}
767
ac1970fb 768static void destroy_all_mappings(AddressSpaceDispatch *d)
54688b1e 769{
ac1970fb 770 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
d6f2ea22 771 phys_map_nodes_reset();
54688b1e
AK
772}
773
5312bd8b
AK
774static uint16_t phys_section_add(MemoryRegionSection *section)
775{
68f3f65b
PB
776 /* The physical section number is ORed with a page-aligned
777 * pointer to produce the iotlb entries. Thus it should
778 * never overflow into the page-aligned value.
779 */
780 assert(phys_sections_nb < TARGET_PAGE_SIZE);
781
5312bd8b
AK
782 if (phys_sections_nb == phys_sections_nb_alloc) {
783 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
784 phys_sections = g_renew(MemoryRegionSection, phys_sections,
785 phys_sections_nb_alloc);
786 }
787 phys_sections[phys_sections_nb] = *section;
788 return phys_sections_nb++;
789}
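
Illustrative sketch, not part of exec.c: phys_section_add() asserts phys_sections_nb < TARGET_PAGE_SIZE because a section number is later ORed into a page-aligned value to form an iotlb entry and recovered with index & ~TARGET_PAGE_MASK (see iotlb_to_region() further down). The packing and unpacking, assuming 4 KiB pages:

/* Hypothetical sketch of packing a section index into a page-aligned value. */
#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))

int main(void)
{
    uint64_t page_aligned = 0xdead000;            /* low bits are free */
    uint16_t section = 42;                        /* must stay < PAGE_SIZE */
    assert(section < PAGE_SIZE);
    uint64_t iotlb = page_aligned | section;      /* pack */
    assert((iotlb & ~PAGE_MASK) == section);      /* unpack, as iotlb_to_region() does */
    assert((iotlb & PAGE_MASK) == page_aligned);
    return 0;
}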
790
791static void phys_sections_clear(void)
792{
793 phys_sections_nb = 0;
794}
795
ac1970fb 796static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
797{
798 subpage_t *subpage;
a8170e5e 799 hwaddr base = section->offset_within_address_space
0f0cb164 800 & TARGET_PAGE_MASK;
ac1970fb 801 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
0f0cb164
AK
802 MemoryRegionSection subsection = {
803 .offset_within_address_space = base,
804 .size = TARGET_PAGE_SIZE,
805 };
a8170e5e 806 hwaddr start, end;
0f0cb164 807
f3705d53 808 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 809
f3705d53 810 if (!(existing->mr->subpage)) {
acc9d80b 811 subpage = subpage_init(d->as, base);
0f0cb164 812 subsection.mr = &subpage->iomem;
ac1970fb 813 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 814 phys_section_add(&subsection));
0f0cb164 815 } else {
f3705d53 816 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
817 }
818 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
adb2a9b5 819 end = start + section->size - 1;
0f0cb164
AK
820 subpage_register(subpage, start, end, phys_section_add(section));
821}
822
823
ac1970fb 824static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
33417e70 825{
a8170e5e 826 hwaddr start_addr = section->offset_within_address_space;
dd81124b 827 ram_addr_t size = section->size;
a8170e5e 828 hwaddr addr;
5312bd8b 829 uint16_t section_index = phys_section_add(section);
dd81124b 830
3b8e6a2d 831 assert(size);
f6f3fbca 832
3b8e6a2d 833 addr = start_addr;
ac1970fb 834 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2999097b 835 section_index);
33417e70
FB
836}
837
86a86236
AK
838QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > MAX_PHYS_ADDR_SPACE_BITS)
839
840static MemoryRegionSection limit(MemoryRegionSection section)
841{
842 section.size = MIN(section.offset_within_address_space + section.size,
843 MAX_PHYS_ADDR + 1)
844 - section.offset_within_address_space;
845
846 return section;
847}
848
ac1970fb 849static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 850{
ac1970fb 851 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
86a86236 852 MemoryRegionSection now = limit(*section), remain = limit(*section);
0f0cb164
AK
853
854 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
855 || (now.size < TARGET_PAGE_SIZE)) {
856 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
857 - now.offset_within_address_space,
858 now.size);
ac1970fb 859 register_subpage(d, &now);
0f0cb164
AK
860 remain.size -= now.size;
861 remain.offset_within_address_space += now.size;
862 remain.offset_within_region += now.size;
863 }
69b67646
TH
864 while (remain.size >= TARGET_PAGE_SIZE) {
865 now = remain;
866 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
867 now.size = TARGET_PAGE_SIZE;
ac1970fb 868 register_subpage(d, &now);
69b67646
TH
869 } else {
870 now.size &= TARGET_PAGE_MASK;
ac1970fb 871 register_multipage(d, &now);
69b67646 872 }
0f0cb164
AK
873 remain.size -= now.size;
874 remain.offset_within_address_space += now.size;
875 remain.offset_within_region += now.size;
876 }
877 now = remain;
878 if (now.size) {
ac1970fb 879 register_subpage(d, &now);
0f0cb164
AK
880 }
881}
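
Illustrative sketch, not part of exec.c: mem_add() splits an arbitrary section into at most one leading partial page, a run of whole pages, and one trailing partial page; partial pieces go to register_subpage() and whole pages to register_multipage() (the real loop also falls back to per-page registration when the region offset is misaligned). Just the splitting arithmetic, assuming 4 KiB pages:

/* Hypothetical sketch of the head/body/tail split done by mem_add(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))
#define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    uint64_t addr = 0x1800, size = 0x3000;        /* unaligned section */
    uint64_t head = 0, body = 0, tail = 0;

    if (addr & ~PAGE_MASK) {                      /* leading partial page */
        head = PAGE_ALIGN(addr) - addr;
        if (head > size) {
            head = size;
        }
    }
    body = (size - head) & PAGE_MASK;             /* whole pages in the middle */
    tail = size - head - body;                    /* trailing partial page */

    printf("head=%#llx body=%#llx tail=%#llx\n",
           (unsigned long long)head, (unsigned long long)body,
           (unsigned long long)tail);
    assert(head == 0x800 && body == 0x2000 && tail == 0x800);
    return 0;
}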
882
62a2744c
SY
883void qemu_flush_coalesced_mmio_buffer(void)
884{
885 if (kvm_enabled())
886 kvm_flush_coalesced_mmio_buffer();
887}
888
b2a8658e
UD
889void qemu_mutex_lock_ramlist(void)
890{
891 qemu_mutex_lock(&ram_list.mutex);
892}
893
894void qemu_mutex_unlock_ramlist(void)
895{
896 qemu_mutex_unlock(&ram_list.mutex);
897}
898
c902760f
MT
899#if defined(__linux__) && !defined(TARGET_S390X)
900
901#include <sys/vfs.h>
902
903#define HUGETLBFS_MAGIC 0x958458f6
904
905static long gethugepagesize(const char *path)
906{
907 struct statfs fs;
908 int ret;
909
910 do {
9742bf26 911 ret = statfs(path, &fs);
c902760f
MT
912 } while (ret != 0 && errno == EINTR);
913
914 if (ret != 0) {
9742bf26
YT
915 perror(path);
916 return 0;
c902760f
MT
917 }
918
919 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 920 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
921
922 return fs.f_bsize;
923}
924
04b16653
AW
925static void *file_ram_alloc(RAMBlock *block,
926 ram_addr_t memory,
927 const char *path)
c902760f
MT
928{
929 char *filename;
8ca761f6
PF
930 char *sanitized_name;
931 char *c;
c902760f
MT
932 void *area;
933 int fd;
934#ifdef MAP_POPULATE
935 int flags;
936#endif
937 unsigned long hpagesize;
938
939 hpagesize = gethugepagesize(path);
940 if (!hpagesize) {
9742bf26 941 return NULL;
c902760f
MT
942 }
943
944 if (memory < hpagesize) {
945 return NULL;
946 }
947
948 if (kvm_enabled() && !kvm_has_sync_mmu()) {
949 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
950 return NULL;
951 }
952
8ca761f6
PF
953 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
954 sanitized_name = g_strdup(block->mr->name);
955 for (c = sanitized_name; *c != '\0'; c++) {
956 if (*c == '/')
957 *c = '_';
958 }
959
960 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
961 sanitized_name);
962 g_free(sanitized_name);
c902760f
MT
963
964 fd = mkstemp(filename);
965 if (fd < 0) {
9742bf26 966 perror("unable to create backing store for hugepages");
e4ada482 967 g_free(filename);
9742bf26 968 return NULL;
c902760f
MT
969 }
970 unlink(filename);
e4ada482 971 g_free(filename);
c902760f
MT
972
973 memory = (memory+hpagesize-1) & ~(hpagesize-1);
974
975 /*
976 * ftruncate is not supported by hugetlbfs in older
977 * hosts, so don't bother bailing out on errors.
978 * If anything goes wrong with it under other filesystems,
979 * mmap will fail.
980 */
981 if (ftruncate(fd, memory))
9742bf26 982 perror("ftruncate");
c902760f
MT
983
984#ifdef MAP_POPULATE
985 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
986 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
987 * to sidestep this quirk.
988 */
989 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
990 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
991#else
992 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
993#endif
994 if (area == MAP_FAILED) {
9742bf26
YT
995 perror("file_ram_alloc: can't mmap RAM pages");
996 close(fd);
997 return (NULL);
c902760f 998 }
04b16653 999 block->fd = fd;
c902760f
MT
1000 return area;
1001}
1002#endif
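
Illustrative sketch, not part of exec.c: before ftruncate()/mmap(), file_ram_alloc() rounds the requested size up to a multiple of the hugepage size with (memory + hpagesize - 1) & ~(hpagesize - 1). The same round-up, assuming a 2 MiB hugepage:

/* Hypothetical sketch of the hugepage size round-up in file_ram_alloc(). */
#include <assert.h>
#include <stdint.h>

static uint64_t round_up(uint64_t size, uint64_t hpagesize)
{
    return (size + hpagesize - 1) & ~(hpagesize - 1);   /* hpagesize is a power of two */
}

int main(void)
{
    uint64_t hpage = 2u * 1024 * 1024;            /* assumed 2 MiB hugepage */
    assert(round_up(1, hpage) == hpage);
    assert(round_up(hpage, hpage) == hpage);      /* already aligned: unchanged */
    assert(round_up(hpage + 1, hpage) == 2 * hpage);
    return 0;
}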
1003
d17b5288 1004static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1005{
1006 RAMBlock *block, *next_block;
3e837b2c 1007 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1008
49cd9ac6
SH
1009 assert(size != 0); /* it would hand out same offset multiple times */
1010
a3161038 1011 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1012 return 0;
1013
a3161038 1014 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1015 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1016
1017 end = block->offset + block->length;
1018
a3161038 1019 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1020 if (next_block->offset >= end) {
1021 next = MIN(next, next_block->offset);
1022 }
1023 }
1024 if (next - end >= size && next - end < mingap) {
3e837b2c 1025 offset = end;
04b16653
AW
1026 mingap = next - end;
1027 }
1028 }
3e837b2c
AW
1029
1030 if (offset == RAM_ADDR_MAX) {
1031 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1032 (uint64_t)size);
1033 abort();
1034 }
1035
04b16653
AW
1036 return offset;
1037}
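
Illustrative sketch, not part of exec.c: find_ram_offset() chooses, among the gaps between existing RAM blocks, the smallest gap that still fits the requested size (best fit), keeping the ram_addr_t space compact. The standalone version below runs the same search over a plain array of (offset, length) blocks.

/* Hypothetical best-fit gap search mirroring find_ram_offset(). */
#include <assert.h>
#include <stdint.h>

#define ADDR_MAX UINT64_MAX

typedef struct { uint64_t offset, length; } Block;

static uint64_t find_offset(const Block *blocks, int nb, uint64_t size)
{
    uint64_t offset = ADDR_MAX, mingap = ADDR_MAX;

    if (nb == 0) {
        return 0;
    }
    for (int i = 0; i < nb; i++) {
        uint64_t end = blocks[i].offset + blocks[i].length;
        uint64_t next = ADDR_MAX;

        /* nearest block that starts at or after this block's end */
        for (int j = 0; j < nb; j++) {
            if (blocks[j].offset >= end && blocks[j].offset < next) {
                next = blocks[j].offset;
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;                 /* candidate: right after block i */
            mingap = next - end;
        }
    }
    return offset;       /* ADDR_MAX means "no gap found" (the real code aborts) */
}

int main(void)
{
    Block blocks[] = { { 0x0000, 0x1000 }, { 0x3000, 0x1000 } };
    /* 0x800 fits best in the 0x2000-byte hole at 0x1000, not after 0x4000 */
    assert(find_offset(blocks, 2, 0x800) == 0x1000);
    return 0;
}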
1038
652d7ec2 1039ram_addr_t last_ram_offset(void)
d17b5288
AW
1040{
1041 RAMBlock *block;
1042 ram_addr_t last = 0;
1043
a3161038 1044 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1045 last = MAX(last, block->offset + block->length);
1046
1047 return last;
1048}
1049
ddb97f1d
JB
1050static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1051{
1052 int ret;
1053 QemuOpts *machine_opts;
1054
 1055 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1056 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1057 if (machine_opts &&
1058 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
1059 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1060 if (ret) {
1061 perror("qemu_madvise");
1062 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1063 "but dump_guest_core=off specified\n");
1064 }
1065 }
1066}
1067
c5705a77 1068void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
1069{
1070 RAMBlock *new_block, *block;
1071
c5705a77 1072 new_block = NULL;
a3161038 1073 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77
AK
1074 if (block->offset == addr) {
1075 new_block = block;
1076 break;
1077 }
1078 }
1079 assert(new_block);
1080 assert(!new_block->idstr[0]);
84b89d78 1081
09e5ab63
AL
1082 if (dev) {
1083 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1084 if (id) {
1085 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1086 g_free(id);
84b89d78
CM
1087 }
1088 }
1089 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1090
b2a8658e
UD
1091 /* This assumes the iothread lock is taken here too. */
1092 qemu_mutex_lock_ramlist();
a3161038 1093 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1094 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1095 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1096 new_block->idstr);
1097 abort();
1098 }
1099 }
b2a8658e 1100 qemu_mutex_unlock_ramlist();
c5705a77
AK
1101}
1102
8490fc78
LC
1103static int memory_try_enable_merging(void *addr, size_t len)
1104{
1105 QemuOpts *opts;
1106
1107 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1108 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1109 /* disabled by the user */
1110 return 0;
1111 }
1112
1113 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1114}
1115
c5705a77
AK
1116ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1117 MemoryRegion *mr)
1118{
abb26d63 1119 RAMBlock *block, *new_block;
c5705a77
AK
1120
1121 size = TARGET_PAGE_ALIGN(size);
1122 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1123
b2a8658e
UD
1124 /* This assumes the iothread lock is taken here too. */
1125 qemu_mutex_lock_ramlist();
7c637366 1126 new_block->mr = mr;
432d268c 1127 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1128 if (host) {
1129 new_block->host = host;
cd19cfa2 1130 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
1131 } else {
1132 if (mem_path) {
c902760f 1133#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
1134 new_block->host = file_ram_alloc(new_block, size, mem_path);
1135 if (!new_block->host) {
6eebf958 1136 new_block->host = qemu_anon_ram_alloc(size);
8490fc78 1137 memory_try_enable_merging(new_block->host, size);
6977dfe6 1138 }
c902760f 1139#else
6977dfe6
YT
1140 fprintf(stderr, "-mem-path option unsupported\n");
1141 exit(1);
c902760f 1142#endif
6977dfe6 1143 } else {
868bb33f 1144 if (xen_enabled()) {
fce537d4 1145 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
1146 } else if (kvm_enabled()) {
1147 /* some s390/kvm configurations have special constraints */
6eebf958 1148 new_block->host = kvm_ram_alloc(size);
432d268c 1149 } else {
6eebf958 1150 new_block->host = qemu_anon_ram_alloc(size);
432d268c 1151 }
8490fc78 1152 memory_try_enable_merging(new_block->host, size);
6977dfe6 1153 }
c902760f 1154 }
94a6b54f
PB
1155 new_block->length = size;
1156
abb26d63
PB
1157 /* Keep the list sorted from biggest to smallest block. */
1158 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1159 if (block->length < new_block->length) {
1160 break;
1161 }
1162 }
1163 if (block) {
1164 QTAILQ_INSERT_BEFORE(block, new_block, next);
1165 } else {
1166 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1167 }
0d6d3c87 1168 ram_list.mru_block = NULL;
94a6b54f 1169
f798b07f 1170 ram_list.version++;
b2a8658e 1171 qemu_mutex_unlock_ramlist();
f798b07f 1172
7267c094 1173 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1174 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
1175 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1176 0, size >> TARGET_PAGE_BITS);
1720aeee 1177 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1178
ddb97f1d 1179 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1180 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1181
6f0437e8
JK
1182 if (kvm_enabled())
1183 kvm_setup_guest_memory(new_block->host, size);
1184
94a6b54f
PB
1185 return new_block->offset;
1186}
e9a1ab19 1187
c5705a77 1188ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1189{
c5705a77 1190 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1191}
1192
1f2e98b6
AW
1193void qemu_ram_free_from_ptr(ram_addr_t addr)
1194{
1195 RAMBlock *block;
1196
b2a8658e
UD
1197 /* This assumes the iothread lock is taken here too. */
1198 qemu_mutex_lock_ramlist();
a3161038 1199 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1200 if (addr == block->offset) {
a3161038 1201 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1202 ram_list.mru_block = NULL;
f798b07f 1203 ram_list.version++;
7267c094 1204 g_free(block);
b2a8658e 1205 break;
1f2e98b6
AW
1206 }
1207 }
b2a8658e 1208 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1209}
1210
c227f099 1211void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1212{
04b16653
AW
1213 RAMBlock *block;
1214
b2a8658e
UD
1215 /* This assumes the iothread lock is taken here too. */
1216 qemu_mutex_lock_ramlist();
a3161038 1217 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1218 if (addr == block->offset) {
a3161038 1219 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1220 ram_list.mru_block = NULL;
f798b07f 1221 ram_list.version++;
cd19cfa2
HY
1222 if (block->flags & RAM_PREALLOC_MASK) {
1223 ;
1224 } else if (mem_path) {
04b16653
AW
1225#if defined (__linux__) && !defined(TARGET_S390X)
1226 if (block->fd) {
1227 munmap(block->host, block->length);
1228 close(block->fd);
1229 } else {
e7a09b92 1230 qemu_anon_ram_free(block->host, block->length);
04b16653 1231 }
fd28aa13
JK
1232#else
1233 abort();
04b16653
AW
1234#endif
1235 } else {
868bb33f 1236 if (xen_enabled()) {
e41d7c69 1237 xen_invalidate_map_cache_entry(block->host);
432d268c 1238 } else {
e7a09b92 1239 qemu_anon_ram_free(block->host, block->length);
432d268c 1240 }
04b16653 1241 }
7267c094 1242 g_free(block);
b2a8658e 1243 break;
04b16653
AW
1244 }
1245 }
b2a8658e 1246 qemu_mutex_unlock_ramlist();
04b16653 1247
e9a1ab19
FB
1248}
1249
cd19cfa2
HY
1250#ifndef _WIN32
1251void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1252{
1253 RAMBlock *block;
1254 ram_addr_t offset;
1255 int flags;
1256 void *area, *vaddr;
1257
a3161038 1258 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1259 offset = addr - block->offset;
1260 if (offset < block->length) {
1261 vaddr = block->host + offset;
1262 if (block->flags & RAM_PREALLOC_MASK) {
1263 ;
1264 } else {
1265 flags = MAP_FIXED;
1266 munmap(vaddr, length);
1267 if (mem_path) {
1268#if defined(__linux__) && !defined(TARGET_S390X)
1269 if (block->fd) {
1270#ifdef MAP_POPULATE
1271 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1272 MAP_PRIVATE;
1273#else
1274 flags |= MAP_PRIVATE;
1275#endif
1276 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1277 flags, block->fd, offset);
1278 } else {
1279 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1280 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1281 flags, -1, 0);
1282 }
fd28aa13
JK
1283#else
1284 abort();
cd19cfa2
HY
1285#endif
1286 } else {
1287#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1288 flags |= MAP_SHARED | MAP_ANONYMOUS;
1289 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1290 flags, -1, 0);
1291#else
1292 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1293 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1294 flags, -1, 0);
1295#endif
1296 }
1297 if (area != vaddr) {
f15fbc4b
AP
1298 fprintf(stderr, "Could not remap addr: "
1299 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1300 length, addr);
1301 exit(1);
1302 }
8490fc78 1303 memory_try_enable_merging(vaddr, length);
ddb97f1d 1304 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1305 }
1306 return;
1307 }
1308 }
1309}
1310#endif /* !_WIN32 */
1311
dc828ca1 1312/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
1313 With the exception of the softmmu code in this file, this should
1314 only be used for local memory (e.g. video ram) that the device owns,
1315 and knows it isn't going to access beyond the end of the block.
1316
1317 It should not be used for general purpose DMA.
1318 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1319 */
c227f099 1320void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 1321{
94a6b54f
PB
1322 RAMBlock *block;
1323
b2a8658e 1324 /* The list is protected by the iothread lock here. */
0d6d3c87
PB
1325 block = ram_list.mru_block;
1326 if (block && addr - block->offset < block->length) {
1327 goto found;
1328 }
a3161038 1329 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1330 if (addr - block->offset < block->length) {
0d6d3c87 1331 goto found;
f471a17e 1332 }
94a6b54f 1333 }
f471a17e
AW
1334
1335 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1336 abort();
1337
0d6d3c87
PB
1338found:
1339 ram_list.mru_block = block;
1340 if (xen_enabled()) {
1341 /* We need to check if the requested address is in the RAM
1342 * because we don't want to map the entire memory in QEMU.
1343 * In that case just map until the end of the page.
1344 */
1345 if (block->offset == 0) {
1346 return xen_map_cache(addr, 0, 0);
1347 } else if (block->host == NULL) {
1348 block->host =
1349 xen_map_cache(block->offset, block->length, 1);
1350 }
1351 }
1352 return block->host + (addr - block->offset);
dc828ca1
PB
1353}
1354
0d6d3c87
PB
1355/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1356 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1357 *
1358 * ??? Is this still necessary?
b2e0a138 1359 */
8b9c99d9 1360static void *qemu_safe_ram_ptr(ram_addr_t addr)
b2e0a138
MT
1361{
1362 RAMBlock *block;
1363
b2a8658e 1364 /* The list is protected by the iothread lock here. */
a3161038 1365 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1366 if (addr - block->offset < block->length) {
868bb33f 1367 if (xen_enabled()) {
432d268c
JN
1368 /* We need to check if the requested address is in the RAM
1369 * because we don't want to map the entire memory in QEMU.
712c2b41 1370 * In that case just map until the end of the page.
432d268c
JN
1371 */
1372 if (block->offset == 0) {
e41d7c69 1373 return xen_map_cache(addr, 0, 0);
432d268c 1374 } else if (block->host == NULL) {
e41d7c69
JK
1375 block->host =
1376 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
1377 }
1378 }
b2e0a138
MT
1379 return block->host + (addr - block->offset);
1380 }
1381 }
1382
1383 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1384 abort();
1385
1386 return NULL;
1387}
1388
38bee5dc
SS
1389/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1390 * but takes a size argument */
8b9c99d9 1391static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 1392{
8ab934f9
SS
1393 if (*size == 0) {
1394 return NULL;
1395 }
868bb33f 1396 if (xen_enabled()) {
e41d7c69 1397 return xen_map_cache(addr, *size, 1);
868bb33f 1398 } else {
38bee5dc
SS
1399 RAMBlock *block;
1400
a3161038 1401 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1402 if (addr - block->offset < block->length) {
1403 if (addr - block->offset + *size > block->length)
1404 *size = block->length - addr + block->offset;
1405 return block->host + (addr - block->offset);
1406 }
1407 }
1408
1409 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1410 abort();
38bee5dc
SS
1411 }
1412}
1413
e890261f 1414int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1415{
94a6b54f
PB
1416 RAMBlock *block;
1417 uint8_t *host = ptr;
1418
868bb33f 1419 if (xen_enabled()) {
e41d7c69 1420 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
1421 return 0;
1422 }
1423
a3161038 1424 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 1425 /* This case happens when the block is not mapped. */
1426 if (block->host == NULL) {
1427 continue;
1428 }
f471a17e 1429 if (host - block->host < block->length) {
e890261f
MT
1430 *ram_addr = block->offset + (host - block->host);
1431 return 0;
f471a17e 1432 }
94a6b54f 1433 }
432d268c 1434
e890261f
MT
1435 return -1;
1436}
f471a17e 1437
e890261f
MT
1438/* Some of the softmmu routines need to translate from a host pointer
1439 (typically a TLB entry) back to a ram offset. */
1440ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1441{
1442 ram_addr_t ram_addr;
f471a17e 1443
e890261f
MT
1444 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1445 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1446 abort();
1447 }
1448 return ram_addr;
5579c7f3
PB
1449}
1450
a8170e5e 1451static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1452 uint64_t val, unsigned size)
9fa3e853 1453{
3a7d929e 1454 int dirty_flags;
f7c11b53 1455 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1456 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
0e0df1e2 1457 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1458 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1459 }
0e0df1e2
AK
1460 switch (size) {
1461 case 1:
1462 stb_p(qemu_get_ram_ptr(ram_addr), val);
1463 break;
1464 case 2:
1465 stw_p(qemu_get_ram_ptr(ram_addr), val);
1466 break;
1467 case 4:
1468 stl_p(qemu_get_ram_ptr(ram_addr), val);
1469 break;
1470 default:
1471 abort();
3a7d929e 1472 }
f23db169 1473 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1474 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1475 /* we remove the notdirty callback only if the code has been
1476 flushed */
1477 if (dirty_flags == 0xff)
2e70f6ef 1478 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
1479}
1480
b018ddf6
PB
1481static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1482 unsigned size, bool is_write)
1483{
1484 return is_write;
1485}
1486
0e0df1e2 1487static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1488 .write = notdirty_mem_write,
b018ddf6 1489 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1490 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1491};
1492
0f459d16 1493/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1494static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1495{
9349b4f9 1496 CPUArchState *env = cpu_single_env;
06d55cc1 1497 target_ulong pc, cs_base;
0f459d16 1498 target_ulong vaddr;
a1d1bb31 1499 CPUWatchpoint *wp;
06d55cc1 1500 int cpu_flags;
0f459d16 1501
06d55cc1
AL
1502 if (env->watchpoint_hit) {
1503 /* We re-entered the check after replacing the TB. Now raise
 1504 * the debug interrupt so that it will trigger after the
1505 * current instruction. */
c3affe56 1506 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1507 return;
1508 }
2e70f6ef 1509 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1510 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1511 if ((vaddr == (wp->vaddr & len_mask) ||
1512 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1513 wp->flags |= BP_WATCHPOINT_HIT;
1514 if (!env->watchpoint_hit) {
1515 env->watchpoint_hit = wp;
5a316526 1516 tb_check_watchpoint(env);
6e140f28
AL
1517 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1518 env->exception_index = EXCP_DEBUG;
488d6577 1519 cpu_loop_exit(env);
6e140f28
AL
1520 } else {
1521 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1522 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1523 cpu_resume_from_signal(env, NULL);
6e140f28 1524 }
06d55cc1 1525 }
6e140f28
AL
1526 } else {
1527 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1528 }
1529 }
1530}
1531
6658ffb8
PB
1532/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1533 so these check for a hit then pass through to the normal out-of-line
1534 phys routines. */
a8170e5e 1535static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1536 unsigned size)
6658ffb8 1537{
1ec9b909
AK
1538 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1539 switch (size) {
1540 case 1: return ldub_phys(addr);
1541 case 2: return lduw_phys(addr);
1542 case 4: return ldl_phys(addr);
1543 default: abort();
1544 }
6658ffb8
PB
1545}
1546
a8170e5e 1547static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1548 uint64_t val, unsigned size)
6658ffb8 1549{
1ec9b909
AK
1550 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1551 switch (size) {
67364150
MF
1552 case 1:
1553 stb_phys(addr, val);
1554 break;
1555 case 2:
1556 stw_phys(addr, val);
1557 break;
1558 case 4:
1559 stl_phys(addr, val);
1560 break;
1ec9b909
AK
1561 default: abort();
1562 }
6658ffb8
PB
1563}
1564
1ec9b909
AK
1565static const MemoryRegionOps watch_mem_ops = {
1566 .read = watch_mem_read,
1567 .write = watch_mem_write,
1568 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1569};
6658ffb8 1570
a8170e5e 1571static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1572 unsigned len)
db7b5426 1573{
acc9d80b
JK
1574 subpage_t *subpage = opaque;
1575 uint8_t buf[4];
791af8c8 1576
db7b5426 1577#if defined(DEBUG_SUBPAGE)
acc9d80b
JK
1578 printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
1579 subpage, len, addr);
db7b5426 1580#endif
acc9d80b
JK
1581 address_space_read(subpage->as, addr + subpage->base, buf, len);
1582 switch (len) {
1583 case 1:
1584 return ldub_p(buf);
1585 case 2:
1586 return lduw_p(buf);
1587 case 4:
1588 return ldl_p(buf);
1589 default:
1590 abort();
1591 }
db7b5426
BS
1592}
1593
a8170e5e 1594static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1595 uint64_t value, unsigned len)
db7b5426 1596{
acc9d80b
JK
1597 subpage_t *subpage = opaque;
1598 uint8_t buf[4];
1599
db7b5426 1600#if defined(DEBUG_SUBPAGE)
70c68e44 1601 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
acc9d80b
JK
1602 " value %"PRIx64"\n",
1603 __func__, subpage, len, addr, value);
db7b5426 1604#endif
acc9d80b
JK
1605 switch (len) {
1606 case 1:
1607 stb_p(buf, value);
1608 break;
1609 case 2:
1610 stw_p(buf, value);
1611 break;
1612 case 4:
1613 stl_p(buf, value);
1614 break;
1615 default:
1616 abort();
1617 }
1618 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1619}
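
Illustrative sketch, not part of exec.c: as the commit subject says, subpage_read()/subpage_write() now bounce every access through a small byte buffer and address_space_read()/address_space_write() on the owning AddressSpace, converting between buffer and value with ldub_p/lduw_p/ldl_p and stb_p/stw_p/stl_p. The sketch below stubs those helpers with memcpy and uses a flat byte array standing in for the address space.

/* Hypothetical sketch of the value<->buffer step used by subpage_read/write. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* stand-ins for QEMU's stl_p()/ldl_p(), host-endian via memcpy */
static void stl_p(void *p, uint32_t v)  { memcpy(p, &v, 4); }
static uint32_t ldl_p(const void *p)    { uint32_t v; memcpy(&v, p, 4); return v; }

/* toy "address space": a flat byte array written/read at an offset */
static uint8_t as_mem[64];
static void as_write(uint64_t addr, const uint8_t *buf, unsigned len) { memcpy(&as_mem[addr], buf, len); }
static void as_read(uint64_t addr, uint8_t *buf, unsigned len)        { memcpy(buf, &as_mem[addr], len); }

int main(void)
{
    uint64_t base = 0x10, addr = 0x4;   /* subpage base + offset inside it */
    uint8_t buf[4];

    stl_p(buf, 0xdeadbeef);             /* value -> bytes, as subpage_write() does */
    as_write(base + addr, buf, 4);

    as_read(base + addr, buf, 4);       /* bytes -> value, as subpage_read() does */
    assert(ldl_p(buf) == 0xdeadbeef);
    return 0;
}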
1620
c353e4cc
PB
1621static bool subpage_accepts(void *opaque, hwaddr addr,
1622 unsigned size, bool is_write)
1623{
acc9d80b 1624 subpage_t *subpage = opaque;
c353e4cc 1625#if defined(DEBUG_SUBPAGE)
acc9d80b
JK
1626 printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
 1627 __func__, subpage, is_write ? 'w' : 'r', size, addr);
c353e4cc
PB
1628#endif
1629
acc9d80b
JK
1630 return address_space_access_valid(subpage->as, addr + subpage->base,
1631 size, is_write);
c353e4cc
PB
1632}
1633
70c68e44
AK
1634static const MemoryRegionOps subpage_ops = {
1635 .read = subpage_read,
1636 .write = subpage_write,
c353e4cc 1637 .valid.accepts = subpage_accepts,
70c68e44 1638 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1639};
1640
c227f099 1641static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1642 uint16_t section)
db7b5426
BS
1643{
1644 int idx, eidx;
1645
1646 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1647 return -1;
1648 idx = SUBPAGE_IDX(start);
1649 eidx = SUBPAGE_IDX(end);
1650#if defined(DEBUG_SUBPAGE)
0bf9e31a 1651 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
1652 mmio, start, end, idx, eidx, memory);
1653#endif
db7b5426 1654 for (; idx <= eidx; idx++) {
5312bd8b 1655 mmio->sub_section[idx] = section;
db7b5426
BS
1656 }
1657
1658 return 0;
1659}
1660
acc9d80b 1661static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1662{
c227f099 1663 subpage_t *mmio;
db7b5426 1664
7267c094 1665 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1666
acc9d80b 1667 mmio->as = as;
1eec614b 1668 mmio->base = base;
70c68e44
AK
1669 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1670 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1671 mmio->iomem.subpage = true;
db7b5426 1672#if defined(DEBUG_SUBPAGE)
1eec614b
AL
 1673 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1674 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1675#endif
0f0cb164 1676 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
1677
1678 return mmio;
1679}
1680
5312bd8b
AK
1681static uint16_t dummy_section(MemoryRegion *mr)
1682{
1683 MemoryRegionSection section = {
1684 .mr = mr,
1685 .offset_within_address_space = 0,
1686 .offset_within_region = 0,
1687 .size = UINT64_MAX,
1688 };
1689
1690 return phys_section_add(&section);
1691}
1692
a8170e5e 1693MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1694{
37ec01d4 1695 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1696}
1697
e9179ce1
AK
1698static void io_mem_init(void)
1699{
bf8d5166 1700 memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
0e0df1e2
AK
1701 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1702 "unassigned", UINT64_MAX);
1703 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1704 "notdirty", UINT64_MAX);
1ec9b909
AK
1705 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1706 "watch", UINT64_MAX);
e9179ce1
AK
1707}
1708
ac1970fb
AK
1709static void mem_begin(MemoryListener *listener)
1710{
1711 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1712
1713 destroy_all_mappings(d);
1714 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1715}
1716
50c1e149
AK
1717static void core_begin(MemoryListener *listener)
1718{
5312bd8b
AK
1719 phys_sections_clear();
1720 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
1721 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1722 phys_section_rom = dummy_section(&io_mem_rom);
1723 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
1724}
1725
1d71148e 1726static void tcg_commit(MemoryListener *listener)
50c1e149 1727{
9349b4f9 1728 CPUArchState *env;
117712c3
AK
1729
1730 /* since each CPU stores ram addresses in its TLB cache, we must
1731 reset the modified entries */
1732 /* XXX: slow ! */
1733 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1734 tlb_flush(env, 1);
1735 }
50c1e149
AK
1736}
1737
93632747
AK
1738static void core_log_global_start(MemoryListener *listener)
1739{
1740 cpu_physical_memory_set_dirty_tracking(1);
1741}
1742
1743static void core_log_global_stop(MemoryListener *listener)
1744{
1745 cpu_physical_memory_set_dirty_tracking(0);
1746}
1747
4855d41a
AK
1748static void io_region_add(MemoryListener *listener,
1749 MemoryRegionSection *section)
1750{
a2d33521
AK
1751 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1752
1753 mrio->mr = section->mr;
1754 mrio->offset = section->offset_within_region;
1755 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 1756 section->offset_within_address_space, section->size);
a2d33521 1757 ioport_register(&mrio->iorange);
4855d41a
AK
1758}
1759
1760static void io_region_del(MemoryListener *listener,
1761 MemoryRegionSection *section)
1762{
1763 isa_unassign_ioport(section->offset_within_address_space, section->size);
1764}
1765
93632747 1766static MemoryListener core_memory_listener = {
50c1e149 1767 .begin = core_begin,
93632747
AK
1768 .log_global_start = core_log_global_start,
1769 .log_global_stop = core_log_global_stop,
ac1970fb 1770 .priority = 1,
93632747
AK
1771};
1772
4855d41a
AK
1773static MemoryListener io_memory_listener = {
1774 .region_add = io_region_add,
1775 .region_del = io_region_del,
4855d41a
AK
1776 .priority = 0,
1777};
1778
1d71148e
AK
1779static MemoryListener tcg_memory_listener = {
1780 .commit = tcg_commit,
1781};
1782
ac1970fb
AK
1783void address_space_init_dispatch(AddressSpace *as)
1784{
1785 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1786
1787 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1788 d->listener = (MemoryListener) {
1789 .begin = mem_begin,
1790 .region_add = mem_add,
1791 .region_nop = mem_add,
1792 .priority = 0,
1793 };
acc9d80b 1794 d->as = as;
ac1970fb
AK
1795 as->dispatch = d;
1796 memory_listener_register(&d->listener, as);
1797}
1798
83f3c251
AK
1799void address_space_destroy_dispatch(AddressSpace *as)
1800{
1801 AddressSpaceDispatch *d = as->dispatch;
1802
1803 memory_listener_unregister(&d->listener);
1804 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1805 g_free(d);
1806 as->dispatch = NULL;
1807}
1808
62152b8a
AK
1809static void memory_map_init(void)
1810{
7267c094 1811 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1812 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
1813 address_space_init(&address_space_memory, system_memory);
1814 address_space_memory.name = "memory";
309cb471 1815
7267c094 1816 system_io = g_malloc(sizeof(*system_io));
309cb471 1817 memory_region_init(system_io, "io", 65536);
2673a5da
AK
1818 address_space_init(&address_space_io, system_io);
1819 address_space_io.name = "I/O";
93632747 1820
f6790af6
AK
1821 memory_listener_register(&core_memory_listener, &address_space_memory);
1822 memory_listener_register(&io_memory_listener, &address_space_io);
1823 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
1824
1825 dma_context_init(&dma_context_memory, &address_space_memory,
1826 NULL, NULL, NULL);
62152b8a
AK
1827}
1828
1829MemoryRegion *get_system_memory(void)
1830{
1831 return system_memory;
1832}
1833
309cb471
AK
1834MemoryRegion *get_system_io(void)
1835{
1836 return system_io;
1837}
1838
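/*
 * Editor's illustrative sketch, not part of the original file: how board
 * code typically consumes get_system_memory().  The function name and the
 * "example.ram" region name are hypothetical.
 */
static void example_board_ram_init(ram_addr_t ram_size)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    /* Allocate host memory backing guest RAM and map it at guest
       physical address 0 in the system address space. */
    memory_region_init_ram(ram, "example.ram", ram_size);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
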
e2eef170
PB
1839#endif /* !defined(CONFIG_USER_ONLY) */
1840
13eb76e0
FB
1841/* physical memory access (slow version, mainly for debug) */
1842#if defined(CONFIG_USER_ONLY)
9349b4f9 1843int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1844 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1845{
1846 int l, flags;
1847 target_ulong page;
53a5960a 1848 void * p;
13eb76e0
FB
1849
1850 while (len > 0) {
1851 page = addr & TARGET_PAGE_MASK;
1852 l = (page + TARGET_PAGE_SIZE) - addr;
1853 if (l > len)
1854 l = len;
1855 flags = page_get_flags(page);
1856 if (!(flags & PAGE_VALID))
a68fe89c 1857 return -1;
13eb76e0
FB
1858 if (is_write) {
1859 if (!(flags & PAGE_WRITE))
a68fe89c 1860 return -1;
579a97f7 1861 /* XXX: this code should not depend on lock_user */
72fb7daa 1862 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1863 return -1;
72fb7daa
AJ
1864 memcpy(p, buf, l);
1865 unlock_user(p, addr, l);
13eb76e0
FB
1866 } else {
1867 if (!(flags & PAGE_READ))
a68fe89c 1868 return -1;
579a97f7 1869 /* XXX: this code should not depend on lock_user */
72fb7daa 1870 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1871 return -1;
72fb7daa 1872 memcpy(buf, p, l);
5b257578 1873 unlock_user(p, addr, 0);
13eb76e0
FB
1874 }
1875 len -= l;
1876 buf += l;
1877 addr += l;
1878 }
a68fe89c 1879 return 0;
13eb76e0 1880}
8df1cd07 1881
13eb76e0 1882#else
51d7a9eb 1883
a8170e5e
AK
1884static void invalidate_and_set_dirty(hwaddr addr,
1885 hwaddr length)
51d7a9eb
AP
1886{
1887 if (!cpu_physical_memory_is_dirty(addr)) {
1888 /* invalidate code */
1889 tb_invalidate_phys_page_range(addr, addr + length, 0);
1890 /* set dirty bit */
1891 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1892 }
e226939d 1893 xen_modified_memory(addr, length);
51d7a9eb
AP
1894}
1895
2bbfa05d
PB
1896static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1897{
1898 if (memory_region_is_ram(mr)) {
1899 return !(is_write && mr->readonly);
1900 }
1901 if (memory_region_is_romd(mr)) {
1902 return !is_write;
1903 }
1904
1905 return false;
1906}
1907
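/*
 * Editor's note (not in the original): memory_access_size() picks the widest
 * naturally aligned access the region supports for the bytes remaining in the
 * current iteration.  For example, a 7-byte MMIO write whose offset has
 * (addr & 3) == 2, on a device without .impl.unaligned, is issued by
 * address_space_rw() below as a 2-byte, then a 4-byte, then a 1-byte access.
 */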
f52cc467 1908static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
82f2563f 1909{
f52cc467 1910 if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) {
82f2563f
PB
1911 return 4;
1912 }
f52cc467 1913 if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
82f2563f
PB
1914 return 2;
1915 }
1916 return 1;
1917}
1918
fd8aaa76 1919bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1920 int len, bool is_write)
13eb76e0 1921{
149f54b5 1922 hwaddr l;
13eb76e0 1923 uint8_t *ptr;
791af8c8 1924 uint64_t val;
149f54b5 1925 hwaddr addr1;
f3705d53 1926 MemoryRegionSection *section;
fd8aaa76 1927 bool error = false;
3b46e624 1928
13eb76e0 1929 while (len > 0) {
149f54b5
PB
1930 l = len;
1931 section = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 1932
13eb76e0 1933 if (is_write) {
2bbfa05d 1934 if (!memory_access_is_direct(section->mr, is_write)) {
f52cc467 1935 l = memory_access_size(section->mr, l, addr1);
6a00d601
FB
1936 /* XXX: could force cpu_single_env to NULL to avoid
1937 potential bugs */
82f2563f 1938 if (l == 4) {
1c213d19 1939 /* 32 bit write access */
c27004ec 1940 val = ldl_p(buf);
fd8aaa76 1941 error |= io_mem_write(section->mr, addr1, val, 4);
82f2563f 1942 } else if (l == 2) {
1c213d19 1943 /* 16 bit write access */
c27004ec 1944 val = lduw_p(buf);
fd8aaa76 1945 error |= io_mem_write(section->mr, addr1, val, 2);
13eb76e0 1946 } else {
1c213d19 1947 /* 8 bit write access */
c27004ec 1948 val = ldub_p(buf);
fd8aaa76 1949 error |= io_mem_write(section->mr, addr1, val, 1);
13eb76e0 1950 }
2bbfa05d 1951 } else {
149f54b5 1952 addr1 += memory_region_get_ram_addr(section->mr);
13eb76e0 1953 /* RAM case */
5579c7f3 1954 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1955 memcpy(ptr, buf, l);
51d7a9eb 1956 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1957 }
1958 } else {
2bbfa05d 1959 if (!memory_access_is_direct(section->mr, is_write)) {
13eb76e0 1960 /* I/O case */
f52cc467 1961 l = memory_access_size(section->mr, l, addr1);
82f2563f 1962 if (l == 4) {
13eb76e0 1963 /* 32 bit read access */
fd8aaa76 1964 error |= io_mem_read(section->mr, addr1, &val, 4);
c27004ec 1965 stl_p(buf, val);
82f2563f 1966 } else if (l == 2) {
13eb76e0 1967 /* 16 bit read access */
fd8aaa76 1968 error |= io_mem_read(section->mr, addr1, &val, 2);
c27004ec 1969 stw_p(buf, val);
13eb76e0 1970 } else {
1c213d19 1971 /* 8 bit read access */
fd8aaa76 1972 error |= io_mem_read(section->mr, addr1, &val, 1);
c27004ec 1973 stb_p(buf, val);
13eb76e0
FB
1974 }
1975 } else {
1976 /* RAM case */
149f54b5 1977 ptr = qemu_get_ram_ptr(section->mr->ram_addr + addr1);
f3705d53 1978 memcpy(buf, ptr, l);
13eb76e0
FB
1979 }
1980 }
1981 len -= l;
1982 buf += l;
1983 addr += l;
1984 }
fd8aaa76
PB
1985
1986 return error;
13eb76e0 1987}
8df1cd07 1988
fd8aaa76 1989bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1990 const uint8_t *buf, int len)
1991{
fd8aaa76 1992 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
1993}
1994
fd8aaa76 1995bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 1996{
fd8aaa76 1997 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
1998}
1999
2000
a8170e5e 2001void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2002 int len, int is_write)
2003{
fd8aaa76 2004 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2005}
2006
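/*
 * Editor's illustrative sketch (hypothetical, not upstream code): a device
 * model copying guest-physical memory through the accessors above.  Either
 * call can hit MMIO; both report failures through their bool return value.
 */
static void example_dma_copy(AddressSpace *as, hwaddr src, hwaddr dst,
                             hwaddr size)
{
    uint8_t chunk[512];

    while (size > 0) {
        int l = size < sizeof(chunk) ? size : sizeof(chunk);

        if (address_space_read(as, src, chunk, l) ||
            address_space_write(as, dst, chunk, l)) {
            break;              /* an access faulted; abort the transfer */
        }
        src += l;
        dst += l;
        size -= l;
    }
}
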
d0ecd2aa 2007/* used for ROM loading: can write in RAM and ROM */
a8170e5e 2008void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
2009 const uint8_t *buf, int len)
2010{
149f54b5 2011 hwaddr l;
d0ecd2aa 2012 uint8_t *ptr;
149f54b5 2013 hwaddr addr1;
f3705d53 2014 MemoryRegionSection *section;
3b46e624 2015
d0ecd2aa 2016 while (len > 0) {
149f54b5
PB
2017 l = len;
2018 section = address_space_translate(&address_space_memory,
2019 addr, &addr1, &l, true);
3b46e624 2020
cc5bea60
BS
2021 if (!(memory_region_is_ram(section->mr) ||
2022 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
2023 /* do nothing */
2024 } else {
149f54b5 2025 addr1 += memory_region_get_ram_addr(section->mr);
d0ecd2aa 2026 /* ROM/RAM case */
5579c7f3 2027 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2028 memcpy(ptr, buf, l);
51d7a9eb 2029 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2030 }
2031 len -= l;
2032 buf += l;
2033 addr += l;
2034 }
2035}
2036
6d16c2f8
AL
2037typedef struct {
2038 void *buffer;
a8170e5e
AK
2039 hwaddr addr;
2040 hwaddr len;
6d16c2f8
AL
2041} BounceBuffer;
2042
2043static BounceBuffer bounce;
2044
ba223c29
AL
2045typedef struct MapClient {
2046 void *opaque;
2047 void (*callback)(void *opaque);
72cf2d4f 2048 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2049} MapClient;
2050
72cf2d4f
BS
2051static QLIST_HEAD(map_client_list, MapClient) map_client_list
2052 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2053
2054void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2055{
7267c094 2056 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2057
2058 client->opaque = opaque;
2059 client->callback = callback;
72cf2d4f 2060 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2061 return client;
2062}
2063
8b9c99d9 2064static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2065{
2066 MapClient *client = (MapClient *)_client;
2067
72cf2d4f 2068 QLIST_REMOVE(client, link);
7267c094 2069 g_free(client);
ba223c29
AL
2070}
2071
2072static void cpu_notify_map_clients(void)
2073{
2074 MapClient *client;
2075
72cf2d4f
BS
2076 while (!QLIST_EMPTY(&map_client_list)) {
2077 client = QLIST_FIRST(&map_client_list);
ba223c29 2078 client->callback(client->opaque);
34d5e948 2079 cpu_unregister_map_client(client);
ba223c29
AL
2080 }
2081}
2082
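/*
 * Editor's illustrative sketch (all names hypothetical): when
 * address_space_map() below cannot map a request right away (for instance
 * because the single bounce buffer is busy), a device can park the request
 * and register a map client; cpu_notify_map_clients() invokes the callback
 * once retrying is likely to succeed.
 */
typedef struct ExampleDeferredXfer {
    hwaddr addr;
    hwaddr len;
} ExampleDeferredXfer;

static void example_map_retry_cb(void *opaque)
{
    ExampleDeferredXfer *xfer = opaque;

    /* Re-issue the deferred transfer described by 'xfer' here. */
    (void)xfer;
}

static void *example_defer_xfer(ExampleDeferredXfer *xfer)
{
    return cpu_register_map_client(xfer, example_map_retry_cb);
}
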
51644ab7
PB
2083bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2084{
2085 MemoryRegionSection *section;
2086 hwaddr l, xlat;
2087
2088 while (len > 0) {
2089 l = len;
2090 section = address_space_translate(as, addr, &xlat, &l, is_write);
2091 if (!memory_access_is_direct(section->mr, is_write)) {
f52cc467 2092 l = memory_access_size(section->mr, l, addr);
51644ab7
PB
2093 if (!memory_region_access_valid(section->mr, xlat, l, is_write)) {
2094 return false;
2095 }
2096 }
2097
2098 len -= l;
2099 addr += l;
2100 }
2101 return true;
2102}
2103
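/*
 * Editor's illustrative sketch (hypothetical names): a device can vet a
 * guest-programmed buffer with address_space_access_valid() before starting
 * a transfer, instead of discovering an unassigned region halfway through.
 */
static bool example_dma_window_ok(AddressSpace *as, hwaddr buf,
                                  uint32_t buf_len, bool is_write)
{
    return buf_len > 0 &&
           address_space_access_valid(as, buf, buf_len, is_write);
}
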
6d16c2f8
AL
2104/* Map a physical memory region into a host virtual address.
2105 * May map a subset of the requested range, given by and returned in *plen.
2106 * May return NULL if resources needed to perform the mapping are exhausted.
2107 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2108 * Use cpu_register_map_client() to know when retrying the map operation is
2109 * likely to succeed.
6d16c2f8 2110 */
ac1970fb 2111void *address_space_map(AddressSpace *as,
a8170e5e
AK
2112 hwaddr addr,
2113 hwaddr *plen,
ac1970fb 2114 bool is_write)
6d16c2f8 2115{
a8170e5e
AK
2116 hwaddr len = *plen;
2117 hwaddr todo = 0;
149f54b5 2118 hwaddr l, xlat;
f3705d53 2119 MemoryRegionSection *section;
f15fbc4b 2120 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2121 ram_addr_t rlen;
2122 void *ret;
6d16c2f8
AL
2123
2124 while (len > 0) {
149f54b5
PB
2125 l = len;
2126 section = address_space_translate(as, addr, &xlat, &l, is_write);
6d16c2f8 2127
2bbfa05d 2128 if (!memory_access_is_direct(section->mr, is_write)) {
38bee5dc 2129 if (todo || bounce.buffer) {
6d16c2f8
AL
2130 break;
2131 }
2132 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2133 bounce.addr = addr;
2134 bounce.len = l;
2135 if (!is_write) {
ac1970fb 2136 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2137 }
38bee5dc
SS
2138
2139 *plen = l;
2140 return bounce.buffer;
6d16c2f8 2141 }
8ab934f9 2142 if (!todo) {
149f54b5
PB
2143 raddr = memory_region_get_ram_addr(section->mr) + xlat;
2144 } else {
2145 if (memory_region_get_ram_addr(section->mr) + xlat != raddr + todo) {
2146 break;
2147 }
8ab934f9 2148 }
6d16c2f8
AL
2149
2150 len -= l;
2151 addr += l;
38bee5dc 2152 todo += l;
6d16c2f8 2153 }
8ab934f9
SS
2154 rlen = todo;
2155 ret = qemu_ram_ptr_length(raddr, &rlen);
2156 *plen = rlen;
2157 return ret;
6d16c2f8
AL
2158}
2159
ac1970fb 2160/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2161 * Will also mark the memory as dirty if is_write == 1. access_len gives
2162 * the amount of memory that was actually read or written by the caller.
2163 */
a8170e5e
AK
2164void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2165 int is_write, hwaddr access_len)
6d16c2f8
AL
2166{
2167 if (buffer != bounce.buffer) {
2168 if (is_write) {
e890261f 2169 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2170 while (access_len) {
2171 unsigned l;
2172 l = TARGET_PAGE_SIZE;
2173 if (l > access_len)
2174 l = access_len;
51d7a9eb 2175 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2176 addr1 += l;
2177 access_len -= l;
2178 }
2179 }
868bb33f 2180 if (xen_enabled()) {
e41d7c69 2181 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2182 }
6d16c2f8
AL
2183 return;
2184 }
2185 if (is_write) {
ac1970fb 2186 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2187 }
f8a83245 2188 qemu_vfree(bounce.buffer);
6d16c2f8 2189 bounce.buffer = NULL;
ba223c29 2190 cpu_notify_map_clients();
6d16c2f8 2191}
d0ecd2aa 2192
a8170e5e
AK
2193void *cpu_physical_memory_map(hwaddr addr,
2194 hwaddr *plen,
ac1970fb
AK
2195 int is_write)
2196{
2197 return address_space_map(&address_space_memory, addr, plen, is_write);
2198}
2199
a8170e5e
AK
2200void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2201 int is_write, hwaddr access_len)
ac1970fb
AK
2202{
2203 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2204}
2205
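/*
 * Editor's illustrative sketch (hypothetical, not upstream code): the usual
 * zero-copy pattern around address_space_map()/address_space_unmap().  A
 * short or NULL mapping means the caller should fall back or retry later,
 * e.g. from a callback registered with cpu_register_map_client().
 */
static bool example_dma_write(AddressSpace *as, hwaddr addr,
                              const uint8_t *data, hwaddr size)
{
    hwaddr plen = size;
    void *host = address_space_map(as, addr, &plen, true);

    if (!host || plen < size) {
        if (host) {
            address_space_unmap(as, host, plen, true, 0);
        }
        return false;       /* nothing written; caller retries later */
    }
    memcpy(host, data, size);
    address_space_unmap(as, host, plen, true, size);
    return true;
}
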
8df1cd07 2206/* warning: addr must be aligned */
a8170e5e 2207static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2208 enum device_endian endian)
8df1cd07 2209{
8df1cd07 2210 uint8_t *ptr;
791af8c8 2211 uint64_t val;
f3705d53 2212 MemoryRegionSection *section;
149f54b5
PB
2213 hwaddr l = 4;
2214 hwaddr addr1;
8df1cd07 2215
149f54b5
PB
2216 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2217 false);
2bbfa05d 2218 if (l < 4 || !memory_access_is_direct(section->mr, false)) {
8df1cd07 2219 /* I/O case */
791af8c8 2220 io_mem_read(section->mr, addr1, &val, 4);
1e78bcc1
AG
2221#if defined(TARGET_WORDS_BIGENDIAN)
2222 if (endian == DEVICE_LITTLE_ENDIAN) {
2223 val = bswap32(val);
2224 }
2225#else
2226 if (endian == DEVICE_BIG_ENDIAN) {
2227 val = bswap32(val);
2228 }
2229#endif
8df1cd07
FB
2230 } else {
2231 /* RAM case */
f3705d53 2232 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2233 & TARGET_PAGE_MASK)
149f54b5 2234 + addr1);
1e78bcc1
AG
2235 switch (endian) {
2236 case DEVICE_LITTLE_ENDIAN:
2237 val = ldl_le_p(ptr);
2238 break;
2239 case DEVICE_BIG_ENDIAN:
2240 val = ldl_be_p(ptr);
2241 break;
2242 default:
2243 val = ldl_p(ptr);
2244 break;
2245 }
8df1cd07
FB
2246 }
2247 return val;
2248}
2249
a8170e5e 2250uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2251{
2252 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2253}
2254
a8170e5e 2255uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2256{
2257 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2258}
2259
a8170e5e 2260uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2261{
2262 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2263}
2264
84b7b8e7 2265/* warning: addr must be aligned */
a8170e5e 2266static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2267 enum device_endian endian)
84b7b8e7 2268{
84b7b8e7
FB
2269 uint8_t *ptr;
2270 uint64_t val;
f3705d53 2271 MemoryRegionSection *section;
149f54b5
PB
2272 hwaddr l = 8;
2273 hwaddr addr1;
84b7b8e7 2274
149f54b5
PB
2275 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2276 false);
2bbfa05d 2277 if (l < 8 || !memory_access_is_direct(section->mr, false)) {
84b7b8e7 2278 /* I/O case */
791af8c8 2279 io_mem_read(section->mr, addr1, &val, 8);
968a5627
PB
2280#if defined(TARGET_WORDS_BIGENDIAN)
2281 if (endian == DEVICE_LITTLE_ENDIAN) {
2282 val = bswap64(val);
2283 }
2284#else
2285 if (endian == DEVICE_BIG_ENDIAN) {
2286 val = bswap64(val);
2287 }
84b7b8e7
FB
2288#endif
2289 } else {
2290 /* RAM case */
f3705d53 2291 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2292 & TARGET_PAGE_MASK)
149f54b5 2293 + addr1);
1e78bcc1
AG
2294 switch (endian) {
2295 case DEVICE_LITTLE_ENDIAN:
2296 val = ldq_le_p(ptr);
2297 break;
2298 case DEVICE_BIG_ENDIAN:
2299 val = ldq_be_p(ptr);
2300 break;
2301 default:
2302 val = ldq_p(ptr);
2303 break;
2304 }
84b7b8e7
FB
2305 }
2306 return val;
2307}
2308
a8170e5e 2309uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2310{
2311 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2312}
2313
a8170e5e 2314uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2315{
2316 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2317}
2318
a8170e5e 2319uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2320{
2321 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2322}
2323
aab33094 2324/* XXX: optimize */
a8170e5e 2325uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2326{
2327 uint8_t val;
2328 cpu_physical_memory_read(addr, &val, 1);
2329 return val;
2330}
2331
733f0b02 2332/* warning: addr must be aligned */
a8170e5e 2333static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2334 enum device_endian endian)
aab33094 2335{
733f0b02
MT
2336 uint8_t *ptr;
2337 uint64_t val;
f3705d53 2338 MemoryRegionSection *section;
149f54b5
PB
2339 hwaddr l = 2;
2340 hwaddr addr1;
733f0b02 2341
149f54b5
PB
2342 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2343 false);
2bbfa05d 2344 if (l < 2 || !memory_access_is_direct(section->mr, false)) {
733f0b02 2345 /* I/O case */
791af8c8 2346 io_mem_read(section->mr, addr1, &val, 2);
1e78bcc1
AG
2347#if defined(TARGET_WORDS_BIGENDIAN)
2348 if (endian == DEVICE_LITTLE_ENDIAN) {
2349 val = bswap16(val);
2350 }
2351#else
2352 if (endian == DEVICE_BIG_ENDIAN) {
2353 val = bswap16(val);
2354 }
2355#endif
733f0b02
MT
2356 } else {
2357 /* RAM case */
f3705d53 2358 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 2359 & TARGET_PAGE_MASK)
149f54b5 2360 + addr1);
1e78bcc1
AG
2361 switch (endian) {
2362 case DEVICE_LITTLE_ENDIAN:
2363 val = lduw_le_p(ptr);
2364 break;
2365 case DEVICE_BIG_ENDIAN:
2366 val = lduw_be_p(ptr);
2367 break;
2368 default:
2369 val = lduw_p(ptr);
2370 break;
2371 }
733f0b02
MT
2372 }
2373 return val;
aab33094
FB
2374}
2375
a8170e5e 2376uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2377{
2378 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2379}
2380
a8170e5e 2381uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2382{
2383 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2384}
2385
a8170e5e 2386uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2387{
2388 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2389}
2390
8df1cd07
FB
 2391/* warning: addr must be aligned. The ram page is not marked as dirty
2392 and the code inside is not invalidated. It is useful if the dirty
2393 bits are used to track modified PTEs */
a8170e5e 2394void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2395{
8df1cd07 2396 uint8_t *ptr;
f3705d53 2397 MemoryRegionSection *section;
149f54b5
PB
2398 hwaddr l = 4;
2399 hwaddr addr1;
8df1cd07 2400
149f54b5
PB
2401 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2402 true);
2bbfa05d 2403 if (l < 4 || !memory_access_is_direct(section->mr, true)) {
149f54b5 2404 io_mem_write(section->mr, addr1, val, 4);
8df1cd07 2405 } else {
149f54b5 2406 addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
5579c7f3 2407 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2408 stl_p(ptr, val);
74576198
AL
2409
2410 if (unlikely(in_migration)) {
2411 if (!cpu_physical_memory_is_dirty(addr1)) {
2412 /* invalidate code */
2413 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2414 /* set dirty bit */
f7c11b53
YT
2415 cpu_physical_memory_set_dirty_flags(
2416 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2417 }
2418 }
8df1cd07
FB
2419 }
2420}
2421
2422/* warning: addr must be aligned */
a8170e5e 2423static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2424 enum device_endian endian)
8df1cd07 2425{
8df1cd07 2426 uint8_t *ptr;
f3705d53 2427 MemoryRegionSection *section;
149f54b5
PB
2428 hwaddr l = 4;
2429 hwaddr addr1;
8df1cd07 2430
149f54b5
PB
2431 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2432 true);
2bbfa05d 2433 if (l < 4 || !memory_access_is_direct(section->mr, true)) {
1e78bcc1
AG
2434#if defined(TARGET_WORDS_BIGENDIAN)
2435 if (endian == DEVICE_LITTLE_ENDIAN) {
2436 val = bswap32(val);
2437 }
2438#else
2439 if (endian == DEVICE_BIG_ENDIAN) {
2440 val = bswap32(val);
2441 }
2442#endif
149f54b5 2443 io_mem_write(section->mr, addr1, val, 4);
8df1cd07 2444 } else {
8df1cd07 2445 /* RAM case */
149f54b5 2446 addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
5579c7f3 2447 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2448 switch (endian) {
2449 case DEVICE_LITTLE_ENDIAN:
2450 stl_le_p(ptr, val);
2451 break;
2452 case DEVICE_BIG_ENDIAN:
2453 stl_be_p(ptr, val);
2454 break;
2455 default:
2456 stl_p(ptr, val);
2457 break;
2458 }
51d7a9eb 2459 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2460 }
2461}
2462
a8170e5e 2463void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2464{
2465 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2466}
2467
a8170e5e 2468void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2469{
2470 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2471}
2472
a8170e5e 2473void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2474{
2475 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2476}
2477
aab33094 2478/* XXX: optimize */
a8170e5e 2479void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2480{
2481 uint8_t v = val;
2482 cpu_physical_memory_write(addr, &v, 1);
2483}
2484
733f0b02 2485/* warning: addr must be aligned */
a8170e5e 2486static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2487 enum device_endian endian)
aab33094 2488{
733f0b02 2489 uint8_t *ptr;
f3705d53 2490 MemoryRegionSection *section;
149f54b5
PB
2491 hwaddr l = 2;
2492 hwaddr addr1;
733f0b02 2493
149f54b5
PB
2494 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2495 true);
2bbfa05d 2496 if (l < 2 || !memory_access_is_direct(section->mr, true)) {
1e78bcc1
AG
2497#if defined(TARGET_WORDS_BIGENDIAN)
2498 if (endian == DEVICE_LITTLE_ENDIAN) {
2499 val = bswap16(val);
2500 }
2501#else
2502 if (endian == DEVICE_BIG_ENDIAN) {
2503 val = bswap16(val);
2504 }
2505#endif
149f54b5 2506 io_mem_write(section->mr, addr1, val, 2);
733f0b02 2507 } else {
733f0b02 2508 /* RAM case */
149f54b5 2509 addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
733f0b02 2510 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2511 switch (endian) {
2512 case DEVICE_LITTLE_ENDIAN:
2513 stw_le_p(ptr, val);
2514 break;
2515 case DEVICE_BIG_ENDIAN:
2516 stw_be_p(ptr, val);
2517 break;
2518 default:
2519 stw_p(ptr, val);
2520 break;
2521 }
51d7a9eb 2522 invalidate_and_set_dirty(addr1, 2);
733f0b02 2523 }
aab33094
FB
2524}
2525
a8170e5e 2526void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2527{
2528 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2529}
2530
a8170e5e 2531void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2532{
2533 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2534}
2535
a8170e5e 2536void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2537{
2538 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2539}
2540
aab33094 2541/* XXX: optimize */
a8170e5e 2542void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2543{
2544 val = tswap64(val);
71d2b725 2545 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2546}
2547
a8170e5e 2548void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2549{
2550 val = cpu_to_le64(val);
2551 cpu_physical_memory_write(addr, &val, 8);
2552}
2553
a8170e5e 2554void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2555{
2556 val = cpu_to_be64(val);
2557 cpu_physical_memory_write(addr, &val, 8);
2558}
2559
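/*
 * Editor's illustrative sketch (hypothetical): a device that keeps a
 * little-endian counter in guest memory can use the fixed-endian helpers
 * above and remain correct on both big- and little-endian targets.
 */
static void example_bump_le_counter(hwaddr counter_pa)
{
    uint32_t v = ldl_le_phys(counter_pa);

    stl_le_phys(counter_pa, v + 1);
}
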
5e2972fd 2560/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2561int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2562 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2563{
2564 int l;
a8170e5e 2565 hwaddr phys_addr;
9b3c35e0 2566 target_ulong page;
13eb76e0
FB
2567
2568 while (len > 0) {
2569 page = addr & TARGET_PAGE_MASK;
2570 phys_addr = cpu_get_phys_page_debug(env, page);
2571 /* if no physical page mapped, return an error */
2572 if (phys_addr == -1)
2573 return -1;
2574 l = (page + TARGET_PAGE_SIZE) - addr;
2575 if (l > len)
2576 l = len;
5e2972fd 2577 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2578 if (is_write)
2579 cpu_physical_memory_write_rom(phys_addr, buf, l);
2580 else
5e2972fd 2581 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2582 len -= l;
2583 buf += l;
2584 addr += l;
2585 }
2586 return 0;
2587}
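
/*
 * Editor's illustrative sketch (hypothetical): a debugger stub can use
 * cpu_memory_rw_debug() to patch guest code through virtual addresses,
 * for example to plant a software breakpoint and remember the original
 * bytes for later removal.
 */
static int example_insert_sw_breakpoint(CPUArchState *env, target_ulong pc,
                                        uint8_t *saved_insn,
                                        const uint8_t *brk_insn, int len)
{
    if (cpu_memory_rw_debug(env, pc, saved_insn, len, 0) != 0) {
        return -1;
    }
    return cpu_memory_rw_debug(env, pc, (uint8_t *)brk_insn, len, 1);
}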
a68fe89c 2588#endif
13eb76e0 2589
8e4a424b
BS
2590#if !defined(CONFIG_USER_ONLY)
2591
2592/*
2593 * A helper function for the _utterly broken_ virtio device model to find out if
2594 * it's running on a big endian machine. Don't do this at home kids!
2595 */
2596bool virtio_is_big_endian(void);
2597bool virtio_is_big_endian(void)
2598{
2599#if defined(TARGET_WORDS_BIGENDIAN)
2600 return true;
2601#else
2602 return false;
2603#endif
2604}
2605
2606#endif
2607
76f35538 2608#ifndef CONFIG_USER_ONLY
a8170e5e 2609bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
2610{
2611 MemoryRegionSection *section;
149f54b5 2612 hwaddr l = 1;
76f35538 2613
149f54b5
PB
2614 section = address_space_translate(&address_space_memory,
2615 phys_addr, &phys_addr, &l, false);
76f35538
WC
2616
2617 return !(memory_region_is_ram(section->mr) ||
2618 memory_region_is_romd(section->mr));
2619}
2620#endif