/*
 * Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};
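
/*
 * Sketch of the lookup (illustrative): the dispatch map is a radix tree
 * over physical page numbers.  Interior PhysPageEntry values index
 * nodes[], leaves index sections[].  A lookup consumes L2_BITS of the
 * page index per level:
 *
 *     hwaddr page = addr >> TARGET_PAGE_BITS;
 *     for (i = P_L2_LEVELS - 1; i >= 0; i--) {
 *         slot = (page >> (i * L2_BITS)) & (L2_SIZE - 1);
 *         // descend into nodes[entry.ptr][slot]
 *     }
 *
 * This is the walk performed by phys_page_find() below.
 */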

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}
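
/*
 * Usage sketch (illustrative): callers resolve a physical address to its
 * MemoryRegionSection with
 *
 *     MemoryRegionSection *s = phys_page_find(d->phys_map,
 *                                             addr >> TARGET_PAGE_BITS,
 *                                             d->nodes, d->sections);
 *
 * Unmapped holes resolve to &sections[PHYS_SECTION_UNASSIGNED] rather
 * than NULL, so callers never need a NULL check.
 */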

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
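
/*
 * Usage sketch (illustrative, error handling omitted): since *plen may be
 * clamped to the end of the section or IOMMU mapping, callers loop until
 * the full length has been translated:
 *
 *     while (len > 0) {
 *         hwaddr xlat, l = len;
 *         MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l,
 *                                                    is_write);
 *         // access at most l bytes of mr at offset xlat
 *         addr += l;
 *         len -= l;
 *     }
 */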

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu = first_cpu;

    while (cpu) {
        if (cpu->cpu_index == index) {
            break;
        }
        cpu = cpu->next_cpu;
    }

    return cpu;
}
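
/*
 * Example (illustrative): since CPU indexes are assigned densely from 0,
 * a caller can enumerate all CPUs by probing until the first miss:
 *
 *     CPUState *cpu;
 *     int i = 0;
 *     while ((cpu = qemu_get_cpu(i++)) != NULL) {
 *         // ... inspect cpu ...
 *     }
 */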

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUState *cpu;

    cpu = first_cpu;
    while (cpu) {
        func(cpu, data);
        cpu = cpu->next_cpu;
    }
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState **pcpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->next_cpu = NULL;
    pcpu = &first_cpu;
    cpu_index = 0;
    while (*pcpu != NULL) {
        pcpu = &(*pcpu)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *pcpu = cpu;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
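
/*
 * Usage sketch (illustrative): watch a naturally aligned 4-byte location
 * for writes and keep the handle for later removal:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE, &wp) < 0) {
 *         // bad length or alignment
 *     }
 *     ...
 *     cpu_watchpoint_remove_by_ref(env, wp);
 */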

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
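
/* Encoding note (illustrative): for RAM the returned iotlb is the page's
 * ram_addr with one of the PHYS_SECTION_* values in the low bits, e.g. a
 * writable RAM page at ram_addr 0x2000 yields 0x2000 | PHYS_SECTION_NOTDIRTY,
 * steering stores through notdirty_mem_ops below until the page is dirty.
 * For MMIO, it is the section index plus the offset within the page. */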
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
            - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
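
/*
 * Worked example (illustrative, assuming 4K target pages): a section at
 * address-space offset 0x1800 with size 0x3000 is split by mem_add() into
 * a head subpage for [0x1800, 0x2000), one full page registered through
 * register_multipage() for [0x2000, 0x4000), and a tail subpage for
 * [0x4000, 0x4800).
 */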

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
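
/*
 * Usage sketch (illustrative): most callers reach this through the memory
 * API rather than directly, e.g.
 *
 *     memory_region_init_ram(mr, NULL, "vga.vram", size);
 *
 * Callers that already own host memory pass it as @host above; the block
 * is then flagged RAM_PREALLOC_MASK and its storage is never freed here.
 */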

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
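
/* Performance note: qemu_get_ram_block() keeps an MRU pointer, so repeated
 * lookups that hit the same block (the common case in device access loops)
 * cost O(1); only a miss pays for the sorted-list walk. */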

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}
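
/* Flow sketch (illustrative): a store through the handler above first
 * invalidates any TBs on the page while CODE_DIRTY_FLAG is clear, performs
 * the write, then sets all dirty bits except code.  Once dirty_flags reads
 * back as 0xff the TLB entry is remapped as plain RAM, so later stores to
 * the page bypass this slow path entirely. */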

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', size, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      size, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
1675
5312bd8b
AK
1676static uint16_t dummy_section(MemoryRegion *mr)
1677{
1678 MemoryRegionSection section = {
1679 .mr = mr,
1680 .offset_within_address_space = 0,
1681 .offset_within_region = 0,
052e87b0 1682 .size = int128_2_64(),
5312bd8b
AK
1683 };
1684
1685 return phys_section_add(&section);
1686}
1687
a8170e5e 1688MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1689{
0475d94f 1690 return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1691}
1692
e9179ce1
AK
1693static void io_mem_init(void)
1694{
2c9b15ca
PB
1695 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1696 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
0e0df1e2 1697 "unassigned", UINT64_MAX);
2c9b15ca 1698 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
0e0df1e2 1699 "notdirty", UINT64_MAX);
2c9b15ca 1700 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1ec9b909 1701 "watch", UINT64_MAX);
e9179ce1
AK
1702}
1703
ac1970fb 1704static void mem_begin(MemoryListener *listener)
00752703
PB
1705{
1706 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1707 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1708
1709 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1710 d->as = as;
1711 as->next_dispatch = d;
1712}
1713
1714static void mem_commit(MemoryListener *listener)
ac1970fb 1715{
89ae337a 1716 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1717 AddressSpaceDispatch *cur = as->dispatch;
1718 AddressSpaceDispatch *next = as->next_dispatch;
1719
1720 next->nodes = next_map.nodes;
1721 next->sections = next_map.sections;
ac1970fb 1722
0475d94f
PB
1723 as->dispatch = next;
1724 g_free(cur);
ac1970fb
AK
1725}
1726
50c1e149
AK
1727static void core_begin(MemoryListener *listener)
1728{
b41aac4f
LPF
1729 uint16_t n;
1730
6092666e
PB
1731 prev_map = g_new(PhysPageMap, 1);
1732 *prev_map = next_map;
1733
9affd6fc 1734 memset(&next_map, 0, sizeof(next_map));
b41aac4f
LPF
1735 n = dummy_section(&io_mem_unassigned);
1736 assert(n == PHYS_SECTION_UNASSIGNED);
1737 n = dummy_section(&io_mem_notdirty);
1738 assert(n == PHYS_SECTION_NOTDIRTY);
1739 n = dummy_section(&io_mem_rom);
1740 assert(n == PHYS_SECTION_ROM);
1741 n = dummy_section(&io_mem_watch);
1742 assert(n == PHYS_SECTION_WATCH);
50c1e149
AK
1743}
1744
9affd6fc
PB
 1745/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
1746 * All AddressSpaceDispatch instances have switched to the next map.
1747 */
1748static void core_commit(MemoryListener *listener)
1749{
6092666e 1750 phys_sections_free(prev_map);
9affd6fc
PB
1751}
1752
1d71148e 1753static void tcg_commit(MemoryListener *listener)
50c1e149 1754{
182735ef 1755 CPUState *cpu;
117712c3
AK
1756
1757 /* since each CPU stores ram addresses in its TLB cache, we must
1758 reset the modified entries */
1759 /* XXX: slow ! */
182735ef
AF
1760 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1761 CPUArchState *env = cpu->env_ptr;
1762
117712c3
AK
1763 tlb_flush(env, 1);
1764 }
50c1e149
AK
1765}
1766
93632747
AK
1767static void core_log_global_start(MemoryListener *listener)
1768{
1769 cpu_physical_memory_set_dirty_tracking(1);
1770}
1771
1772static void core_log_global_stop(MemoryListener *listener)
1773{
1774 cpu_physical_memory_set_dirty_tracking(0);
1775}
1776
93632747 1777static MemoryListener core_memory_listener = {
50c1e149 1778 .begin = core_begin,
9affd6fc 1779 .commit = core_commit,
93632747
AK
1780 .log_global_start = core_log_global_start,
1781 .log_global_stop = core_log_global_stop,
ac1970fb 1782 .priority = 1,
93632747
AK
1783};
1784
1d71148e
AK
1785static MemoryListener tcg_memory_listener = {
1786 .commit = tcg_commit,
1787};
1788
ac1970fb
AK
1789void address_space_init_dispatch(AddressSpace *as)
1790{
00752703 1791 as->dispatch = NULL;
89ae337a 1792 as->dispatch_listener = (MemoryListener) {
ac1970fb 1793 .begin = mem_begin,
00752703 1794 .commit = mem_commit,
ac1970fb
AK
1795 .region_add = mem_add,
1796 .region_nop = mem_add,
1797 .priority = 0,
1798 };
89ae337a 1799 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1800}
1801
83f3c251
AK
1802void address_space_destroy_dispatch(AddressSpace *as)
1803{
1804 AddressSpaceDispatch *d = as->dispatch;
1805
89ae337a 1806 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1807 g_free(d);
1808 as->dispatch = NULL;
1809}
1810
62152b8a
AK
1811static void memory_map_init(void)
1812{
7267c094 1813 system_memory = g_malloc(sizeof(*system_memory));
2c9b15ca 1814 memory_region_init(system_memory, NULL, "system", INT64_MAX);
7dca8043 1815 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1816
7267c094 1817 system_io = g_malloc(sizeof(*system_io));
2c9b15ca 1818 memory_region_init(system_io, NULL, "io", 65536);
7dca8043 1819 address_space_init(&address_space_io, system_io, "I/O");
93632747 1820
f6790af6 1821 memory_listener_register(&core_memory_listener, &address_space_memory);
f6790af6 1822 memory_listener_register(&tcg_memory_listener, &address_space_memory);
62152b8a
AK
1823}
1824
1825MemoryRegion *get_system_memory(void)
1826{
1827 return system_memory;
1828}
1829
309cb471
AK
1830MemoryRegion *get_system_io(void)
1831{
1832 return system_io;
1833}
1834
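As a usage sketch: board code typically obtains the root region from get_system_memory() and maps RAM or device regions into it. The memory_region_init_ram() signature with a NULL owner argument is an assumption, modelled on the other init calls in this file; treat the whole block as illustrative rather than a drop-in board file.

/* Hedged sketch of board-level wiring (not QEMU code as-is): assumes
 * memory_region_init_ram(mr, owner, name, size) and
 * memory_region_add_subregion(parent, offset, mr) with the signatures
 * of this era. */
static void example_board_init(void)
{
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, NULL, "example.ram", 128 * 1024 * 1024);
    /* Map 128 MiB of RAM at guest physical address 0. */
    memory_region_add_subregion(sysmem, 0, ram);
}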
e2eef170
PB
1835#endif /* !defined(CONFIG_USER_ONLY) */
1836
13eb76e0
FB
1837/* physical memory access (slow version, mainly for debug) */
1838#if defined(CONFIG_USER_ONLY)
9349b4f9 1839int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1840 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1841{
1842 int l, flags;
1843 target_ulong page;
53a5960a 1844 void * p;
13eb76e0
FB
1845
1846 while (len > 0) {
1847 page = addr & TARGET_PAGE_MASK;
1848 l = (page + TARGET_PAGE_SIZE) - addr;
1849 if (l > len)
1850 l = len;
1851 flags = page_get_flags(page);
1852 if (!(flags & PAGE_VALID))
a68fe89c 1853 return -1;
13eb76e0
FB
1854 if (is_write) {
1855 if (!(flags & PAGE_WRITE))
a68fe89c 1856 return -1;
579a97f7 1857 /* XXX: this code should not depend on lock_user */
72fb7daa 1858 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1859 return -1;
72fb7daa
AJ
1860 memcpy(p, buf, l);
1861 unlock_user(p, addr, l);
13eb76e0
FB
1862 } else {
1863 if (!(flags & PAGE_READ))
a68fe89c 1864 return -1;
579a97f7 1865 /* XXX: this code should not depend on lock_user */
72fb7daa 1866 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1867 return -1;
72fb7daa 1868 memcpy(buf, p, l);
5b257578 1869 unlock_user(p, addr, 0);
13eb76e0
FB
1870 }
1871 len -= l;
1872 buf += l;
1873 addr += l;
1874 }
a68fe89c 1875 return 0;
13eb76e0 1876}
8df1cd07 1877
13eb76e0 1878#else
51d7a9eb 1879
a8170e5e
AK
1880static void invalidate_and_set_dirty(hwaddr addr,
1881 hwaddr length)
51d7a9eb
AP
1882{
1883 if (!cpu_physical_memory_is_dirty(addr)) {
1884 /* invalidate code */
1885 tb_invalidate_phys_page_range(addr, addr + length, 0);
1886 /* set dirty bit */
1887 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1888 }
e226939d 1889 xen_modified_memory(addr, length);
51d7a9eb
AP
1890}
1891
2bbfa05d
PB
1892static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1893{
1894 if (memory_region_is_ram(mr)) {
1895 return !(is_write && mr->readonly);
1896 }
1897 if (memory_region_is_romd(mr)) {
1898 return !is_write;
1899 }
1900
1901 return false;
1902}
1903
f52cc467 1904static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
82f2563f 1905{
f52cc467 1906 if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) {
82f2563f
PB
1907 return 4;
1908 }
f52cc467 1909 if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
82f2563f
PB
1910 return 2;
1911 }
1912 return 1;
1913}
1914
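The effect of memory_access_size() is easiest to see together with the copy loop that follows it: each iteration issues the widest access that the remaining length and alignment allow, capped at 4 bytes. A standalone sketch, assuming a device without impl.unaligned:

/* Standalone illustration of the size-selection rule above (unaligned
 * access unsupported, max width 4 bytes): a 6-byte access starting at an
 * address with (addr & 3) == 2 is split into a 2-byte piece followed by
 * a 4-byte piece. */
#include <stdio.h>

static int access_size(int l, unsigned addr)
{
    if (l >= 4 && (addr & 3) == 0) {
        return 4;
    }
    if (l >= 2 && (addr & 1) == 0) {
        return 2;
    }
    return 1;
}

int main(void)
{
    unsigned addr = 0x1002;
    for (int len = 6; len > 0; ) {
        int l = access_size(len, addr);
        printf("access %d byte(s) at 0x%x\n", l, addr);
        addr += l;
        len -= l;
    }
    return 0;
}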
fd8aaa76 1915bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1916 int len, bool is_write)
13eb76e0 1917{
149f54b5 1918 hwaddr l;
13eb76e0 1919 uint8_t *ptr;
791af8c8 1920 uint64_t val;
149f54b5 1921 hwaddr addr1;
5c8a00ce 1922 MemoryRegion *mr;
fd8aaa76 1923 bool error = false;
3b46e624 1924
13eb76e0 1925 while (len > 0) {
149f54b5 1926 l = len;
5c8a00ce 1927 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 1928
13eb76e0 1929 if (is_write) {
5c8a00ce
PB
1930 if (!memory_access_is_direct(mr, is_write)) {
1931 l = memory_access_size(mr, l, addr1);
4917cf44 1932 /* XXX: could force current_cpu to NULL to avoid
6a00d601 1933 potential bugs */
82f2563f 1934 if (l == 4) {
1c213d19 1935 /* 32 bit write access */
c27004ec 1936 val = ldl_p(buf);
5c8a00ce 1937 error |= io_mem_write(mr, addr1, val, 4);
82f2563f 1938 } else if (l == 2) {
1c213d19 1939 /* 16 bit write access */
c27004ec 1940 val = lduw_p(buf);
5c8a00ce 1941 error |= io_mem_write(mr, addr1, val, 2);
13eb76e0 1942 } else {
1c213d19 1943 /* 8 bit write access */
c27004ec 1944 val = ldub_p(buf);
5c8a00ce 1945 error |= io_mem_write(mr, addr1, val, 1);
13eb76e0 1946 }
2bbfa05d 1947 } else {
5c8a00ce 1948 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 1949 /* RAM case */
5579c7f3 1950 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1951 memcpy(ptr, buf, l);
51d7a9eb 1952 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1953 }
1954 } else {
5c8a00ce 1955 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 1956 /* I/O case */
5c8a00ce 1957 l = memory_access_size(mr, l, addr1);
82f2563f 1958 if (l == 4) {
13eb76e0 1959 /* 32 bit read access */
5c8a00ce 1960 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 1961 stl_p(buf, val);
82f2563f 1962 } else if (l == 2) {
13eb76e0 1963 /* 16 bit read access */
5c8a00ce 1964 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 1965 stw_p(buf, val);
13eb76e0 1966 } else {
1c213d19 1967 /* 8 bit read access */
5c8a00ce 1968 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 1969 stb_p(buf, val);
13eb76e0
FB
1970 }
1971 } else {
1972 /* RAM case */
5c8a00ce 1973 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 1974 memcpy(buf, ptr, l);
13eb76e0
FB
1975 }
1976 }
1977 len -= l;
1978 buf += l;
1979 addr += l;
1980 }
fd8aaa76
PB
1981
1982 return error;
13eb76e0 1983}
8df1cd07 1984
fd8aaa76 1985bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1986 const uint8_t *buf, int len)
1987{
fd8aaa76 1988 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
1989}
1990
fd8aaa76 1991bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 1992{
fd8aaa76 1993 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
1994}
1995
1996
a8170e5e 1997void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1998 int len, int is_write)
1999{
fd8aaa76 2000 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2001}
2002
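A short usage sketch for the accessors above, as a device model might issue DMA (illustrative only; the payload, status word and guest address are invented):

/* Hedged sketch: copy a buffer into guest memory and read a status word
 * back, using the accessors defined above.  A true return value from
 * address_space_write() means some piece of the access hit an I/O error. */
static void example_dma(hwaddr guest_addr)
{
    uint8_t payload[64] = { 0 };
    uint8_t status[4];

    if (address_space_write(&address_space_memory, guest_addr,
                            payload, sizeof(payload))) {
        return;
    }
    address_space_read(&address_space_memory, guest_addr, status,
                       sizeof(status));
}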
d0ecd2aa 2003/* used for ROM loading: can write in RAM and ROM */
a8170e5e 2004void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
2005 const uint8_t *buf, int len)
2006{
149f54b5 2007 hwaddr l;
d0ecd2aa 2008 uint8_t *ptr;
149f54b5 2009 hwaddr addr1;
5c8a00ce 2010 MemoryRegion *mr;
3b46e624 2011
d0ecd2aa 2012 while (len > 0) {
149f54b5 2013 l = len;
5c8a00ce
PB
2014 mr = address_space_translate(&address_space_memory,
2015 addr, &addr1, &l, true);
3b46e624 2016
5c8a00ce
PB
2017 if (!(memory_region_is_ram(mr) ||
2018 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2019 /* do nothing */
2020 } else {
5c8a00ce 2021 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2022 /* ROM/RAM case */
5579c7f3 2023 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2024 memcpy(ptr, buf, l);
51d7a9eb 2025 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2026 }
2027 len -= l;
2028 buf += l;
2029 addr += l;
2030 }
2031}
2032
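A typical caller is firmware loading at board reset, roughly as follows (a sketch; rom_blob, rom_size and rom_base are placeholders):

/* Hedged sketch: copy a firmware image to its guest physical base.
 * cpu_physical_memory_write_rom() bypasses the read-only handling that
 * address_space_rw() would apply to a ROM region. */
static void example_load_firmware(const uint8_t *rom_blob, int rom_size,
                                  hwaddr rom_base)
{
    cpu_physical_memory_write_rom(rom_base, rom_blob, rom_size);
}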
6d16c2f8 2033typedef struct {
d3e71559 2034 MemoryRegion *mr;
6d16c2f8 2035 void *buffer;
a8170e5e
AK
2036 hwaddr addr;
2037 hwaddr len;
6d16c2f8
AL
2038} BounceBuffer;
2039
2040static BounceBuffer bounce;
2041
ba223c29
AL
2042typedef struct MapClient {
2043 void *opaque;
2044 void (*callback)(void *opaque);
72cf2d4f 2045 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2046} MapClient;
2047
72cf2d4f
BS
2048static QLIST_HEAD(map_client_list, MapClient) map_client_list
2049 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2050
2051void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2052{
7267c094 2053 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2054
2055 client->opaque = opaque;
2056 client->callback = callback;
72cf2d4f 2057 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2058 return client;
2059}
2060
8b9c99d9 2061static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2062{
2063 MapClient *client = (MapClient *)_client;
2064
72cf2d4f 2065 QLIST_REMOVE(client, link);
7267c094 2066 g_free(client);
ba223c29
AL
2067}
2068
2069static void cpu_notify_map_clients(void)
2070{
2071 MapClient *client;
2072
72cf2d4f
BS
2073 while (!QLIST_EMPTY(&map_client_list)) {
2074 client = QLIST_FIRST(&map_client_list);
ba223c29 2075 client->callback(client->opaque);
34d5e948 2076 cpu_unregister_map_client(client);
ba223c29
AL
2077 }
2078}
2079
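Usage sketch for the map-client mechanism: a caller whose address_space_map() attempt returned NULL (for instance because the single bounce buffer below was busy) registers a callback and retries from it. example_try_map() is a hypothetical caller-side helper:

/* Hedged sketch: retry a deferred mapping once resources free up. */
static void example_try_map(void *dma_state);   /* hypothetical retry entry */

static void example_map_retry_cb(void *opaque)
{
    /* Invoked from cpu_notify_map_clients() when a bounce buffer is
     * released; the client is unregistered automatically, so it is safe
     * to attempt address_space_map() again here. */
    example_try_map(opaque);
}

static void example_defer_map(void *dma_state)
{
    cpu_register_map_client(dma_state, example_map_retry_cb);
}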
51644ab7
PB
2080bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2081{
5c8a00ce 2082 MemoryRegion *mr;
51644ab7
PB
2083 hwaddr l, xlat;
2084
2085 while (len > 0) {
2086 l = len;
5c8a00ce
PB
2087 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2088 if (!memory_access_is_direct(mr, is_write)) {
2089 l = memory_access_size(mr, l, addr);
2090 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2091 return false;
2092 }
2093 }
2094
2095 len -= l;
2096 addr += l;
2097 }
2098 return true;
2099}
2100
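Callers that must not fail halfway through a transfer can probe the whole range first; a sketch:

/* Hedged sketch: refuse a transfer up front if any part of the range is
 * not writable, instead of failing partway through. */
static bool example_checked_write(AddressSpace *as, hwaddr addr,
                                  uint8_t *buf, int len)
{
    if (!address_space_access_valid(as, addr, len, true /* is_write */)) {
        return false;
    }
    return !address_space_rw(as, addr, buf, len, true);
}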
6d16c2f8
AL
2101/* Map a physical memory region into a host virtual address.
2102 * May map a subset of the requested range, given by and returned in *plen.
2103 * May return NULL if resources needed to perform the mapping are exhausted.
2104 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2105 * Use cpu_register_map_client() to know when retrying the map operation is
2106 * likely to succeed.
6d16c2f8 2107 */
ac1970fb 2108void *address_space_map(AddressSpace *as,
a8170e5e
AK
2109 hwaddr addr,
2110 hwaddr *plen,
ac1970fb 2111 bool is_write)
6d16c2f8 2112{
a8170e5e 2113 hwaddr len = *plen;
e3127ae0
PB
2114 hwaddr done = 0;
2115 hwaddr l, xlat, base;
2116 MemoryRegion *mr, *this_mr;
2117 ram_addr_t raddr;
6d16c2f8 2118
e3127ae0
PB
2119 if (len == 0) {
2120 return NULL;
2121 }
38bee5dc 2122
e3127ae0
PB
2123 l = len;
2124 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2125 if (!memory_access_is_direct(mr, is_write)) {
2126 if (bounce.buffer) {
2127 return NULL;
6d16c2f8 2128 }
e3127ae0
PB
2129 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2130 bounce.addr = addr;
2131 bounce.len = l;
d3e71559
PB
2132
2133 memory_region_ref(mr);
2134 bounce.mr = mr;
e3127ae0
PB
2135 if (!is_write) {
2136 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2137 }
6d16c2f8 2138
e3127ae0
PB
2139 *plen = l;
2140 return bounce.buffer;
2141 }
2142
2143 base = xlat;
2144 raddr = memory_region_get_ram_addr(mr);
2145
2146 for (;;) {
6d16c2f8
AL
2147 len -= l;
2148 addr += l;
e3127ae0
PB
2149 done += l;
2150 if (len == 0) {
2151 break;
2152 }
2153
2154 l = len;
2155 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2156 if (this_mr != mr || xlat != base + done) {
2157 break;
2158 }
6d16c2f8 2159 }
e3127ae0 2160
d3e71559 2161 memory_region_ref(mr);
e3127ae0
PB
2162 *plen = done;
2163 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2164}
2165
ac1970fb 2166/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2167 * Will also mark the memory as dirty if is_write == 1. access_len gives
2168 * the amount of memory that was actually read or written by the caller.
2169 */
a8170e5e
AK
2170void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2171 int is_write, hwaddr access_len)
6d16c2f8
AL
2172{
2173 if (buffer != bounce.buffer) {
d3e71559
PB
2174 MemoryRegion *mr;
2175 ram_addr_t addr1;
2176
2177 mr = qemu_ram_addr_from_host(buffer, &addr1);
2178 assert(mr != NULL);
6d16c2f8 2179 if (is_write) {
6d16c2f8
AL
2180 while (access_len) {
2181 unsigned l;
2182 l = TARGET_PAGE_SIZE;
2183 if (l > access_len)
2184 l = access_len;
51d7a9eb 2185 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2186 addr1 += l;
2187 access_len -= l;
2188 }
2189 }
868bb33f 2190 if (xen_enabled()) {
e41d7c69 2191 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2192 }
d3e71559 2193 memory_region_unref(mr);
6d16c2f8
AL
2194 return;
2195 }
2196 if (is_write) {
ac1970fb 2197 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2198 }
f8a83245 2199 qemu_vfree(bounce.buffer);
6d16c2f8 2200 bounce.buffer = NULL;
d3e71559 2201 memory_region_unref(bounce.mr);
ba223c29 2202 cpu_notify_map_clients();
6d16c2f8 2203}
d0ecd2aa 2204
a8170e5e
AK
2205void *cpu_physical_memory_map(hwaddr addr,
2206 hwaddr *plen,
ac1970fb
AK
2207 int is_write)
2208{
2209 return address_space_map(&address_space_memory, addr, plen, is_write);
2210}
2211
a8170e5e
AK
2212void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2213 int is_write, hwaddr access_len)
ac1970fb
AK
2214{
2215 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2216}
2217
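The canonical pattern for the pair above is map, touch the memory directly, then unmap with the length actually used; when only a subset could be mapped, the caller loops. A sketch with error handling trimmed:

/* Hedged sketch of the zero-copy access pattern: fill a guest buffer in
 * place, relying on address_space_map() to fall back to the bounce
 * buffer when the target is not directly accessible. */
static void example_fill_guest_buffer(AddressSpace *as, hwaddr addr,
                                      hwaddr len, uint8_t pattern)
{
    while (len > 0) {
        hwaddr plen = len;
        void *ptr = address_space_map(as, addr, &plen, true /* is_write */);

        if (!ptr) {
            return;   /* resources exhausted; see cpu_register_map_client() */
        }
        memset(ptr, pattern, plen);
        address_space_unmap(as, ptr, plen, true, plen);
        addr += plen;
        len -= plen;
    }
}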
8df1cd07 2218/* warning: addr must be aligned */
a8170e5e 2219static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2220 enum device_endian endian)
8df1cd07 2221{
8df1cd07 2222 uint8_t *ptr;
791af8c8 2223 uint64_t val;
5c8a00ce 2224 MemoryRegion *mr;
149f54b5
PB
2225 hwaddr l = 4;
2226 hwaddr addr1;
8df1cd07 2227
5c8a00ce
PB
2228 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2229 false);
2230 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2231 /* I/O case */
5c8a00ce 2232 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2233#if defined(TARGET_WORDS_BIGENDIAN)
2234 if (endian == DEVICE_LITTLE_ENDIAN) {
2235 val = bswap32(val);
2236 }
2237#else
2238 if (endian == DEVICE_BIG_ENDIAN) {
2239 val = bswap32(val);
2240 }
2241#endif
8df1cd07
FB
2242 } else {
2243 /* RAM case */
5c8a00ce 2244 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2245 & TARGET_PAGE_MASK)
149f54b5 2246 + addr1);
1e78bcc1
AG
2247 switch (endian) {
2248 case DEVICE_LITTLE_ENDIAN:
2249 val = ldl_le_p(ptr);
2250 break;
2251 case DEVICE_BIG_ENDIAN:
2252 val = ldl_be_p(ptr);
2253 break;
2254 default:
2255 val = ldl_p(ptr);
2256 break;
2257 }
8df1cd07
FB
2258 }
2259 return val;
2260}
2261
a8170e5e 2262uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2263{
2264 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2265}
2266
a8170e5e 2267uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2268{
2269 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2270}
2271
a8170e5e 2272uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2273{
2274 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2275}
2276
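Device and board code pick the variant matching the guest-visible data layout rather than host endianness. For instance, reading a field from a little-endian in-memory descriptor works the same on any host (a sketch; the descriptor layout is invented):

/* Hedged sketch: pull one 32-bit field out of an in-guest, little-endian
 * descriptor.  ldl_le_phys() byte-swaps as needed on big-endian hosts. */
static uint32_t example_read_desc_flags(hwaddr desc_base)
{
    /* hypothetical layout: flags live 8 bytes into the descriptor */
    return ldl_le_phys(desc_base + 8);
}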
84b7b8e7 2277/* warning: addr must be aligned */
a8170e5e 2278static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2279 enum device_endian endian)
84b7b8e7 2280{
84b7b8e7
FB
2281 uint8_t *ptr;
2282 uint64_t val;
5c8a00ce 2283 MemoryRegion *mr;
149f54b5
PB
2284 hwaddr l = 8;
2285 hwaddr addr1;
84b7b8e7 2286
5c8a00ce
PB
2287 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2288 false);
2289 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2290 /* I/O case */
5c8a00ce 2291 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2292#if defined(TARGET_WORDS_BIGENDIAN)
2293 if (endian == DEVICE_LITTLE_ENDIAN) {
2294 val = bswap64(val);
2295 }
2296#else
2297 if (endian == DEVICE_BIG_ENDIAN) {
2298 val = bswap64(val);
2299 }
84b7b8e7
FB
2300#endif
2301 } else {
2302 /* RAM case */
5c8a00ce 2303 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2304 & TARGET_PAGE_MASK)
149f54b5 2305 + addr1);
1e78bcc1
AG
2306 switch (endian) {
2307 case DEVICE_LITTLE_ENDIAN:
2308 val = ldq_le_p(ptr);
2309 break;
2310 case DEVICE_BIG_ENDIAN:
2311 val = ldq_be_p(ptr);
2312 break;
2313 default:
2314 val = ldq_p(ptr);
2315 break;
2316 }
84b7b8e7
FB
2317 }
2318 return val;
2319}
2320
a8170e5e 2321uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2322{
2323 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2324}
2325
a8170e5e 2326uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2327{
2328 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2329}
2330
a8170e5e 2331uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2332{
2333 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2334}
2335
aab33094 2336/* XXX: optimize */
a8170e5e 2337uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2338{
2339 uint8_t val;
2340 cpu_physical_memory_read(addr, &val, 1);
2341 return val;
2342}
2343
733f0b02 2344/* warning: addr must be aligned */
a8170e5e 2345static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2346 enum device_endian endian)
aab33094 2347{
733f0b02
MT
2348 uint8_t *ptr;
2349 uint64_t val;
5c8a00ce 2350 MemoryRegion *mr;
149f54b5
PB
2351 hwaddr l = 2;
2352 hwaddr addr1;
733f0b02 2353
5c8a00ce
PB
2354 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2355 false);
2356 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2357 /* I/O case */
5c8a00ce 2358 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2359#if defined(TARGET_WORDS_BIGENDIAN)
2360 if (endian == DEVICE_LITTLE_ENDIAN) {
2361 val = bswap16(val);
2362 }
2363#else
2364 if (endian == DEVICE_BIG_ENDIAN) {
2365 val = bswap16(val);
2366 }
2367#endif
733f0b02
MT
2368 } else {
2369 /* RAM case */
5c8a00ce 2370 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2371 & TARGET_PAGE_MASK)
149f54b5 2372 + addr1);
1e78bcc1
AG
2373 switch (endian) {
2374 case DEVICE_LITTLE_ENDIAN:
2375 val = lduw_le_p(ptr);
2376 break;
2377 case DEVICE_BIG_ENDIAN:
2378 val = lduw_be_p(ptr);
2379 break;
2380 default:
2381 val = lduw_p(ptr);
2382 break;
2383 }
733f0b02
MT
2384 }
2385 return val;
aab33094
FB
2386}
2387
a8170e5e 2388uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2389{
2390 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2391}
2392
a8170e5e 2393uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2394{
2395 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2396}
2397
a8170e5e 2398uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2399{
2400 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2401}
2402
8df1cd07
FB
 2403/* warning: addr must be aligned. The RAM page is not marked as dirty
2404 and the code inside is not invalidated. It is useful if the dirty
2405 bits are used to track modified PTEs */
a8170e5e 2406void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2407{
8df1cd07 2408 uint8_t *ptr;
5c8a00ce 2409 MemoryRegion *mr;
149f54b5
PB
2410 hwaddr l = 4;
2411 hwaddr addr1;
8df1cd07 2412
5c8a00ce
PB
2413 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2414 true);
2415 if (l < 4 || !memory_access_is_direct(mr, true)) {
2416 io_mem_write(mr, addr1, val, 4);
8df1cd07 2417 } else {
5c8a00ce 2418 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2419 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2420 stl_p(ptr, val);
74576198
AL
2421
2422 if (unlikely(in_migration)) {
2423 if (!cpu_physical_memory_is_dirty(addr1)) {
2424 /* invalidate code */
2425 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2426 /* set dirty bit */
f7c11b53
YT
2427 cpu_physical_memory_set_dirty_flags(
2428 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2429 }
2430 }
8df1cd07
FB
2431 }
2432}
2433
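Per the comment above, this helper suits software-walked page tables: the target MMU can update a guest PTE without the store being treated as self-modifying code. A sketch with an invented PTE layout:

/* Hedged sketch: mark a hypothetical guest PTE accessed during a software
 * TLB fill.  stl_phys_notdirty() deliberately skips the dirty-flag and
 * code-invalidation path, as the comment above describes. */
static void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}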
2434/* warning: addr must be aligned */
a8170e5e 2435static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2436 enum device_endian endian)
8df1cd07 2437{
8df1cd07 2438 uint8_t *ptr;
5c8a00ce 2439 MemoryRegion *mr;
149f54b5
PB
2440 hwaddr l = 4;
2441 hwaddr addr1;
8df1cd07 2442
5c8a00ce
PB
2443 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2444 true);
2445 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2446#if defined(TARGET_WORDS_BIGENDIAN)
2447 if (endian == DEVICE_LITTLE_ENDIAN) {
2448 val = bswap32(val);
2449 }
2450#else
2451 if (endian == DEVICE_BIG_ENDIAN) {
2452 val = bswap32(val);
2453 }
2454#endif
5c8a00ce 2455 io_mem_write(mr, addr1, val, 4);
8df1cd07 2456 } else {
8df1cd07 2457 /* RAM case */
5c8a00ce 2458 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2459 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2460 switch (endian) {
2461 case DEVICE_LITTLE_ENDIAN:
2462 stl_le_p(ptr, val);
2463 break;
2464 case DEVICE_BIG_ENDIAN:
2465 stl_be_p(ptr, val);
2466 break;
2467 default:
2468 stl_p(ptr, val);
2469 break;
2470 }
51d7a9eb 2471 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2472 }
2473}
2474
a8170e5e 2475void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2476{
2477 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2478}
2479
a8170e5e 2480void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2481{
2482 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2483}
2484
a8170e5e 2485void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2486{
2487 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2488}
2489
aab33094 2490/* XXX: optimize */
a8170e5e 2491void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2492{
2493 uint8_t v = val;
2494 cpu_physical_memory_write(addr, &v, 1);
2495}
2496
733f0b02 2497/* warning: addr must be aligned */
a8170e5e 2498static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2499 enum device_endian endian)
aab33094 2500{
733f0b02 2501 uint8_t *ptr;
5c8a00ce 2502 MemoryRegion *mr;
149f54b5
PB
2503 hwaddr l = 2;
2504 hwaddr addr1;
733f0b02 2505
5c8a00ce
PB
2506 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2507 true);
2508 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2509#if defined(TARGET_WORDS_BIGENDIAN)
2510 if (endian == DEVICE_LITTLE_ENDIAN) {
2511 val = bswap16(val);
2512 }
2513#else
2514 if (endian == DEVICE_BIG_ENDIAN) {
2515 val = bswap16(val);
2516 }
2517#endif
5c8a00ce 2518 io_mem_write(mr, addr1, val, 2);
733f0b02 2519 } else {
733f0b02 2520 /* RAM case */
5c8a00ce 2521 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2522 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2523 switch (endian) {
2524 case DEVICE_LITTLE_ENDIAN:
2525 stw_le_p(ptr, val);
2526 break;
2527 case DEVICE_BIG_ENDIAN:
2528 stw_be_p(ptr, val);
2529 break;
2530 default:
2531 stw_p(ptr, val);
2532 break;
2533 }
51d7a9eb 2534 invalidate_and_set_dirty(addr1, 2);
733f0b02 2535 }
aab33094
FB
2536}
2537
a8170e5e 2538void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2539{
2540 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2541}
2542
a8170e5e 2543void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2544{
2545 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2546}
2547
a8170e5e 2548void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2549{
2550 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2551}
2552
aab33094 2553/* XXX: optimize */
a8170e5e 2554void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2555{
2556 val = tswap64(val);
71d2b725 2557 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2558}
2559
a8170e5e 2560void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2561{
2562 val = cpu_to_le64(val);
2563 cpu_physical_memory_write(addr, &val, 8);
2564}
2565
a8170e5e 2566void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2567{
2568 val = cpu_to_be64(val);
2569 cpu_physical_memory_write(addr, &val, 8);
2570}
2571
5e2972fd 2572/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2573int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2574 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2575{
2576 int l;
a8170e5e 2577 hwaddr phys_addr;
9b3c35e0 2578 target_ulong page;
13eb76e0
FB
2579
2580 while (len > 0) {
2581 page = addr & TARGET_PAGE_MASK;
2582 phys_addr = cpu_get_phys_page_debug(env, page);
2583 /* if no physical page mapped, return an error */
2584 if (phys_addr == -1)
2585 return -1;
2586 l = (page + TARGET_PAGE_SIZE) - addr;
2587 if (l > len)
2588 l = len;
5e2972fd 2589 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2590 if (is_write)
2591 cpu_physical_memory_write_rom(phys_addr, buf, l);
2592 else
5e2972fd 2593 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2594 len -= l;
2595 buf += l;
2596 addr += l;
2597 }
2598 return 0;
2599}
a68fe89c 2600#endif
13eb76e0 2601
8e4a424b
BS
2602#if !defined(CONFIG_USER_ONLY)
2603
2604/*
2605 * A helper function for the _utterly broken_ virtio device model to find out if
2606 * it's running on a big endian machine. Don't do this at home kids!
2607 */
2608bool virtio_is_big_endian(void);
2609bool virtio_is_big_endian(void)
2610{
2611#if defined(TARGET_WORDS_BIGENDIAN)
2612 return true;
2613#else
2614 return false;
2615#endif
2616}
2617
2618#endif
2619
76f35538 2620#ifndef CONFIG_USER_ONLY
a8170e5e 2621bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2622{
5c8a00ce 2623 MemoryRegion *mr;
149f54b5 2624 hwaddr l = 1;
76f35538 2625
5c8a00ce
PB
2626 mr = address_space_translate(&address_space_memory,
2627 phys_addr, &phys_addr, &l, false);
76f35538 2628
5c8a00ce
PB
2629 return !(memory_region_is_ram(mr) ||
2630 memory_region_is_romd(mr));
76f35538 2631}
bd2fa51f
MH
2632
2633void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2634{
2635 RAMBlock *block;
2636
2637 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2638 func(block->host, block->offset, block->length, opaque);
2639 }
2640}
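A usage sketch for the iterator above; the callback signature (host pointer, offset, length, opaque) is inferred from the call site:

/* Hedged sketch: total up guest RAM by walking every block. */
static void example_count_ram(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    *(ram_addr_t *)opaque += length;
}

static ram_addr_t example_total_ram(void)
{
    ram_addr_t total = 0;
    qemu_ram_foreach_block(example_count_ram, &total);
    return total;
}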
ec3f8c99 2641#endif