]> git.proxmox.com Git - mirror_qemu.git/blame - exec.c
hw/alpha: Don't machine check on missing pci i/o
[mirror_qemu.git] / exec.c
CommitLineData
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
2ff3de68 34#include "sysemu/sysemu.h"
0d09e41a 35#include "hw/xen/xen.h"
1de7afc9
PB
36#include "qemu/timer.h"
37#include "qemu/config-file.h"
022c62cb 38#include "exec/memory.h"
9c17d615 39#include "sysemu/dma.h"
022c62cb 40#include "exec/address-spaces.h"
53a5960a
PB
41#if defined(CONFIG_USER_ONLY)
42#include <qemu.h>
432d268c 43#else /* !CONFIG_USER_ONLY */
9c17d615 44#include "sysemu/xen-mapcache.h"
6506e4f9 45#include "trace.h"
53a5960a 46#endif
0d6d3c87 47#include "exec/cpu-all.h"
54936004 48
022c62cb 49#include "exec/cputlb.h"
5b6dd868 50#include "translate-all.h"
0cac1b66 51
022c62cb 52#include "exec/memory-internal.h"
67d95c15 53
db7b5426 54//#define DEBUG_SUBPAGE
1196be37 55
e2eef170 56#if !defined(CONFIG_USER_ONLY)
74576198 57static int in_migration;
94a6b54f 58
a3161038 59RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
60
61static MemoryRegion *system_memory;
309cb471 62static MemoryRegion *system_io;
62152b8a 63
f6790af6
AK
64AddressSpace address_space_io;
65AddressSpace address_space_memory;
2673a5da 66
0844e007 67MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 68static MemoryRegion io_mem_unassigned;
0e0df1e2 69
e2eef170 70#endif
9fa3e853 71
182735ef 72CPUState *first_cpu;
6a00d601
FB
73/* current CPU in the current thread. It is only valid inside
74 cpu_exec() */
4917cf44 75DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 76/* 0 = Do not count executed instructions.
bf20dc07 77 1 = Precise instruction counting.
2e70f6ef 78 2 = Adaptive rate instruction counting. */
5708fc66 79int use_icount;
6a00d601 80
e2eef170 81#if !defined(CONFIG_USER_ONLY)
4346ae3e 82
1db8abb1
PB
83typedef struct PhysPageEntry PhysPageEntry;
84
85struct PhysPageEntry {
86 uint16_t is_leaf : 1;
87 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
88 uint16_t ptr : 15;
89};
90
0475d94f
PB
91typedef PhysPageEntry Node[L2_SIZE];
92
1db8abb1
PB
93struct AddressSpaceDispatch {
94 /* This is a multi-level map on the physical address space.
95 * The bottom level has pointers to MemoryRegionSections.
96 */
97 PhysPageEntry phys_map;
0475d94f
PB
98 Node *nodes;
99 MemoryRegionSection *sections;
acc9d80b 100 AddressSpace *as;
1db8abb1
PB
101};
102
90260c6c
JK
103#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
104typedef struct subpage_t {
105 MemoryRegion iomem;
acc9d80b 106 AddressSpace *as;
90260c6c
JK
107 hwaddr base;
108 uint16_t sub_section[TARGET_PAGE_SIZE];
109} subpage_t;
110
b41aac4f
LPF
111#define PHYS_SECTION_UNASSIGNED 0
112#define PHYS_SECTION_NOTDIRTY 1
113#define PHYS_SECTION_ROM 2
114#define PHYS_SECTION_WATCH 3
5312bd8b 115
9affd6fc
PB
116typedef struct PhysPageMap {
117 unsigned sections_nb;
118 unsigned sections_nb_alloc;
119 unsigned nodes_nb;
120 unsigned nodes_nb_alloc;
121 Node *nodes;
122 MemoryRegionSection *sections;
123} PhysPageMap;
124
6092666e 125static PhysPageMap *prev_map;
9affd6fc 126static PhysPageMap next_map;
d6f2ea22 127
07f07b31 128#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 129
e2eef170 130static void io_mem_init(void);
62152b8a 131static void memory_map_init(void);
8b9c99d9 132static void *qemu_safe_ram_ptr(ram_addr_t addr);
e2eef170 133
1ec9b909 134static MemoryRegion io_mem_watch;
6658ffb8 135#endif
fd6ce8f6 136
6d9a1304 137#if !defined(CONFIG_USER_ONLY)
d6f2ea22 138
/* Ensure the next_map node pool has room for at least 'nodes' more
 * radix-tree nodes, growing the g_renew()'d array geometrically
 * (double, minimum 16) so repeated reservations stay amortized O(1). */
static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}
150
/* Carve one node out of the (pre-reserved) next_map pool and return its
 * index.  Every entry is initialized to a non-leaf pointing at
 * PHYS_MAP_NODE_NIL, i.e. "nothing mapped below here yet". */
static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    /* ptr is a 15-bit field, so the NIL sentinel must never be handed out;
     * callers must have called phys_map_node_reserve() first. */
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
165
/* Recursive worker for phys_page_set(): walk/extend one level of the
 * physical-page radix tree, pointing pages [*index, *index + *nb) at
 * section 'leaf'.  *index and *nb are advanced as pages are consumed,
 * so sibling calls in the caller's loop continue where this one stopped. */
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    /* Number of pages covered by one entry at this level. */
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        /* Missing interior node: allocate it.  At the bottom level the
         * fresh entries are leaves mapped to "unassigned". */
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            /* Range is aligned and large enough: collapse to a leaf that
             * covers the whole subtree at this level. */
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            /* Partial coverage: recurse one level down. */
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
200
/* Map physical pages [index, index + nb) of dispatch 'd' to section
 * number 'leaf' in the radix page table. */
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
210
/* Walk the radix tree rooted at 'lp' and return the MemoryRegionSection
 * covering physical page 'index'.  Unpopulated subtrees resolve to the
 * PHYS_SECTION_UNASSIGNED section.  'nodes'/'sections' select which map
 * generation (current or next) is being consulted. */
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}
226
e5548617
BS
227bool memory_region_is_unassigned(MemoryRegion *mr)
228{
2a8e7499 229 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 230 && mr != &io_mem_watch;
fd6ce8f6 231}
149f54b5 232
/* Find the section for physical address 'addr' in dispatch 'd'.  When
 * 'resolve_subpage' is set and the page is backed by a subpage container
 * (several regions sharing one target page), descend into the per-offset
 * sub_section table to the real section. */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
248
/* Translate 'addr' to a section plus offset-within-region (*xlat), and
 * clamp *plen so the access does not run past the end of the section's
 * memory region. */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
90260c6c 267
/* Translate 'addr' in 'as' to a terminal MemoryRegion plus offset
 * (*xlat), iterating through any chain of IOMMUs.  *plen is clamped to
 * the largest contiguous run valid for the access; a permission failure
 * in an IOMMU resolves to the "unassigned" region. */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            /* Reached a non-IOMMU region: translation complete. */
            break;
        }

        /* Apply this IOMMU's mapping and continue in its target space. */
        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        /* Shrink len to the span the IOMMU entry actually covers. */
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            /* perm bit 0 = read, bit 1 = write; access not allowed. */
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
301
/* TLB-fill variant of address_space_translate: returns the section
 * without subpage resolution, and asserts the result is not behind an
 * IOMMU (IOMMU targets cannot be cached in the CPU TLB). */
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
5b6dd868 312#endif
fd6ce8f6 313
/* One-time initialization of the execution core: RAM-list lock plus the
 * system memory map and core I/O regions (system emulation only). */
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
fdbb84d1 322
b170fce3 323#if !defined(CONFIG_USER_ONLY)
/* vmstate post_load hook for the common CPU state: scrub a legacy
 * interrupt bit and flush the TLB, since the loaded memory map may not
 * match what the TLB cached pre-migration. */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}
7501267e 336
/* Migration description for architecture-independent CPU state
 * (halted flag and pending interrupt mask). */
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
1a1562f5 349
5b6dd868 350#endif
ea041c0e 351
38d8f5c8 352CPUState *qemu_get_cpu(int index)
ea041c0e 353{
182735ef 354 CPUState *cpu = first_cpu;
ea041c0e 355
182735ef 356 while (cpu) {
55e5c285 357 if (cpu->cpu_index == index) {
5b6dd868 358 break;
55e5c285 359 }
182735ef 360 cpu = cpu->next_cpu;
ea041c0e 361 }
5b6dd868 362
182735ef 363 return cpu;
ea041c0e
FB
364}
365
d6b9e0d6
MT
366void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
367{
182735ef 368 CPUState *cpu;
d6b9e0d6 369
182735ef
AF
370 cpu = first_cpu;
371 while (cpu) {
372 func(cpu, data);
373 cpu = cpu->next_cpu;
d6b9e0d6
MT
374 }
375}
376
/* Register a freshly created CPU: append it to the global CPU list,
 * assign it the next sequential cpu_index, initialize its debug lists,
 * and hook up migration state (common vmstate, plus either the legacy
 * cpu_save/cpu_load pair or the CPUClass-provided vmsd). */
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState **pcpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    /* User mode can create CPUs from multiple threads (clone). */
    cpu_list_lock();
#endif
    cpu->next_cpu = NULL;
    pcpu = &first_cpu;
    cpu_index = 0;
    /* Walk to the tail, counting existing CPUs to derive the new index. */
    while (*pcpu != NULL) {
        pcpu = &(*pcpu)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *pcpu = cpu;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* Legacy per-target save/load; mutually exclusive with cc->vmsd. */
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
415
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
/* Discard any translated code containing 'pc' so the breakpoint takes
 * effect; in user mode guest virtual == host layout, no phys lookup. */
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
/* System mode: resolve pc to its physical address (page number via the
 * debug walker, offset kept from pc) before invalidating. */
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
d720b93d 430
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation cannot implement memory watchpoints (no softmmu):
 * removal is a silent no-op and insertion reports "not supported". */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
442#else
/* Add a watchpoint.
 *
 * 'len' must be a power of two no larger than a target page, and 'addr'
 * must be aligned to it.  On success the new watchpoint is linked into
 * env->watchpoints (GDB-injected ones first so GDB sees its own hits
 * before CPU-internal ones) and optionally returned via *watchpoint.
 * Returns 0 or -EINVAL. */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* Force the page's TLB entry to be refilled so accesses trap. */
    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
475
/* Remove a specific watchpoint.
 *
 * Matches on address, length and flags (ignoring the transient
 * BP_WATCHPOINT_HIT bit).  Returns 0 on success, -ENOENT if no such
 * watchpoint exists. */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
492
/* Remove a specific watchpoint by reference: unlink it, drop the TLB
 * entry that was trapping on its page, and free it.  'watchpoint' must
 * be on env's list; it is invalid after this call. */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}
502
/* Remove all watchpoints whose flags intersect 'mask' (e.g. BP_GDB or
 * BP_CPU); safe iteration since entries are freed while walking. */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
c527ee8f 513#endif
7d03f82f 514
/* Add a breakpoint.
 *
 * Links a new breakpoint at 'pc' into env->breakpoints (GDB-injected
 * ones first) and invalidates any translated code containing pc so the
 * breakpoint is honored.  Optionally returns the new entry via
 * *breakpoint.  Returns 0, or -ENOSYS when the target lacks ICE
 * (in-circuit-emulation/debug) support. */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
542
/* Remove a specific breakpoint matching (pc, flags).  Returns 0 on
 * success, -ENOENT if not found, -ENOSYS without ICE support. */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
560
/* Remove a specific breakpoint by reference: unlink, invalidate the
 * translated code at its pc, and free it.  'breakpoint' must be on
 * env's list; it is invalid after this call. */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}
572
/* Remove all breakpoints whose flags intersect 'mask'; safe iteration
 * since entries are freed while walking. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
585
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            /* Under KVM, single-stepping is delegated to the kernel's
             * guest-debug interface. */
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
603
/* Report a fatal guest/emulator error: print the message and full CPU
 * state to stderr (and the QEMU log when enabled), then abort().
 * In user mode the SIGABRT handler is reset first so a guest-installed
 * handler cannot intercept the abort.  Never returns. */
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* Second copy needed because the message is formatted twice
     * (stderr and the log). */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
636
9349b4f9 637CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 638{
9349b4f9 639 CPUArchState *new_env = cpu_init(env->cpu_model_str);
5a38f081
AL
640#if defined(TARGET_HAS_ICE)
641 CPUBreakpoint *bp;
642 CPUWatchpoint *wp;
643#endif
644
9349b4f9 645 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081 646
5a38f081
AL
647 /* Clone all break/watchpoints.
648 Note: Once we support ptrace with hw-debug register access, make sure
649 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
650 QTAILQ_INIT(&env->breakpoints);
651 QTAILQ_INIT(&env->watchpoints);
5a38f081 652#if defined(TARGET_HAS_ICE)
72cf2d4f 653 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
654 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
655 }
72cf2d4f 656 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
657 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
658 wp->flags, NULL);
659 }
660#endif
661
c5be9f08
TS
662 return new_env;
663}
664
0124311e 665#if !defined(CONFIG_USER_ONLY)
/* Re-arm dirty tracking for [start, end) in the CPU TLBs, so the next
 * write to the range sets the dirty bit again.  The range must lie
 * within a single RAM block (host addresses must be contiguous). */
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below. */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);

}
683
/* Note: start and end must be within the same ram block. */
/* Clear the given dirty flags for the page-aligned range [start, end)
 * and, under TCG, re-arm the TLB so future writes mark it dirty again. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}
702
8b9c99d9 703static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 704{
f6f3fbca 705 int ret = 0;
74576198 706 in_migration = enable;
f6f3fbca 707 return ret;
74576198
AL
708}
709
/* Compute the iotlb value for a TLB entry mapping guest page 'vaddr'
 * to 'section'.  For RAM the value is the ram_addr of the page ORed
 * with a special low-bits section id (notdirty/ROM write handling);
 * for MMIO it is the section index plus the in-section offset.  Pages
 * carrying a watchpoint are redirected to the watchpoint section and
 * flagged TLB_MMIO in *address so every access traps. */
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
9fa3e853
FB
749#endif /* defined(CONFIG_USER_ONLY) */
750
e2eef170 751#if !defined(CONFIG_USER_ONLY)
8da3ff18 752
c227f099 753static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 754 uint16_t section);
acc9d80b 755static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 756
/* Copy 'section' into the next_map section table (growing it
 * geometrically as needed), take a reference on its memory region, and
 * return the new section's index. */
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    /* Balanced by memory_region_unref() in phys_section_destroy(). */
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}
775
/* Drop the reference a section table entry held on 'mr'; if the region
 * is a subpage container owned by the dispatch, tear it down too. */
static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}
786
/* Free an entire retired PhysPageMap generation: destroy each section
 * (dropping region refs), then the section and node arrays, then the
 * map itself. */
static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}
797
/* Register a section smaller than a target page.  The covering page is
 * backed by a subpage_t container (created on first use); the section
 * is then recorded in the container's per-offset table. */
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    /* The page must either already be a subpage or still be unmapped. */
    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        /* First subpage section on this page: create the container and
         * map the page to it. */
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}
825
826
/* Register a page-aligned, whole-page-multiple section: add it to the
 * section table and point every covered page at it. */
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
838
/* MemoryListener region_add hook: split an arbitrary section into an
 * optional unaligned head (subpage), a run of whole pages (multipage),
 * and an optional sub-page tail, registering each piece into the
 * address space's in-construction dispatch.  All size arithmetic is in
 * Int128 since region sizes can exceed 64 bits. */
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    /* 'now' is the piece being registered; 'remain' tracks what's left. */
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        /* Unaligned head: register up to the next page boundary. */
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            /* Sub-page tail. */
            register_subpage(d, &now);
        } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            /* Region offset not page-aligned: emit one page as subpage. */
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            /* Largest whole-page-multiple chunk. */
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
871
/* Drain KVM's coalesced-MMIO ring, if KVM is in use; otherwise a no-op. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
877
/* Acquire the global RAM-block list lock (protects ram_list.blocks
 * against concurrent modification, e.g. from the migration thread). */
void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

/* Release the global RAM-block list lock. */
void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
887
c902760f
MT
888#if defined(__linux__) && !defined(TARGET_S390X)
889
890#include <sys/vfs.h>
891
892#define HUGETLBFS_MAGIC 0x958458f6
893
/* Return the page (block) size of the filesystem backing 'path',
 * retrying statfs() across EINTR.  Returns 0 on failure.  Warns but
 * still returns the size when the path is not on hugetlbfs. */
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
913
/* Back a RAM block with an mmap()ed file under 'path' (intended for a
 * hugetlbfs mount, per -mem-path).  Creates an anonymous-by-unlink temp
 * file, rounds 'memory' up to the huge page size, and maps it.  Returns
 * the mapped area (fd stored in block->fd) or NULL on any failure. */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Smaller-than-hugepage blocks cannot use this backend. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    /* Unlink immediately: the fd keeps the file alive, and the backing
     * store disappears automatically when QEMU exits. */
    unlink(filename);
    g_free(filename);

    /* Round up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
991#endif
992
d17b5288 993static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
994{
995 RAMBlock *block, *next_block;
3e837b2c 996 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 997
49cd9ac6
SH
998 assert(size != 0); /* it would hand out same offset multiple times */
999
a3161038 1000 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1001 return 0;
1002
a3161038 1003 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1004 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1005
1006 end = block->offset + block->length;
1007
a3161038 1008 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1009 if (next_block->offset >= end) {
1010 next = MIN(next, next_block->offset);
1011 }
1012 }
1013 if (next - end >= size && next - end < mingap) {
3e837b2c 1014 offset = end;
04b16653
AW
1015 mingap = next - end;
1016 }
1017 }
3e837b2c
AW
1018
1019 if (offset == RAM_ADDR_MAX) {
1020 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1021 (uint64_t)size);
1022 abort();
1023 }
1024
04b16653
AW
1025 return offset;
1026}
1027
652d7ec2 1028ram_addr_t last_ram_offset(void)
d17b5288
AW
1029{
1030 RAMBlock *block;
1031 ram_addr_t last = 0;
1032
a3161038 1033 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1034 last = MAX(last, block->offset + block->length);
1035
1036 return last;
1037}
1038
ddb97f1d
JB
1039static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1040{
1041 int ret;
ddb97f1d
JB
1042
1043 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2ff3de68
MA
1044 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1045 "dump-guest-core", true)) {
ddb97f1d
JB
1046 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1047 if (ret) {
1048 perror("qemu_madvise");
1049 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1050 "but dump_guest_core=off specified\n");
1051 }
1052 }
1053}
1054
c5705a77 1055void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
1056{
1057 RAMBlock *new_block, *block;
1058
c5705a77 1059 new_block = NULL;
a3161038 1060 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77
AK
1061 if (block->offset == addr) {
1062 new_block = block;
1063 break;
1064 }
1065 }
1066 assert(new_block);
1067 assert(!new_block->idstr[0]);
84b89d78 1068
09e5ab63
AL
1069 if (dev) {
1070 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1071 if (id) {
1072 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1073 g_free(id);
84b89d78
CM
1074 }
1075 }
1076 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1077
b2a8658e
UD
1078 /* This assumes the iothread lock is taken here too. */
1079 qemu_mutex_lock_ramlist();
a3161038 1080 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1081 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1082 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1083 new_block->idstr);
1084 abort();
1085 }
1086 }
b2a8658e 1087 qemu_mutex_unlock_ramlist();
c5705a77
AK
1088}
1089
8490fc78
LC
1090static int memory_try_enable_merging(void *addr, size_t len)
1091{
2ff3de68 1092 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1093 /* disabled by the user */
1094 return 0;
1095 }
1096
1097 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1098}
1099
c5705a77
AK
1100ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1101 MemoryRegion *mr)
1102{
abb26d63 1103 RAMBlock *block, *new_block;
c5705a77
AK
1104
1105 size = TARGET_PAGE_ALIGN(size);
1106 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1107
b2a8658e
UD
1108 /* This assumes the iothread lock is taken here too. */
1109 qemu_mutex_lock_ramlist();
7c637366 1110 new_block->mr = mr;
432d268c 1111 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1112 if (host) {
1113 new_block->host = host;
cd19cfa2 1114 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
1115 } else {
1116 if (mem_path) {
c902760f 1117#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
1118 new_block->host = file_ram_alloc(new_block, size, mem_path);
1119 if (!new_block->host) {
6eebf958 1120 new_block->host = qemu_anon_ram_alloc(size);
8490fc78 1121 memory_try_enable_merging(new_block->host, size);
6977dfe6 1122 }
c902760f 1123#else
6977dfe6
YT
1124 fprintf(stderr, "-mem-path option unsupported\n");
1125 exit(1);
c902760f 1126#endif
6977dfe6 1127 } else {
868bb33f 1128 if (xen_enabled()) {
fce537d4 1129 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
1130 } else if (kvm_enabled()) {
1131 /* some s390/kvm configurations have special constraints */
6eebf958 1132 new_block->host = kvm_ram_alloc(size);
432d268c 1133 } else {
6eebf958 1134 new_block->host = qemu_anon_ram_alloc(size);
432d268c 1135 }
8490fc78 1136 memory_try_enable_merging(new_block->host, size);
6977dfe6 1137 }
c902760f 1138 }
94a6b54f
PB
1139 new_block->length = size;
1140
abb26d63
PB
1141 /* Keep the list sorted from biggest to smallest block. */
1142 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1143 if (block->length < new_block->length) {
1144 break;
1145 }
1146 }
1147 if (block) {
1148 QTAILQ_INSERT_BEFORE(block, new_block, next);
1149 } else {
1150 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1151 }
0d6d3c87 1152 ram_list.mru_block = NULL;
94a6b54f 1153
f798b07f 1154 ram_list.version++;
b2a8658e 1155 qemu_mutex_unlock_ramlist();
f798b07f 1156
7267c094 1157 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1158 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
1159 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1160 0, size >> TARGET_PAGE_BITS);
1720aeee 1161 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1162
ddb97f1d 1163 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1164 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1165
6f0437e8
JK
1166 if (kvm_enabled())
1167 kvm_setup_guest_memory(new_block->host, size);
1168
94a6b54f
PB
1169 return new_block->offset;
1170}
e9a1ab19 1171
c5705a77 1172ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1173{
c5705a77 1174 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1175}
1176
1f2e98b6
AW
1177void qemu_ram_free_from_ptr(ram_addr_t addr)
1178{
1179 RAMBlock *block;
1180
b2a8658e
UD
1181 /* This assumes the iothread lock is taken here too. */
1182 qemu_mutex_lock_ramlist();
a3161038 1183 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1184 if (addr == block->offset) {
a3161038 1185 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1186 ram_list.mru_block = NULL;
f798b07f 1187 ram_list.version++;
7267c094 1188 g_free(block);
b2a8658e 1189 break;
1f2e98b6
AW
1190 }
1191 }
b2a8658e 1192 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1193}
1194
c227f099 1195void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1196{
04b16653
AW
1197 RAMBlock *block;
1198
b2a8658e
UD
1199 /* This assumes the iothread lock is taken here too. */
1200 qemu_mutex_lock_ramlist();
a3161038 1201 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1202 if (addr == block->offset) {
a3161038 1203 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1204 ram_list.mru_block = NULL;
f798b07f 1205 ram_list.version++;
cd19cfa2
HY
1206 if (block->flags & RAM_PREALLOC_MASK) {
1207 ;
1208 } else if (mem_path) {
04b16653
AW
1209#if defined (__linux__) && !defined(TARGET_S390X)
1210 if (block->fd) {
1211 munmap(block->host, block->length);
1212 close(block->fd);
1213 } else {
e7a09b92 1214 qemu_anon_ram_free(block->host, block->length);
04b16653 1215 }
fd28aa13
JK
1216#else
1217 abort();
04b16653
AW
1218#endif
1219 } else {
868bb33f 1220 if (xen_enabled()) {
e41d7c69 1221 xen_invalidate_map_cache_entry(block->host);
432d268c 1222 } else {
e7a09b92 1223 qemu_anon_ram_free(block->host, block->length);
432d268c 1224 }
04b16653 1225 }
7267c094 1226 g_free(block);
b2a8658e 1227 break;
04b16653
AW
1228 }
1229 }
b2a8658e 1230 qemu_mutex_unlock_ramlist();
04b16653 1231
e9a1ab19
FB
1232}
1233
cd19cfa2
HY
1234#ifndef _WIN32
1235void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1236{
1237 RAMBlock *block;
1238 ram_addr_t offset;
1239 int flags;
1240 void *area, *vaddr;
1241
a3161038 1242 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1243 offset = addr - block->offset;
1244 if (offset < block->length) {
1245 vaddr = block->host + offset;
1246 if (block->flags & RAM_PREALLOC_MASK) {
1247 ;
1248 } else {
1249 flags = MAP_FIXED;
1250 munmap(vaddr, length);
1251 if (mem_path) {
1252#if defined(__linux__) && !defined(TARGET_S390X)
1253 if (block->fd) {
1254#ifdef MAP_POPULATE
1255 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1256 MAP_PRIVATE;
1257#else
1258 flags |= MAP_PRIVATE;
1259#endif
1260 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1261 flags, block->fd, offset);
1262 } else {
1263 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1264 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1265 flags, -1, 0);
1266 }
fd28aa13
JK
1267#else
1268 abort();
cd19cfa2
HY
1269#endif
1270 } else {
1271#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1272 flags |= MAP_SHARED | MAP_ANONYMOUS;
1273 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1274 flags, -1, 0);
1275#else
1276 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1277 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1278 flags, -1, 0);
1279#endif
1280 }
1281 if (area != vaddr) {
f15fbc4b
AP
1282 fprintf(stderr, "Could not remap addr: "
1283 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1284 length, addr);
1285 exit(1);
1286 }
8490fc78 1287 memory_try_enable_merging(vaddr, length);
ddb97f1d 1288 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1289 }
1290 return;
1291 }
1292 }
1293}
1294#endif /* !_WIN32 */
1295
1b5ec234 1296static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
dc828ca1 1297{
94a6b54f
PB
1298 RAMBlock *block;
1299
b2a8658e 1300 /* The list is protected by the iothread lock here. */
0d6d3c87
PB
1301 block = ram_list.mru_block;
1302 if (block && addr - block->offset < block->length) {
1303 goto found;
1304 }
a3161038 1305 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1306 if (addr - block->offset < block->length) {
0d6d3c87 1307 goto found;
f471a17e 1308 }
94a6b54f 1309 }
f471a17e
AW
1310
1311 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1312 abort();
1313
0d6d3c87
PB
1314found:
1315 ram_list.mru_block = block;
1b5ec234
PB
1316 return block;
1317}
1318
1319/* Return a host pointer to ram allocated with qemu_ram_alloc.
1320 With the exception of the softmmu code in this file, this should
1321 only be used for local memory (e.g. video ram) that the device owns,
1322 and knows it isn't going to access beyond the end of the block.
1323
1324 It should not be used for general purpose DMA.
1325 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1326 */
1327void *qemu_get_ram_ptr(ram_addr_t addr)
1328{
1329 RAMBlock *block = qemu_get_ram_block(addr);
1330
0d6d3c87
PB
1331 if (xen_enabled()) {
1332 /* We need to check if the requested address is in the RAM
1333 * because we don't want to map the entire memory in QEMU.
1334 * In that case just map until the end of the page.
1335 */
1336 if (block->offset == 0) {
1337 return xen_map_cache(addr, 0, 0);
1338 } else if (block->host == NULL) {
1339 block->host =
1340 xen_map_cache(block->offset, block->length, 1);
1341 }
1342 }
1343 return block->host + (addr - block->offset);
dc828ca1
PB
1344}
1345
0d6d3c87
PB
1346/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1347 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1348 *
1349 * ??? Is this still necessary?
b2e0a138 1350 */
8b9c99d9 1351static void *qemu_safe_ram_ptr(ram_addr_t addr)
b2e0a138
MT
1352{
1353 RAMBlock *block;
1354
b2a8658e 1355 /* The list is protected by the iothread lock here. */
a3161038 1356 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1357 if (addr - block->offset < block->length) {
868bb33f 1358 if (xen_enabled()) {
432d268c
JN
1359 /* We need to check if the requested address is in the RAM
1360 * because we don't want to map the entire memory in QEMU.
712c2b41 1361 * In that case just map until the end of the page.
432d268c
JN
1362 */
1363 if (block->offset == 0) {
e41d7c69 1364 return xen_map_cache(addr, 0, 0);
432d268c 1365 } else if (block->host == NULL) {
e41d7c69
JK
1366 block->host =
1367 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
1368 }
1369 }
b2e0a138
MT
1370 return block->host + (addr - block->offset);
1371 }
1372 }
1373
1374 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1375 abort();
1376
1377 return NULL;
1378}
1379
38bee5dc
SS
1380/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1381 * but takes a size argument */
8b9c99d9 1382static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 1383{
8ab934f9
SS
1384 if (*size == 0) {
1385 return NULL;
1386 }
868bb33f 1387 if (xen_enabled()) {
e41d7c69 1388 return xen_map_cache(addr, *size, 1);
868bb33f 1389 } else {
38bee5dc
SS
1390 RAMBlock *block;
1391
a3161038 1392 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1393 if (addr - block->offset < block->length) {
1394 if (addr - block->offset + *size > block->length)
1395 *size = block->length - addr + block->offset;
1396 return block->host + (addr - block->offset);
1397 }
1398 }
1399
1400 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1401 abort();
38bee5dc
SS
1402 }
1403}
1404
7443b437
PB
1405/* Some of the softmmu routines need to translate from a host pointer
1406 (typically a TLB entry) back to a ram offset. */
1b5ec234 1407MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1408{
94a6b54f
PB
1409 RAMBlock *block;
1410 uint8_t *host = ptr;
1411
868bb33f 1412 if (xen_enabled()) {
e41d7c69 1413 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1414 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1415 }
1416
23887b79
PB
1417 block = ram_list.mru_block;
1418 if (block && block->host && host - block->host < block->length) {
1419 goto found;
1420 }
1421
a3161038 1422 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
1423 /* This case append when the block is not mapped. */
1424 if (block->host == NULL) {
1425 continue;
1426 }
f471a17e 1427 if (host - block->host < block->length) {
23887b79 1428 goto found;
f471a17e 1429 }
94a6b54f 1430 }
432d268c 1431
1b5ec234 1432 return NULL;
23887b79
PB
1433
1434found:
1435 *ram_addr = block->offset + (host - block->host);
1b5ec234 1436 return block->mr;
e890261f 1437}
f471a17e 1438
a8170e5e 1439static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1440 uint64_t val, unsigned size)
9fa3e853 1441{
3a7d929e 1442 int dirty_flags;
f7c11b53 1443 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1444 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
0e0df1e2 1445 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1446 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1447 }
0e0df1e2
AK
1448 switch (size) {
1449 case 1:
1450 stb_p(qemu_get_ram_ptr(ram_addr), val);
1451 break;
1452 case 2:
1453 stw_p(qemu_get_ram_ptr(ram_addr), val);
1454 break;
1455 case 4:
1456 stl_p(qemu_get_ram_ptr(ram_addr), val);
1457 break;
1458 default:
1459 abort();
3a7d929e 1460 }
f23db169 1461 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1462 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1463 /* we remove the notdirty callback only if the code has been
1464 flushed */
4917cf44
AF
1465 if (dirty_flags == 0xff) {
1466 CPUArchState *env = current_cpu->env_ptr;
1467 tlb_set_dirty(env, env->mem_io_vaddr);
1468 }
9fa3e853
FB
1469}
1470
b018ddf6
PB
1471static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1472 unsigned size, bool is_write)
1473{
1474 return is_write;
1475}
1476
0e0df1e2 1477static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1478 .write = notdirty_mem_write,
b018ddf6 1479 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1480 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1481};
1482
0f459d16 1483/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1484static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1485{
4917cf44 1486 CPUArchState *env = current_cpu->env_ptr;
06d55cc1 1487 target_ulong pc, cs_base;
0f459d16 1488 target_ulong vaddr;
a1d1bb31 1489 CPUWatchpoint *wp;
06d55cc1 1490 int cpu_flags;
0f459d16 1491
06d55cc1
AL
1492 if (env->watchpoint_hit) {
1493 /* We re-entered the check after replacing the TB. Now raise
1494 * the debug interrupt so that is will trigger after the
1495 * current instruction. */
c3affe56 1496 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1497 return;
1498 }
2e70f6ef 1499 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1500 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1501 if ((vaddr == (wp->vaddr & len_mask) ||
1502 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1503 wp->flags |= BP_WATCHPOINT_HIT;
1504 if (!env->watchpoint_hit) {
1505 env->watchpoint_hit = wp;
5a316526 1506 tb_check_watchpoint(env);
6e140f28
AL
1507 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1508 env->exception_index = EXCP_DEBUG;
488d6577 1509 cpu_loop_exit(env);
6e140f28
AL
1510 } else {
1511 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1512 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1513 cpu_resume_from_signal(env, NULL);
6e140f28 1514 }
06d55cc1 1515 }
6e140f28
AL
1516 } else {
1517 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1518 }
1519 }
1520}
1521
6658ffb8
PB
1522/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1523 so these check for a hit then pass through to the normal out-of-line
1524 phys routines. */
a8170e5e 1525static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1526 unsigned size)
6658ffb8 1527{
1ec9b909
AK
1528 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1529 switch (size) {
1530 case 1: return ldub_phys(addr);
1531 case 2: return lduw_phys(addr);
1532 case 4: return ldl_phys(addr);
1533 default: abort();
1534 }
6658ffb8
PB
1535}
1536
a8170e5e 1537static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1538 uint64_t val, unsigned size)
6658ffb8 1539{
1ec9b909
AK
1540 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1541 switch (size) {
67364150
MF
1542 case 1:
1543 stb_phys(addr, val);
1544 break;
1545 case 2:
1546 stw_phys(addr, val);
1547 break;
1548 case 4:
1549 stl_phys(addr, val);
1550 break;
1ec9b909
AK
1551 default: abort();
1552 }
6658ffb8
PB
1553}
1554
1ec9b909
AK
1555static const MemoryRegionOps watch_mem_ops = {
1556 .read = watch_mem_read,
1557 .write = watch_mem_write,
1558 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1559};
6658ffb8 1560
a8170e5e 1561static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1562 unsigned len)
db7b5426 1563{
acc9d80b
JK
1564 subpage_t *subpage = opaque;
1565 uint8_t buf[4];
791af8c8 1566
db7b5426 1567#if defined(DEBUG_SUBPAGE)
acc9d80b
JK
1568 printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
1569 subpage, len, addr);
db7b5426 1570#endif
acc9d80b
JK
1571 address_space_read(subpage->as, addr + subpage->base, buf, len);
1572 switch (len) {
1573 case 1:
1574 return ldub_p(buf);
1575 case 2:
1576 return lduw_p(buf);
1577 case 4:
1578 return ldl_p(buf);
1579 default:
1580 abort();
1581 }
db7b5426
BS
1582}
1583
a8170e5e 1584static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1585 uint64_t value, unsigned len)
db7b5426 1586{
acc9d80b
JK
1587 subpage_t *subpage = opaque;
1588 uint8_t buf[4];
1589
db7b5426 1590#if defined(DEBUG_SUBPAGE)
70c68e44 1591 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
acc9d80b
JK
1592 " value %"PRIx64"\n",
1593 __func__, subpage, len, addr, value);
db7b5426 1594#endif
acc9d80b
JK
1595 switch (len) {
1596 case 1:
1597 stb_p(buf, value);
1598 break;
1599 case 2:
1600 stw_p(buf, value);
1601 break;
1602 case 4:
1603 stl_p(buf, value);
1604 break;
1605 default:
1606 abort();
1607 }
1608 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1609}
1610
c353e4cc
PB
1611static bool subpage_accepts(void *opaque, hwaddr addr,
1612 unsigned size, bool is_write)
1613{
acc9d80b 1614 subpage_t *subpage = opaque;
c353e4cc 1615#if defined(DEBUG_SUBPAGE)
acc9d80b
JK
1616 printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
1617 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1618#endif
1619
acc9d80b
JK
1620 return address_space_access_valid(subpage->as, addr + subpage->base,
1621 size, is_write);
c353e4cc
PB
1622}
1623
70c68e44
AK
1624static const MemoryRegionOps subpage_ops = {
1625 .read = subpage_read,
1626 .write = subpage_write,
c353e4cc 1627 .valid.accepts = subpage_accepts,
70c68e44 1628 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1629};
1630
c227f099 1631static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1632 uint16_t section)
db7b5426
BS
1633{
1634 int idx, eidx;
1635
1636 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1637 return -1;
1638 idx = SUBPAGE_IDX(start);
1639 eidx = SUBPAGE_IDX(end);
1640#if defined(DEBUG_SUBPAGE)
0bf9e31a 1641 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
1642 mmio, start, end, idx, eidx, memory);
1643#endif
db7b5426 1644 for (; idx <= eidx; idx++) {
5312bd8b 1645 mmio->sub_section[idx] = section;
db7b5426
BS
1646 }
1647
1648 return 0;
1649}
1650
acc9d80b 1651static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1652{
c227f099 1653 subpage_t *mmio;
db7b5426 1654
7267c094 1655 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1656
acc9d80b 1657 mmio->as = as;
1eec614b 1658 mmio->base = base;
2c9b15ca 1659 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
70c68e44 1660 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1661 mmio->iomem.subpage = true;
db7b5426 1662#if defined(DEBUG_SUBPAGE)
1eec614b
AL
1663 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
1664 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 1665#endif
b41aac4f 1666 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1667
1668 return mmio;
1669}
1670
5312bd8b
AK
1671static uint16_t dummy_section(MemoryRegion *mr)
1672{
1673 MemoryRegionSection section = {
1674 .mr = mr,
1675 .offset_within_address_space = 0,
1676 .offset_within_region = 0,
052e87b0 1677 .size = int128_2_64(),
5312bd8b
AK
1678 };
1679
1680 return phys_section_add(&section);
1681}
1682
a8170e5e 1683MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1684{
0475d94f 1685 return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1686}
1687
e9179ce1
AK
1688static void io_mem_init(void)
1689{
2c9b15ca
PB
1690 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1691 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
0e0df1e2 1692 "unassigned", UINT64_MAX);
2c9b15ca 1693 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
0e0df1e2 1694 "notdirty", UINT64_MAX);
2c9b15ca 1695 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1ec9b909 1696 "watch", UINT64_MAX);
e9179ce1
AK
1697}
1698
ac1970fb 1699static void mem_begin(MemoryListener *listener)
00752703
PB
1700{
1701 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1702 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1703
1704 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1705 d->as = as;
1706 as->next_dispatch = d;
1707}
1708
1709static void mem_commit(MemoryListener *listener)
ac1970fb 1710{
89ae337a 1711 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1712 AddressSpaceDispatch *cur = as->dispatch;
1713 AddressSpaceDispatch *next = as->next_dispatch;
1714
1715 next->nodes = next_map.nodes;
1716 next->sections = next_map.sections;
ac1970fb 1717
0475d94f
PB
1718 as->dispatch = next;
1719 g_free(cur);
ac1970fb
AK
1720}
1721
50c1e149
AK
1722static void core_begin(MemoryListener *listener)
1723{
b41aac4f
LPF
1724 uint16_t n;
1725
6092666e
PB
1726 prev_map = g_new(PhysPageMap, 1);
1727 *prev_map = next_map;
1728
9affd6fc 1729 memset(&next_map, 0, sizeof(next_map));
b41aac4f
LPF
1730 n = dummy_section(&io_mem_unassigned);
1731 assert(n == PHYS_SECTION_UNASSIGNED);
1732 n = dummy_section(&io_mem_notdirty);
1733 assert(n == PHYS_SECTION_NOTDIRTY);
1734 n = dummy_section(&io_mem_rom);
1735 assert(n == PHYS_SECTION_ROM);
1736 n = dummy_section(&io_mem_watch);
1737 assert(n == PHYS_SECTION_WATCH);
50c1e149
AK
1738}
1739
9affd6fc
PB
1740/* This listener's commit run after the other AddressSpaceDispatch listeners'.
1741 * All AddressSpaceDispatch instances have switched to the next map.
1742 */
1743static void core_commit(MemoryListener *listener)
1744{
6092666e 1745 phys_sections_free(prev_map);
9affd6fc
PB
1746}
1747
1d71148e 1748static void tcg_commit(MemoryListener *listener)
50c1e149 1749{
182735ef 1750 CPUState *cpu;
117712c3
AK
1751
1752 /* since each CPU stores ram addresses in its TLB cache, we must
1753 reset the modified entries */
1754 /* XXX: slow ! */
182735ef
AF
1755 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1756 CPUArchState *env = cpu->env_ptr;
1757
117712c3
AK
1758 tlb_flush(env, 1);
1759 }
50c1e149
AK
1760}
1761
93632747
AK
1762static void core_log_global_start(MemoryListener *listener)
1763{
1764 cpu_physical_memory_set_dirty_tracking(1);
1765}
1766
1767static void core_log_global_stop(MemoryListener *listener)
1768{
1769 cpu_physical_memory_set_dirty_tracking(0);
1770}
1771
93632747 1772static MemoryListener core_memory_listener = {
50c1e149 1773 .begin = core_begin,
9affd6fc 1774 .commit = core_commit,
93632747
AK
1775 .log_global_start = core_log_global_start,
1776 .log_global_stop = core_log_global_stop,
ac1970fb 1777 .priority = 1,
93632747
AK
1778};
1779
1d71148e
AK
1780static MemoryListener tcg_memory_listener = {
1781 .commit = tcg_commit,
1782};
1783
ac1970fb
AK
1784void address_space_init_dispatch(AddressSpace *as)
1785{
00752703 1786 as->dispatch = NULL;
89ae337a 1787 as->dispatch_listener = (MemoryListener) {
ac1970fb 1788 .begin = mem_begin,
00752703 1789 .commit = mem_commit,
ac1970fb
AK
1790 .region_add = mem_add,
1791 .region_nop = mem_add,
1792 .priority = 0,
1793 };
89ae337a 1794 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1795}
1796
83f3c251
AK
1797void address_space_destroy_dispatch(AddressSpace *as)
1798{
1799 AddressSpaceDispatch *d = as->dispatch;
1800
89ae337a 1801 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1802 g_free(d);
1803 as->dispatch = NULL;
1804}
1805
62152b8a
AK
1806static void memory_map_init(void)
1807{
7267c094 1808 system_memory = g_malloc(sizeof(*system_memory));
2c9b15ca 1809 memory_region_init(system_memory, NULL, "system", INT64_MAX);
7dca8043 1810 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1811
7267c094 1812 system_io = g_malloc(sizeof(*system_io));
2c9b15ca 1813 memory_region_init(system_io, NULL, "io", 65536);
7dca8043 1814 address_space_init(&address_space_io, system_io, "I/O");
93632747 1815
f6790af6 1816 memory_listener_register(&core_memory_listener, &address_space_memory);
f6790af6 1817 memory_listener_register(&tcg_memory_listener, &address_space_memory);
62152b8a
AK
1818}
1819
1820MemoryRegion *get_system_memory(void)
1821{
1822 return system_memory;
1823}
1824
309cb471
AK
1825MemoryRegion *get_system_io(void)
1826{
1827 return system_io;
1828}
1829
e2eef170
PB
1830#endif /* !defined(CONFIG_USER_ONLY) */
1831
13eb76e0
FB
1832/* physical memory access (slow version, mainly for debug) */
1833#if defined(CONFIG_USER_ONLY)
9349b4f9 1834int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1835 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1836{
1837 int l, flags;
1838 target_ulong page;
53a5960a 1839 void * p;
13eb76e0
FB
1840
1841 while (len > 0) {
1842 page = addr & TARGET_PAGE_MASK;
1843 l = (page + TARGET_PAGE_SIZE) - addr;
1844 if (l > len)
1845 l = len;
1846 flags = page_get_flags(page);
1847 if (!(flags & PAGE_VALID))
a68fe89c 1848 return -1;
13eb76e0
FB
1849 if (is_write) {
1850 if (!(flags & PAGE_WRITE))
a68fe89c 1851 return -1;
579a97f7 1852 /* XXX: this code should not depend on lock_user */
72fb7daa 1853 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1854 return -1;
72fb7daa
AJ
1855 memcpy(p, buf, l);
1856 unlock_user(p, addr, l);
13eb76e0
FB
1857 } else {
1858 if (!(flags & PAGE_READ))
a68fe89c 1859 return -1;
579a97f7 1860 /* XXX: this code should not depend on lock_user */
72fb7daa 1861 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1862 return -1;
72fb7daa 1863 memcpy(buf, p, l);
5b257578 1864 unlock_user(p, addr, 0);
13eb76e0
FB
1865 }
1866 len -= l;
1867 buf += l;
1868 addr += l;
1869 }
a68fe89c 1870 return 0;
13eb76e0 1871}
8df1cd07 1872
13eb76e0 1873#else
51d7a9eb 1874
a8170e5e
AK
1875static void invalidate_and_set_dirty(hwaddr addr,
1876 hwaddr length)
51d7a9eb
AP
1877{
1878 if (!cpu_physical_memory_is_dirty(addr)) {
1879 /* invalidate code */
1880 tb_invalidate_phys_page_range(addr, addr + length, 0);
1881 /* set dirty bit */
1882 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1883 }
e226939d 1884 xen_modified_memory(addr, length);
51d7a9eb
AP
1885}
1886
2bbfa05d
PB
1887static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1888{
1889 if (memory_region_is_ram(mr)) {
1890 return !(is_write && mr->readonly);
1891 }
1892 if (memory_region_is_romd(mr)) {
1893 return !is_write;
1894 }
1895
1896 return false;
1897}
1898
f52cc467 1899static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
82f2563f 1900{
f52cc467 1901 if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) {
82f2563f
PB
1902 return 4;
1903 }
f52cc467 1904 if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
82f2563f
PB
1905 return 2;
1906 }
1907 return 1;
1908}
1909
fd8aaa76 1910bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1911 int len, bool is_write)
13eb76e0 1912{
149f54b5 1913 hwaddr l;
13eb76e0 1914 uint8_t *ptr;
791af8c8 1915 uint64_t val;
149f54b5 1916 hwaddr addr1;
5c8a00ce 1917 MemoryRegion *mr;
fd8aaa76 1918 bool error = false;
3b46e624 1919
13eb76e0 1920 while (len > 0) {
149f54b5 1921 l = len;
5c8a00ce 1922 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 1923
13eb76e0 1924 if (is_write) {
5c8a00ce
PB
1925 if (!memory_access_is_direct(mr, is_write)) {
1926 l = memory_access_size(mr, l, addr1);
4917cf44 1927 /* XXX: could force current_cpu to NULL to avoid
6a00d601 1928 potential bugs */
82f2563f 1929 if (l == 4) {
1c213d19 1930 /* 32 bit write access */
c27004ec 1931 val = ldl_p(buf);
5c8a00ce 1932 error |= io_mem_write(mr, addr1, val, 4);
82f2563f 1933 } else if (l == 2) {
1c213d19 1934 /* 16 bit write access */
c27004ec 1935 val = lduw_p(buf);
5c8a00ce 1936 error |= io_mem_write(mr, addr1, val, 2);
13eb76e0 1937 } else {
1c213d19 1938 /* 8 bit write access */
c27004ec 1939 val = ldub_p(buf);
5c8a00ce 1940 error |= io_mem_write(mr, addr1, val, 1);
13eb76e0 1941 }
2bbfa05d 1942 } else {
5c8a00ce 1943 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 1944 /* RAM case */
5579c7f3 1945 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1946 memcpy(ptr, buf, l);
51d7a9eb 1947 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1948 }
1949 } else {
5c8a00ce 1950 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 1951 /* I/O case */
5c8a00ce 1952 l = memory_access_size(mr, l, addr1);
82f2563f 1953 if (l == 4) {
13eb76e0 1954 /* 32 bit read access */
5c8a00ce 1955 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 1956 stl_p(buf, val);
82f2563f 1957 } else if (l == 2) {
13eb76e0 1958 /* 16 bit read access */
5c8a00ce 1959 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 1960 stw_p(buf, val);
13eb76e0 1961 } else {
1c213d19 1962 /* 8 bit read access */
5c8a00ce 1963 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 1964 stb_p(buf, val);
13eb76e0
FB
1965 }
1966 } else {
1967 /* RAM case */
5c8a00ce 1968 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 1969 memcpy(buf, ptr, l);
13eb76e0
FB
1970 }
1971 }
1972 len -= l;
1973 buf += l;
1974 addr += l;
1975 }
fd8aaa76
PB
1976
1977 return error;
13eb76e0 1978}
8df1cd07 1979
fd8aaa76 1980bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1981 const uint8_t *buf, int len)
1982{
fd8aaa76 1983 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
1984}
1985
fd8aaa76 1986bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 1987{
fd8aaa76 1988 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
1989}
1990
1991
a8170e5e 1992void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1993 int len, int is_write)
1994{
fd8aaa76 1995 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
1996}
1997
d0ecd2aa 1998/* used for ROM loading : can write in RAM and ROM */
a8170e5e 1999void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
2000 const uint8_t *buf, int len)
2001{
149f54b5 2002 hwaddr l;
d0ecd2aa 2003 uint8_t *ptr;
149f54b5 2004 hwaddr addr1;
5c8a00ce 2005 MemoryRegion *mr;
3b46e624 2006
d0ecd2aa 2007 while (len > 0) {
149f54b5 2008 l = len;
5c8a00ce
PB
2009 mr = address_space_translate(&address_space_memory,
2010 addr, &addr1, &l, true);
3b46e624 2011
5c8a00ce
PB
2012 if (!(memory_region_is_ram(mr) ||
2013 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2014 /* do nothing */
2015 } else {
5c8a00ce 2016 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2017 /* ROM/RAM case */
5579c7f3 2018 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2019 memcpy(ptr, buf, l);
51d7a9eb 2020 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2021 }
2022 len -= l;
2023 buf += l;
2024 addr += l;
2025 }
2026}
2027
6d16c2f8 2028typedef struct {
d3e71559 2029 MemoryRegion *mr;
6d16c2f8 2030 void *buffer;
a8170e5e
AK
2031 hwaddr addr;
2032 hwaddr len;
6d16c2f8
AL
2033} BounceBuffer;
2034
2035static BounceBuffer bounce;
2036
ba223c29
AL
2037typedef struct MapClient {
2038 void *opaque;
2039 void (*callback)(void *opaque);
72cf2d4f 2040 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2041} MapClient;
2042
72cf2d4f
BS
2043static QLIST_HEAD(map_client_list, MapClient) map_client_list
2044 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2045
2046void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2047{
7267c094 2048 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2049
2050 client->opaque = opaque;
2051 client->callback = callback;
72cf2d4f 2052 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2053 return client;
2054}
2055
8b9c99d9 2056static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2057{
2058 MapClient *client = (MapClient *)_client;
2059
72cf2d4f 2060 QLIST_REMOVE(client, link);
7267c094 2061 g_free(client);
ba223c29
AL
2062}
2063
2064static void cpu_notify_map_clients(void)
2065{
2066 MapClient *client;
2067
72cf2d4f
BS
2068 while (!QLIST_EMPTY(&map_client_list)) {
2069 client = QLIST_FIRST(&map_client_list);
ba223c29 2070 client->callback(client->opaque);
34d5e948 2071 cpu_unregister_map_client(client);
ba223c29
AL
2072 }
2073}
2074
51644ab7
PB
2075bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2076{
5c8a00ce 2077 MemoryRegion *mr;
51644ab7
PB
2078 hwaddr l, xlat;
2079
2080 while (len > 0) {
2081 l = len;
5c8a00ce
PB
2082 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2083 if (!memory_access_is_direct(mr, is_write)) {
2084 l = memory_access_size(mr, l, addr);
2085 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2086 return false;
2087 }
2088 }
2089
2090 len -= l;
2091 addr += l;
2092 }
2093 return true;
2094}
2095
6d16c2f8
AL
2096/* Map a physical memory region into a host virtual address.
2097 * May map a subset of the requested range, given by and returned in *plen.
2098 * May return NULL if resources needed to perform the mapping are exhausted.
2099 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2100 * Use cpu_register_map_client() to know when retrying the map operation is
2101 * likely to succeed.
6d16c2f8 2102 */
ac1970fb 2103void *address_space_map(AddressSpace *as,
a8170e5e
AK
2104 hwaddr addr,
2105 hwaddr *plen,
ac1970fb 2106 bool is_write)
6d16c2f8 2107{
a8170e5e 2108 hwaddr len = *plen;
e3127ae0
PB
2109 hwaddr done = 0;
2110 hwaddr l, xlat, base;
2111 MemoryRegion *mr, *this_mr;
2112 ram_addr_t raddr;
6d16c2f8 2113
e3127ae0
PB
2114 if (len == 0) {
2115 return NULL;
2116 }
38bee5dc 2117
e3127ae0
PB
2118 l = len;
2119 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2120 if (!memory_access_is_direct(mr, is_write)) {
2121 if (bounce.buffer) {
2122 return NULL;
6d16c2f8 2123 }
e3127ae0
PB
2124 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2125 bounce.addr = addr;
2126 bounce.len = l;
d3e71559
PB
2127
2128 memory_region_ref(mr);
2129 bounce.mr = mr;
e3127ae0
PB
2130 if (!is_write) {
2131 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2132 }
6d16c2f8 2133
e3127ae0
PB
2134 *plen = l;
2135 return bounce.buffer;
2136 }
2137
2138 base = xlat;
2139 raddr = memory_region_get_ram_addr(mr);
2140
2141 for (;;) {
6d16c2f8
AL
2142 len -= l;
2143 addr += l;
e3127ae0
PB
2144 done += l;
2145 if (len == 0) {
2146 break;
2147 }
2148
2149 l = len;
2150 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2151 if (this_mr != mr || xlat != base + done) {
2152 break;
2153 }
6d16c2f8 2154 }
e3127ae0 2155
d3e71559 2156 memory_region_ref(mr);
e3127ae0
PB
2157 *plen = done;
2158 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2159}
2160
ac1970fb 2161/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2162 * Will also mark the memory as dirty if is_write == 1. access_len gives
2163 * the amount of memory that was actually read or written by the caller.
2164 */
a8170e5e
AK
2165void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2166 int is_write, hwaddr access_len)
6d16c2f8
AL
2167{
2168 if (buffer != bounce.buffer) {
d3e71559
PB
2169 MemoryRegion *mr;
2170 ram_addr_t addr1;
2171
2172 mr = qemu_ram_addr_from_host(buffer, &addr1);
2173 assert(mr != NULL);
6d16c2f8 2174 if (is_write) {
6d16c2f8
AL
2175 while (access_len) {
2176 unsigned l;
2177 l = TARGET_PAGE_SIZE;
2178 if (l > access_len)
2179 l = access_len;
51d7a9eb 2180 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2181 addr1 += l;
2182 access_len -= l;
2183 }
2184 }
868bb33f 2185 if (xen_enabled()) {
e41d7c69 2186 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2187 }
d3e71559 2188 memory_region_unref(mr);
6d16c2f8
AL
2189 return;
2190 }
2191 if (is_write) {
ac1970fb 2192 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2193 }
f8a83245 2194 qemu_vfree(bounce.buffer);
6d16c2f8 2195 bounce.buffer = NULL;
d3e71559 2196 memory_region_unref(bounce.mr);
ba223c29 2197 cpu_notify_map_clients();
6d16c2f8 2198}
d0ecd2aa 2199
a8170e5e
AK
2200void *cpu_physical_memory_map(hwaddr addr,
2201 hwaddr *plen,
ac1970fb
AK
2202 int is_write)
2203{
2204 return address_space_map(&address_space_memory, addr, plen, is_write);
2205}
2206
a8170e5e
AK
2207void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2208 int is_write, hwaddr access_len)
ac1970fb
AK
2209{
2210 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2211}
2212
8df1cd07 2213/* warning: addr must be aligned */
a8170e5e 2214static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2215 enum device_endian endian)
8df1cd07 2216{
8df1cd07 2217 uint8_t *ptr;
791af8c8 2218 uint64_t val;
5c8a00ce 2219 MemoryRegion *mr;
149f54b5
PB
2220 hwaddr l = 4;
2221 hwaddr addr1;
8df1cd07 2222
5c8a00ce
PB
2223 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2224 false);
2225 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2226 /* I/O case */
5c8a00ce 2227 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2228#if defined(TARGET_WORDS_BIGENDIAN)
2229 if (endian == DEVICE_LITTLE_ENDIAN) {
2230 val = bswap32(val);
2231 }
2232#else
2233 if (endian == DEVICE_BIG_ENDIAN) {
2234 val = bswap32(val);
2235 }
2236#endif
8df1cd07
FB
2237 } else {
2238 /* RAM case */
5c8a00ce 2239 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2240 & TARGET_PAGE_MASK)
149f54b5 2241 + addr1);
1e78bcc1
AG
2242 switch (endian) {
2243 case DEVICE_LITTLE_ENDIAN:
2244 val = ldl_le_p(ptr);
2245 break;
2246 case DEVICE_BIG_ENDIAN:
2247 val = ldl_be_p(ptr);
2248 break;
2249 default:
2250 val = ldl_p(ptr);
2251 break;
2252 }
8df1cd07
FB
2253 }
2254 return val;
2255}
2256
a8170e5e 2257uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2258{
2259 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2260}
2261
a8170e5e 2262uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2263{
2264 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2265}
2266
a8170e5e 2267uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2268{
2269 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2270}
2271
84b7b8e7 2272/* warning: addr must be aligned */
a8170e5e 2273static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2274 enum device_endian endian)
84b7b8e7 2275{
84b7b8e7
FB
2276 uint8_t *ptr;
2277 uint64_t val;
5c8a00ce 2278 MemoryRegion *mr;
149f54b5
PB
2279 hwaddr l = 8;
2280 hwaddr addr1;
84b7b8e7 2281
5c8a00ce
PB
2282 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2283 false);
2284 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2285 /* I/O case */
5c8a00ce 2286 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2287#if defined(TARGET_WORDS_BIGENDIAN)
2288 if (endian == DEVICE_LITTLE_ENDIAN) {
2289 val = bswap64(val);
2290 }
2291#else
2292 if (endian == DEVICE_BIG_ENDIAN) {
2293 val = bswap64(val);
2294 }
84b7b8e7
FB
2295#endif
2296 } else {
2297 /* RAM case */
5c8a00ce 2298 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2299 & TARGET_PAGE_MASK)
149f54b5 2300 + addr1);
1e78bcc1
AG
2301 switch (endian) {
2302 case DEVICE_LITTLE_ENDIAN:
2303 val = ldq_le_p(ptr);
2304 break;
2305 case DEVICE_BIG_ENDIAN:
2306 val = ldq_be_p(ptr);
2307 break;
2308 default:
2309 val = ldq_p(ptr);
2310 break;
2311 }
84b7b8e7
FB
2312 }
2313 return val;
2314}
2315
a8170e5e 2316uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2317{
2318 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2319}
2320
a8170e5e 2321uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2322{
2323 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2324}
2325
a8170e5e 2326uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2327{
2328 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2329}
2330
aab33094 2331/* XXX: optimize */
a8170e5e 2332uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2333{
2334 uint8_t val;
2335 cpu_physical_memory_read(addr, &val, 1);
2336 return val;
2337}
2338
733f0b02 2339/* warning: addr must be aligned */
a8170e5e 2340static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2341 enum device_endian endian)
aab33094 2342{
733f0b02
MT
2343 uint8_t *ptr;
2344 uint64_t val;
5c8a00ce 2345 MemoryRegion *mr;
149f54b5
PB
2346 hwaddr l = 2;
2347 hwaddr addr1;
733f0b02 2348
5c8a00ce
PB
2349 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2350 false);
2351 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2352 /* I/O case */
5c8a00ce 2353 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2354#if defined(TARGET_WORDS_BIGENDIAN)
2355 if (endian == DEVICE_LITTLE_ENDIAN) {
2356 val = bswap16(val);
2357 }
2358#else
2359 if (endian == DEVICE_BIG_ENDIAN) {
2360 val = bswap16(val);
2361 }
2362#endif
733f0b02
MT
2363 } else {
2364 /* RAM case */
5c8a00ce 2365 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2366 & TARGET_PAGE_MASK)
149f54b5 2367 + addr1);
1e78bcc1
AG
2368 switch (endian) {
2369 case DEVICE_LITTLE_ENDIAN:
2370 val = lduw_le_p(ptr);
2371 break;
2372 case DEVICE_BIG_ENDIAN:
2373 val = lduw_be_p(ptr);
2374 break;
2375 default:
2376 val = lduw_p(ptr);
2377 break;
2378 }
733f0b02
MT
2379 }
2380 return val;
aab33094
FB
2381}
2382
a8170e5e 2383uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2384{
2385 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2386}
2387
a8170e5e 2388uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2389{
2390 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2391}
2392
a8170e5e 2393uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2394{
2395 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2396}
2397
8df1cd07
FB
2398/* warning: addr must be aligned. The ram page is not masked as dirty
2399 and the code inside is not invalidated. It is useful if the dirty
2400 bits are used to track modified PTEs */
a8170e5e 2401void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2402{
8df1cd07 2403 uint8_t *ptr;
5c8a00ce 2404 MemoryRegion *mr;
149f54b5
PB
2405 hwaddr l = 4;
2406 hwaddr addr1;
8df1cd07 2407
5c8a00ce
PB
2408 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2409 true);
2410 if (l < 4 || !memory_access_is_direct(mr, true)) {
2411 io_mem_write(mr, addr1, val, 4);
8df1cd07 2412 } else {
5c8a00ce 2413 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2414 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2415 stl_p(ptr, val);
74576198
AL
2416
2417 if (unlikely(in_migration)) {
2418 if (!cpu_physical_memory_is_dirty(addr1)) {
2419 /* invalidate code */
2420 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2421 /* set dirty bit */
f7c11b53
YT
2422 cpu_physical_memory_set_dirty_flags(
2423 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2424 }
2425 }
8df1cd07
FB
2426 }
2427}
2428
2429/* warning: addr must be aligned */
a8170e5e 2430static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2431 enum device_endian endian)
8df1cd07 2432{
8df1cd07 2433 uint8_t *ptr;
5c8a00ce 2434 MemoryRegion *mr;
149f54b5
PB
2435 hwaddr l = 4;
2436 hwaddr addr1;
8df1cd07 2437
5c8a00ce
PB
2438 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2439 true);
2440 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2441#if defined(TARGET_WORDS_BIGENDIAN)
2442 if (endian == DEVICE_LITTLE_ENDIAN) {
2443 val = bswap32(val);
2444 }
2445#else
2446 if (endian == DEVICE_BIG_ENDIAN) {
2447 val = bswap32(val);
2448 }
2449#endif
5c8a00ce 2450 io_mem_write(mr, addr1, val, 4);
8df1cd07 2451 } else {
8df1cd07 2452 /* RAM case */
5c8a00ce 2453 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2454 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2455 switch (endian) {
2456 case DEVICE_LITTLE_ENDIAN:
2457 stl_le_p(ptr, val);
2458 break;
2459 case DEVICE_BIG_ENDIAN:
2460 stl_be_p(ptr, val);
2461 break;
2462 default:
2463 stl_p(ptr, val);
2464 break;
2465 }
51d7a9eb 2466 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2467 }
2468}
2469
a8170e5e 2470void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2471{
2472 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2473}
2474
a8170e5e 2475void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2476{
2477 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2478}
2479
a8170e5e 2480void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2481{
2482 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2483}
2484
aab33094 2485/* XXX: optimize */
a8170e5e 2486void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2487{
2488 uint8_t v = val;
2489 cpu_physical_memory_write(addr, &v, 1);
2490}
2491
733f0b02 2492/* warning: addr must be aligned */
a8170e5e 2493static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2494 enum device_endian endian)
aab33094 2495{
733f0b02 2496 uint8_t *ptr;
5c8a00ce 2497 MemoryRegion *mr;
149f54b5
PB
2498 hwaddr l = 2;
2499 hwaddr addr1;
733f0b02 2500
5c8a00ce
PB
2501 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2502 true);
2503 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2504#if defined(TARGET_WORDS_BIGENDIAN)
2505 if (endian == DEVICE_LITTLE_ENDIAN) {
2506 val = bswap16(val);
2507 }
2508#else
2509 if (endian == DEVICE_BIG_ENDIAN) {
2510 val = bswap16(val);
2511 }
2512#endif
5c8a00ce 2513 io_mem_write(mr, addr1, val, 2);
733f0b02 2514 } else {
733f0b02 2515 /* RAM case */
5c8a00ce 2516 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2517 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2518 switch (endian) {
2519 case DEVICE_LITTLE_ENDIAN:
2520 stw_le_p(ptr, val);
2521 break;
2522 case DEVICE_BIG_ENDIAN:
2523 stw_be_p(ptr, val);
2524 break;
2525 default:
2526 stw_p(ptr, val);
2527 break;
2528 }
51d7a9eb 2529 invalidate_and_set_dirty(addr1, 2);
733f0b02 2530 }
aab33094
FB
2531}
2532
a8170e5e 2533void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2534{
2535 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2536}
2537
a8170e5e 2538void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2539{
2540 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2541}
2542
a8170e5e 2543void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2544{
2545 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2546}
2547
aab33094 2548/* XXX: optimize */
a8170e5e 2549void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2550{
2551 val = tswap64(val);
71d2b725 2552 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2553}
2554
a8170e5e 2555void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2556{
2557 val = cpu_to_le64(val);
2558 cpu_physical_memory_write(addr, &val, 8);
2559}
2560
a8170e5e 2561void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2562{
2563 val = cpu_to_be64(val);
2564 cpu_physical_memory_write(addr, &val, 8);
2565}
2566
5e2972fd 2567/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2568int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2569 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2570{
2571 int l;
a8170e5e 2572 hwaddr phys_addr;
9b3c35e0 2573 target_ulong page;
13eb76e0
FB
2574
2575 while (len > 0) {
2576 page = addr & TARGET_PAGE_MASK;
2577 phys_addr = cpu_get_phys_page_debug(env, page);
2578 /* if no physical page mapped, return an error */
2579 if (phys_addr == -1)
2580 return -1;
2581 l = (page + TARGET_PAGE_SIZE) - addr;
2582 if (l > len)
2583 l = len;
5e2972fd 2584 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2585 if (is_write)
2586 cpu_physical_memory_write_rom(phys_addr, buf, l);
2587 else
5e2972fd 2588 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2589 len -= l;
2590 buf += l;
2591 addr += l;
2592 }
2593 return 0;
2594}
a68fe89c 2595#endif
13eb76e0 2596
#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return true;
#else
    return false;
#endif
}

#endif
2614
76f35538 2615#ifndef CONFIG_USER_ONLY
a8170e5e 2616bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2617{
5c8a00ce 2618 MemoryRegion*mr;
149f54b5 2619 hwaddr l = 1;
76f35538 2620
5c8a00ce
PB
2621 mr = address_space_translate(&address_space_memory,
2622 phys_addr, &phys_addr, &l, false);
76f35538 2623
5c8a00ce
PB
2624 return !(memory_region_is_ram(mr) ||
2625 memory_region_is_romd(mr));
76f35538 2626}
bd2fa51f
MH
2627
2628void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2629{
2630 RAMBlock *block;
2631
2632 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2633 func(block->host, block->offset, block->length, opaque);
2634 }
2635}
ec3f8c99 2636#endif