/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
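
/* Note: the dispatch structures above form a small radix tree.
 * AddressSpaceDispatch.phys_map is the root PhysPageEntry; an interior
 * entry (!is_leaf) indexes into the Node array, while a leaf entry indexes
 * into the MemoryRegionSection array.  A subpage_t is used when a single
 * target page is shared between several sections: sub_section[] maps the
 * offset within that page (SUBPAGE_IDX) back to a section number.
 */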

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2, 16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
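
/* Illustrative example: a caller that needs the target MemoryRegion for a
 * physical address can do roughly
 *
 *     hwaddr xlat, len = 4;
 *     MemoryRegion *mr = address_space_translate(&address_space_memory,
 *                                                addr, &xlat, &len, false);
 *
 * which walks through any IOMMU regions until a terminal region is reached,
 * clamping len to what the final region can serve.
 */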

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu = first_cpu;

    while (cpu) {
        if (cpu->cpu_index == index) {
            break;
        }
        cpu = cpu->next_cpu;
    }

    return cpu;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUState *cpu;

    cpu = first_cpu;
    while (cpu) {
        func(cpu, data);
        cpu = cpu->next_cpu;
    }
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState **pcpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->next_cpu = NULL;
    pcpu = &first_cpu;
    cpu_index = 0;
    while (*pcpu != NULL) {
        pcpu = &(*pcpu)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *pcpu = cpu;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(cpu, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
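
/* Note: len_mask encodes the power-of-two length of the watchpoint, e.g.
 * len = 4 gives len_mask = ~3, and an aligned access at vaddr matches when
 * (vaddr & len_mask) == wp->vaddr.  The sanity check above rejects lengths
 * that are zero, not a power of two, or larger than a page, and addresses
 * that are not aligned to the length.
 */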

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    CPUArchState *env = cpu->env_ptr;

    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    /* Reset non arch specific state */
    cpu_reset(ENV_GET_CPU(new_env));

    /* Copy arch specific state into the new CPU */
    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
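
/* Note on the iotlb encoding: phys_section_add() asserts that there are
 * fewer than TARGET_PAGE_SIZE sections, so a section number always fits in
 * the sub-page bits of the value.  For RAM, PHYS_SECTION_NOTDIRTY or
 * PHYS_SECTION_ROM is simply ORed into the page-aligned ram address; for
 * MMIO, the section index (plus the in-page offset) is stored directly and
 * iotlb_to_region() later recovers the MemoryRegion from those low bits.
 */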
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
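
/* Note: mem_add() splits an incoming section into an unaligned head and
 * tail, which are routed through subpage_t containers, and a page-aligned
 * middle, which is registered as whole pages; a page whose offset within
 * its region is not page-aligned is likewise handled as a subpage.  The
 * page table therefore only ever points at full pages or subpages.
 */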

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif
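
/* Usage note: the hugetlbfs path above is only taken when the user passes
 * -mem-path, for example
 *
 *     qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages ...
 *
 * in which case guest RAM is backed by files on hugetlbfs instead of
 * anonymous memory.
 */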

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC | PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', size, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      size, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
           mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->as = as;
    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    next->nodes = next_map.nodes;
    next->sections = next_map.sections;

    as->dispatch = next;
    g_free(cur);
}
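
/* Note: the dispatch tables are rebuilt in two phases.  core_begin() (below)
 * snapshots the old map into prev_map and resets next_map; mem_begin()
 * allocates a fresh AddressSpaceDispatch per address space, mem_add()
 * populates it through next_map, and mem_commit() flips as->dispatch to the
 * new table.  core_commit(), which runs last, frees the previous map.
 */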
1729
50c1e149
AK
1730static void core_begin(MemoryListener *listener)
1731{
b41aac4f
LPF
1732 uint16_t n;
1733
6092666e
PB
1734 prev_map = g_new(PhysPageMap, 1);
1735 *prev_map = next_map;
1736
9affd6fc 1737 memset(&next_map, 0, sizeof(next_map));
b41aac4f
LPF
1738 n = dummy_section(&io_mem_unassigned);
1739 assert(n == PHYS_SECTION_UNASSIGNED);
1740 n = dummy_section(&io_mem_notdirty);
1741 assert(n == PHYS_SECTION_NOTDIRTY);
1742 n = dummy_section(&io_mem_rom);
1743 assert(n == PHYS_SECTION_ROM);
1744 n = dummy_section(&io_mem_watch);
1745 assert(n == PHYS_SECTION_WATCH);
50c1e149
AK
1746}
1747
9affd6fc
PB
 1748/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
1749 * All AddressSpaceDispatch instances have switched to the next map.
1750 */
1751static void core_commit(MemoryListener *listener)
1752{
6092666e 1753 phys_sections_free(prev_map);
9affd6fc
PB
1754}
1755
1d71148e 1756static void tcg_commit(MemoryListener *listener)
50c1e149 1757{
182735ef 1758 CPUState *cpu;
117712c3
AK
1759
1760 /* since each CPU stores ram addresses in its TLB cache, we must
1761 reset the modified entries */
1762 /* XXX: slow ! */
182735ef
AF
1763 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1764 CPUArchState *env = cpu->env_ptr;
1765
117712c3
AK
1766 tlb_flush(env, 1);
1767 }
50c1e149
AK
1768}
1769
93632747
AK
1770static void core_log_global_start(MemoryListener *listener)
1771{
1772 cpu_physical_memory_set_dirty_tracking(1);
1773}
1774
1775static void core_log_global_stop(MemoryListener *listener)
1776{
1777 cpu_physical_memory_set_dirty_tracking(0);
1778}
1779
93632747 1780static MemoryListener core_memory_listener = {
50c1e149 1781 .begin = core_begin,
9affd6fc 1782 .commit = core_commit,
93632747
AK
1783 .log_global_start = core_log_global_start,
1784 .log_global_stop = core_log_global_stop,
ac1970fb 1785 .priority = 1,
93632747
AK
1786};
1787
1d71148e
AK
1788static MemoryListener tcg_memory_listener = {
1789 .commit = tcg_commit,
1790};
1791
ac1970fb
AK
1792void address_space_init_dispatch(AddressSpace *as)
1793{
00752703 1794 as->dispatch = NULL;
89ae337a 1795 as->dispatch_listener = (MemoryListener) {
ac1970fb 1796 .begin = mem_begin,
00752703 1797 .commit = mem_commit,
ac1970fb
AK
1798 .region_add = mem_add,
1799 .region_nop = mem_add,
1800 .priority = 0,
1801 };
89ae337a 1802 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1803}
1804
83f3c251
AK
1805void address_space_destroy_dispatch(AddressSpace *as)
1806{
1807 AddressSpaceDispatch *d = as->dispatch;
1808
89ae337a 1809 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1810 g_free(d);
1811 as->dispatch = NULL;
1812}
1813
62152b8a
AK
1814static void memory_map_init(void)
1815{
7267c094 1816 system_memory = g_malloc(sizeof(*system_memory));
2c9b15ca 1817 memory_region_init(system_memory, NULL, "system", INT64_MAX);
7dca8043 1818 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1819
7267c094 1820 system_io = g_malloc(sizeof(*system_io));
2c9b15ca 1821 memory_region_init(system_io, NULL, "io", 65536);
7dca8043 1822 address_space_init(&address_space_io, system_io, "I/O");
93632747 1823
f6790af6 1824 memory_listener_register(&core_memory_listener, &address_space_memory);
f6790af6 1825 memory_listener_register(&tcg_memory_listener, &address_space_memory);
62152b8a
AK
1826}
1827
1828MemoryRegion *get_system_memory(void)
1829{
1830 return system_memory;
1831}
1832
309cb471
AK
1833MemoryRegion *get_system_io(void)
1834{
1835 return system_io;
1836}
1837
e2eef170
PB
1838#endif /* !defined(CONFIG_USER_ONLY) */
1839
13eb76e0
FB
1840/* physical memory access (slow version, mainly for debug) */
1841#if defined(CONFIG_USER_ONLY)
f17ec444 1842int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 1843 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1844{
1845 int l, flags;
1846 target_ulong page;
53a5960a 1847 void * p;
13eb76e0
FB
1848
1849 while (len > 0) {
1850 page = addr & TARGET_PAGE_MASK;
1851 l = (page + TARGET_PAGE_SIZE) - addr;
1852 if (l > len)
1853 l = len;
1854 flags = page_get_flags(page);
1855 if (!(flags & PAGE_VALID))
a68fe89c 1856 return -1;
13eb76e0
FB
1857 if (is_write) {
1858 if (!(flags & PAGE_WRITE))
a68fe89c 1859 return -1;
579a97f7 1860 /* XXX: this code should not depend on lock_user */
72fb7daa 1861 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1862 return -1;
72fb7daa
AJ
1863 memcpy(p, buf, l);
1864 unlock_user(p, addr, l);
13eb76e0
FB
1865 } else {
1866 if (!(flags & PAGE_READ))
a68fe89c 1867 return -1;
579a97f7 1868 /* XXX: this code should not depend on lock_user */
72fb7daa 1869 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1870 return -1;
72fb7daa 1871 memcpy(buf, p, l);
5b257578 1872 unlock_user(p, addr, 0);
13eb76e0
FB
1873 }
1874 len -= l;
1875 buf += l;
1876 addr += l;
1877 }
a68fe89c 1878 return 0;
13eb76e0 1879}
8df1cd07 1880
13eb76e0 1881#else
51d7a9eb 1882
a8170e5e
AK
1883static void invalidate_and_set_dirty(hwaddr addr,
1884 hwaddr length)
51d7a9eb
AP
1885{
1886 if (!cpu_physical_memory_is_dirty(addr)) {
1887 /* invalidate code */
1888 tb_invalidate_phys_page_range(addr, addr + length, 0);
1889 /* set dirty bit */
1890 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1891 }
e226939d 1892 xen_modified_memory(addr, length);
51d7a9eb
AP
1893}
1894
2bbfa05d
PB
1895static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1896{
1897 if (memory_region_is_ram(mr)) {
1898 return !(is_write && mr->readonly);
1899 }
1900 if (memory_region_is_romd(mr)) {
1901 return !is_write;
1902 }
1903
1904 return false;
1905}
1906
23326164 1907static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 1908{
e1622f4b 1909 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
1910
1911 /* Regions are assumed to support 1-4 byte accesses unless
1912 otherwise specified. */
23326164
RH
1913 if (access_size_max == 0) {
1914 access_size_max = 4;
1915 }
1916
1917 /* Bound the maximum access by the alignment of the address. */
1918 if (!mr->ops->impl.unaligned) {
1919 unsigned align_size_max = addr & -addr;
1920 if (align_size_max != 0 && align_size_max < access_size_max) {
1921 access_size_max = align_size_max;
1922 }
82f2563f 1923 }
23326164
RH
1924
1925 /* Don't attempt accesses larger than the maximum. */
1926 if (l > access_size_max) {
1927 l = access_size_max;
82f2563f 1928 }
23326164
RH
1929
1930 return l;
82f2563f
PB
1931}
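/*
 * Worked example for the alignment bound above (illustrative, not part of
 * the original file): for addr = 0x1006, addr & -addr == 0x2, i.e. the
 * largest power of two that divides the address.  So even if a region
 * declares valid.max_access_size = 8, an 8-byte request at 0x1006 is trimmed
 * to a 2-byte access here, and the remaining bytes are picked up by the next
 * iterations of the callers' loops.
 */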
1932
fd8aaa76 1933bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1934 int len, bool is_write)
13eb76e0 1935{
149f54b5 1936 hwaddr l;
13eb76e0 1937 uint8_t *ptr;
791af8c8 1938 uint64_t val;
149f54b5 1939 hwaddr addr1;
5c8a00ce 1940 MemoryRegion *mr;
fd8aaa76 1941 bool error = false;
3b46e624 1942
13eb76e0 1943 while (len > 0) {
149f54b5 1944 l = len;
5c8a00ce 1945 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 1946
13eb76e0 1947 if (is_write) {
5c8a00ce
PB
1948 if (!memory_access_is_direct(mr, is_write)) {
1949 l = memory_access_size(mr, l, addr1);
4917cf44 1950 /* XXX: could force current_cpu to NULL to avoid
6a00d601 1951 potential bugs */
23326164
RH
1952 switch (l) {
1953 case 8:
1954 /* 64 bit write access */
1955 val = ldq_p(buf);
1956 error |= io_mem_write(mr, addr1, val, 8);
1957 break;
1958 case 4:
1c213d19 1959 /* 32 bit write access */
c27004ec 1960 val = ldl_p(buf);
5c8a00ce 1961 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
1962 break;
1963 case 2:
1c213d19 1964 /* 16 bit write access */
c27004ec 1965 val = lduw_p(buf);
5c8a00ce 1966 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
1967 break;
1968 case 1:
1c213d19 1969 /* 8 bit write access */
c27004ec 1970 val = ldub_p(buf);
5c8a00ce 1971 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
1972 break;
1973 default:
1974 abort();
13eb76e0 1975 }
2bbfa05d 1976 } else {
5c8a00ce 1977 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 1978 /* RAM case */
5579c7f3 1979 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1980 memcpy(ptr, buf, l);
51d7a9eb 1981 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1982 }
1983 } else {
5c8a00ce 1984 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 1985 /* I/O case */
5c8a00ce 1986 l = memory_access_size(mr, l, addr1);
23326164
RH
1987 switch (l) {
1988 case 8:
1989 /* 64 bit read access */
1990 error |= io_mem_read(mr, addr1, &val, 8);
1991 stq_p(buf, val);
1992 break;
1993 case 4:
13eb76e0 1994 /* 32 bit read access */
5c8a00ce 1995 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 1996 stl_p(buf, val);
23326164
RH
1997 break;
1998 case 2:
13eb76e0 1999 /* 16 bit read access */
5c8a00ce 2000 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2001 stw_p(buf, val);
23326164
RH
2002 break;
2003 case 1:
1c213d19 2004 /* 8 bit read access */
5c8a00ce 2005 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2006 stb_p(buf, val);
23326164
RH
2007 break;
2008 default:
2009 abort();
13eb76e0
FB
2010 }
2011 } else {
2012 /* RAM case */
5c8a00ce 2013 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2014 memcpy(buf, ptr, l);
13eb76e0
FB
2015 }
2016 }
2017 len -= l;
2018 buf += l;
2019 addr += l;
2020 }
fd8aaa76
PB
2021
2022 return error;
13eb76e0 2023}
8df1cd07 2024
fd8aaa76 2025bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2026 const uint8_t *buf, int len)
2027{
fd8aaa76 2028 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2029}
2030
fd8aaa76 2031bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2032{
fd8aaa76 2033 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2034}
2035
2036
a8170e5e 2037void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2038 int len, int is_write)
2039{
fd8aaa76 2040 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2041}
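/* Illustrative sketch (not part of the original file): reading a few bytes of
 * guest-physical memory through the slow path above.  address_space_rw()
 * returns true if any part of the access hit an unassigned or failing region.
 * The helper name and the caller-supplied address are hypothetical. */
static bool example_read_guest_word(hwaddr guest_addr, uint32_t *out)
{
    uint8_t buf[4];
    bool error;

    error = address_space_rw(&address_space_memory, guest_addr, buf,
                             sizeof(buf), false);
    *out = ldl_p(buf);      /* interpret the bytes in host-native order */
    return !error;
}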
2042
d0ecd2aa 2043/* used for ROM loading: can write to RAM and ROM */
a8170e5e 2044void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
2045 const uint8_t *buf, int len)
2046{
149f54b5 2047 hwaddr l;
d0ecd2aa 2048 uint8_t *ptr;
149f54b5 2049 hwaddr addr1;
5c8a00ce 2050 MemoryRegion *mr;
3b46e624 2051
d0ecd2aa 2052 while (len > 0) {
149f54b5 2053 l = len;
5c8a00ce
PB
2054 mr = address_space_translate(&address_space_memory,
2055 addr, &addr1, &l, true);
3b46e624 2056
5c8a00ce
PB
2057 if (!(memory_region_is_ram(mr) ||
2058 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2059 /* do nothing */
2060 } else {
5c8a00ce 2061 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2062 /* ROM/RAM case */
5579c7f3 2063 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2064 memcpy(ptr, buf, l);
51d7a9eb 2065 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2066 }
2067 len -= l;
2068 buf += l;
2069 addr += l;
2070 }
2071}
2072
6d16c2f8 2073typedef struct {
d3e71559 2074 MemoryRegion *mr;
6d16c2f8 2075 void *buffer;
a8170e5e
AK
2076 hwaddr addr;
2077 hwaddr len;
6d16c2f8
AL
2078} BounceBuffer;
2079
2080static BounceBuffer bounce;
2081
ba223c29
AL
2082typedef struct MapClient {
2083 void *opaque;
2084 void (*callback)(void *opaque);
72cf2d4f 2085 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2086} MapClient;
2087
72cf2d4f
BS
2088static QLIST_HEAD(map_client_list, MapClient) map_client_list
2089 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2090
2091void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2092{
7267c094 2093 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2094
2095 client->opaque = opaque;
2096 client->callback = callback;
72cf2d4f 2097 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2098 return client;
2099}
2100
8b9c99d9 2101static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2102{
2103 MapClient *client = (MapClient *)_client;
2104
72cf2d4f 2105 QLIST_REMOVE(client, link);
7267c094 2106 g_free(client);
ba223c29
AL
2107}
2108
2109static void cpu_notify_map_clients(void)
2110{
2111 MapClient *client;
2112
72cf2d4f
BS
2113 while (!QLIST_EMPTY(&map_client_list)) {
2114 client = QLIST_FIRST(&map_client_list);
ba223c29 2115 client->callback(client->opaque);
34d5e948 2116 cpu_unregister_map_client(client);
ba223c29
AL
2117 }
2118}
2119
51644ab7
PB
2120bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2121{
5c8a00ce 2122 MemoryRegion *mr;
51644ab7
PB
2123 hwaddr l, xlat;
2124
2125 while (len > 0) {
2126 l = len;
5c8a00ce
PB
2127 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2128 if (!memory_access_is_direct(mr, is_write)) {
2129 l = memory_access_size(mr, l, addr);
2130 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2131 return false;
2132 }
2133 }
2134
2135 len -= l;
2136 addr += l;
2137 }
2138 return true;
2139}
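/* Illustrative sketch (not part of the original file): a device model can
 * probe whether a DMA window is fully backed before committing to a transfer,
 * instead of finding out halfway through address_space_rw().  The helper name
 * is hypothetical. */
static bool example_dma_window_ok(AddressSpace *as, hwaddr base, int len)
{
    return address_space_access_valid(as, base, len, true) &&
           address_space_access_valid(as, base, len, false);
}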
2140
6d16c2f8
AL
2141/* Map a physical memory region into a host virtual address.
2142 * May map a subset of the requested range, given by and returned in *plen.
2143 * May return NULL if resources needed to perform the mapping are exhausted.
2144 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2145 * Use cpu_register_map_client() to know when retrying the map operation is
2146 * likely to succeed.
6d16c2f8 2147 */
ac1970fb 2148void *address_space_map(AddressSpace *as,
a8170e5e
AK
2149 hwaddr addr,
2150 hwaddr *plen,
ac1970fb 2151 bool is_write)
6d16c2f8 2152{
a8170e5e 2153 hwaddr len = *plen;
e3127ae0
PB
2154 hwaddr done = 0;
2155 hwaddr l, xlat, base;
2156 MemoryRegion *mr, *this_mr;
2157 ram_addr_t raddr;
6d16c2f8 2158
e3127ae0
PB
2159 if (len == 0) {
2160 return NULL;
2161 }
38bee5dc 2162
e3127ae0
PB
2163 l = len;
2164 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2165 if (!memory_access_is_direct(mr, is_write)) {
2166 if (bounce.buffer) {
2167 return NULL;
6d16c2f8 2168 }
e3127ae0
PB
2169 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2170 bounce.addr = addr;
2171 bounce.len = l;
d3e71559
PB
2172
2173 memory_region_ref(mr);
2174 bounce.mr = mr;
e3127ae0
PB
2175 if (!is_write) {
2176 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2177 }
6d16c2f8 2178
e3127ae0
PB
2179 *plen = l;
2180 return bounce.buffer;
2181 }
2182
2183 base = xlat;
2184 raddr = memory_region_get_ram_addr(mr);
2185
2186 for (;;) {
6d16c2f8
AL
2187 len -= l;
2188 addr += l;
e3127ae0
PB
2189 done += l;
2190 if (len == 0) {
2191 break;
2192 }
2193
2194 l = len;
2195 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2196 if (this_mr != mr || xlat != base + done) {
2197 break;
2198 }
6d16c2f8 2199 }
e3127ae0 2200
d3e71559 2201 memory_region_ref(mr);
e3127ae0
PB
2202 *plen = done;
2203 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2204}
2205
ac1970fb 2206/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2207 * Will also mark the memory as dirty if is_write == 1. access_len gives
2208 * the amount of memory that was actually read or written by the caller.
2209 */
a8170e5e
AK
2210void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2211 int is_write, hwaddr access_len)
6d16c2f8
AL
2212{
2213 if (buffer != bounce.buffer) {
d3e71559
PB
2214 MemoryRegion *mr;
2215 ram_addr_t addr1;
2216
2217 mr = qemu_ram_addr_from_host(buffer, &addr1);
2218 assert(mr != NULL);
6d16c2f8 2219 if (is_write) {
6d16c2f8
AL
2220 while (access_len) {
2221 unsigned l;
2222 l = TARGET_PAGE_SIZE;
2223 if (l > access_len)
2224 l = access_len;
51d7a9eb 2225 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2226 addr1 += l;
2227 access_len -= l;
2228 }
2229 }
868bb33f 2230 if (xen_enabled()) {
e41d7c69 2231 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2232 }
d3e71559 2233 memory_region_unref(mr);
6d16c2f8
AL
2234 return;
2235 }
2236 if (is_write) {
ac1970fb 2237 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2238 }
f8a83245 2239 qemu_vfree(bounce.buffer);
6d16c2f8 2240 bounce.buffer = NULL;
d3e71559 2241 memory_region_unref(bounce.mr);
ba223c29 2242 cpu_notify_map_clients();
6d16c2f8 2243}
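/* Illustrative sketch (not part of the original file): the usual
 * map/access/unmap pattern for the two functions above.  If the target is not
 * directly accessible, the single bounce buffer may already be busy and
 * address_space_map() returns NULL; a real caller would then register a
 * callback with cpu_register_map_client() and retry later.  The helper name
 * and fill pattern are hypothetical. */
static bool example_fill_guest_buffer(AddressSpace *as, hwaddr addr,
                                      hwaddr len, uint8_t pattern)
{
    hwaddr mapped_len = len;
    void *host = address_space_map(as, addr, &mapped_len, true);

    if (!host) {
        return false;               /* caller should retry via a map client */
    }
    memset(host, pattern, mapped_len);
    address_space_unmap(as, host, mapped_len, true, mapped_len);
    return mapped_len == len;       /* may have mapped only a prefix */
}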
d0ecd2aa 2244
a8170e5e
AK
2245void *cpu_physical_memory_map(hwaddr addr,
2246 hwaddr *plen,
ac1970fb
AK
2247 int is_write)
2248{
2249 return address_space_map(&address_space_memory, addr, plen, is_write);
2250}
2251
a8170e5e
AK
2252void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2253 int is_write, hwaddr access_len)
ac1970fb
AK
2254{
2255 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2256}
2257
8df1cd07 2258/* warning: addr must be aligned */
a8170e5e 2259static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2260 enum device_endian endian)
8df1cd07 2261{
8df1cd07 2262 uint8_t *ptr;
791af8c8 2263 uint64_t val;
5c8a00ce 2264 MemoryRegion *mr;
149f54b5
PB
2265 hwaddr l = 4;
2266 hwaddr addr1;
8df1cd07 2267
5c8a00ce
PB
2268 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2269 false);
2270 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2271 /* I/O case */
5c8a00ce 2272 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2273#if defined(TARGET_WORDS_BIGENDIAN)
2274 if (endian == DEVICE_LITTLE_ENDIAN) {
2275 val = bswap32(val);
2276 }
2277#else
2278 if (endian == DEVICE_BIG_ENDIAN) {
2279 val = bswap32(val);
2280 }
2281#endif
8df1cd07
FB
2282 } else {
2283 /* RAM case */
5c8a00ce 2284 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2285 & TARGET_PAGE_MASK)
149f54b5 2286 + addr1);
1e78bcc1
AG
2287 switch (endian) {
2288 case DEVICE_LITTLE_ENDIAN:
2289 val = ldl_le_p(ptr);
2290 break;
2291 case DEVICE_BIG_ENDIAN:
2292 val = ldl_be_p(ptr);
2293 break;
2294 default:
2295 val = ldl_p(ptr);
2296 break;
2297 }
8df1cd07
FB
2298 }
2299 return val;
2300}
2301
a8170e5e 2302uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2303{
2304 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2305}
2306
a8170e5e 2307uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2308{
2309 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2310}
2311
a8170e5e 2312uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2313{
2314 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2315}
2316
84b7b8e7 2317/* warning: addr must be aligned */
a8170e5e 2318static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2319 enum device_endian endian)
84b7b8e7 2320{
84b7b8e7
FB
2321 uint8_t *ptr;
2322 uint64_t val;
5c8a00ce 2323 MemoryRegion *mr;
149f54b5
PB
2324 hwaddr l = 8;
2325 hwaddr addr1;
84b7b8e7 2326
5c8a00ce
PB
2327 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2328 false);
2329 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2330 /* I/O case */
5c8a00ce 2331 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2332#if defined(TARGET_WORDS_BIGENDIAN)
2333 if (endian == DEVICE_LITTLE_ENDIAN) {
2334 val = bswap64(val);
2335 }
2336#else
2337 if (endian == DEVICE_BIG_ENDIAN) {
2338 val = bswap64(val);
2339 }
84b7b8e7
FB
2340#endif
2341 } else {
2342 /* RAM case */
5c8a00ce 2343 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2344 & TARGET_PAGE_MASK)
149f54b5 2345 + addr1);
1e78bcc1
AG
2346 switch (endian) {
2347 case DEVICE_LITTLE_ENDIAN:
2348 val = ldq_le_p(ptr);
2349 break;
2350 case DEVICE_BIG_ENDIAN:
2351 val = ldq_be_p(ptr);
2352 break;
2353 default:
2354 val = ldq_p(ptr);
2355 break;
2356 }
84b7b8e7
FB
2357 }
2358 return val;
2359}
2360
a8170e5e 2361uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2362{
2363 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2364}
2365
a8170e5e 2366uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2367{
2368 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2369}
2370
a8170e5e 2371uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2372{
2373 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2374}
2375
aab33094 2376/* XXX: optimize */
a8170e5e 2377uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2378{
2379 uint8_t val;
2380 cpu_physical_memory_read(addr, &val, 1);
2381 return val;
2382}
2383
733f0b02 2384/* warning: addr must be aligned */
a8170e5e 2385static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2386 enum device_endian endian)
aab33094 2387{
733f0b02
MT
2388 uint8_t *ptr;
2389 uint64_t val;
5c8a00ce 2390 MemoryRegion *mr;
149f54b5
PB
2391 hwaddr l = 2;
2392 hwaddr addr1;
733f0b02 2393
5c8a00ce
PB
2394 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2395 false);
2396 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2397 /* I/O case */
5c8a00ce 2398 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2399#if defined(TARGET_WORDS_BIGENDIAN)
2400 if (endian == DEVICE_LITTLE_ENDIAN) {
2401 val = bswap16(val);
2402 }
2403#else
2404 if (endian == DEVICE_BIG_ENDIAN) {
2405 val = bswap16(val);
2406 }
2407#endif
733f0b02
MT
2408 } else {
2409 /* RAM case */
5c8a00ce 2410 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2411 & TARGET_PAGE_MASK)
149f54b5 2412 + addr1);
1e78bcc1
AG
2413 switch (endian) {
2414 case DEVICE_LITTLE_ENDIAN:
2415 val = lduw_le_p(ptr);
2416 break;
2417 case DEVICE_BIG_ENDIAN:
2418 val = lduw_be_p(ptr);
2419 break;
2420 default:
2421 val = lduw_p(ptr);
2422 break;
2423 }
733f0b02
MT
2424 }
2425 return val;
aab33094
FB
2426}
2427
a8170e5e 2428uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2429{
2430 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2431}
2432
a8170e5e 2433uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2434{
2435 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2436}
2437
a8170e5e 2438uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2439{
2440 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2441}
2442
8df1cd07
FB
 2443/* warning: addr must be aligned. The ram page is not marked as dirty
2444 and the code inside is not invalidated. It is useful if the dirty
2445 bits are used to track modified PTEs */
a8170e5e 2446void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2447{
8df1cd07 2448 uint8_t *ptr;
5c8a00ce 2449 MemoryRegion *mr;
149f54b5
PB
2450 hwaddr l = 4;
2451 hwaddr addr1;
8df1cd07 2452
5c8a00ce
PB
2453 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2454 true);
2455 if (l < 4 || !memory_access_is_direct(mr, true)) {
2456 io_mem_write(mr, addr1, val, 4);
8df1cd07 2457 } else {
5c8a00ce 2458 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2459 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2460 stl_p(ptr, val);
74576198
AL
2461
2462 if (unlikely(in_migration)) {
2463 if (!cpu_physical_memory_is_dirty(addr1)) {
2464 /* invalidate code */
2465 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2466 /* set dirty bit */
f7c11b53
YT
2467 cpu_physical_memory_set_dirty_flags(
2468 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2469 }
2470 }
8df1cd07
FB
2471 }
2472}
2473
2474/* warning: addr must be aligned */
a8170e5e 2475static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2476 enum device_endian endian)
8df1cd07 2477{
8df1cd07 2478 uint8_t *ptr;
5c8a00ce 2479 MemoryRegion *mr;
149f54b5
PB
2480 hwaddr l = 4;
2481 hwaddr addr1;
8df1cd07 2482
5c8a00ce
PB
2483 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2484 true);
2485 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2486#if defined(TARGET_WORDS_BIGENDIAN)
2487 if (endian == DEVICE_LITTLE_ENDIAN) {
2488 val = bswap32(val);
2489 }
2490#else
2491 if (endian == DEVICE_BIG_ENDIAN) {
2492 val = bswap32(val);
2493 }
2494#endif
5c8a00ce 2495 io_mem_write(mr, addr1, val, 4);
8df1cd07 2496 } else {
8df1cd07 2497 /* RAM case */
5c8a00ce 2498 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2499 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2500 switch (endian) {
2501 case DEVICE_LITTLE_ENDIAN:
2502 stl_le_p(ptr, val);
2503 break;
2504 case DEVICE_BIG_ENDIAN:
2505 stl_be_p(ptr, val);
2506 break;
2507 default:
2508 stl_p(ptr, val);
2509 break;
2510 }
51d7a9eb 2511 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2512 }
2513}
2514
a8170e5e 2515void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2516{
2517 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2518}
2519
a8170e5e 2520void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2521{
2522 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2523}
2524
a8170e5e 2525void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2526{
2527 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2528}
2529
aab33094 2530/* XXX: optimize */
a8170e5e 2531void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2532{
2533 uint8_t v = val;
2534 cpu_physical_memory_write(addr, &v, 1);
2535}
2536
733f0b02 2537/* warning: addr must be aligned */
a8170e5e 2538static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2539 enum device_endian endian)
aab33094 2540{
733f0b02 2541 uint8_t *ptr;
5c8a00ce 2542 MemoryRegion *mr;
149f54b5
PB
2543 hwaddr l = 2;
2544 hwaddr addr1;
733f0b02 2545
5c8a00ce
PB
2546 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2547 true);
2548 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2549#if defined(TARGET_WORDS_BIGENDIAN)
2550 if (endian == DEVICE_LITTLE_ENDIAN) {
2551 val = bswap16(val);
2552 }
2553#else
2554 if (endian == DEVICE_BIG_ENDIAN) {
2555 val = bswap16(val);
2556 }
2557#endif
5c8a00ce 2558 io_mem_write(mr, addr1, val, 2);
733f0b02 2559 } else {
733f0b02 2560 /* RAM case */
5c8a00ce 2561 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2562 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2563 switch (endian) {
2564 case DEVICE_LITTLE_ENDIAN:
2565 stw_le_p(ptr, val);
2566 break;
2567 case DEVICE_BIG_ENDIAN:
2568 stw_be_p(ptr, val);
2569 break;
2570 default:
2571 stw_p(ptr, val);
2572 break;
2573 }
51d7a9eb 2574 invalidate_and_set_dirty(addr1, 2);
733f0b02 2575 }
aab33094
FB
2576}
2577
a8170e5e 2578void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2579{
2580 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2581}
2582
a8170e5e 2583void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2584{
2585 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2586}
2587
a8170e5e 2588void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2589{
2590 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2591}
2592
aab33094 2593/* XXX: optimize */
a8170e5e 2594void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2595{
2596 val = tswap64(val);
71d2b725 2597 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2598}
2599
a8170e5e 2600void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2601{
2602 val = cpu_to_le64(val);
2603 cpu_physical_memory_write(addr, &val, 8);
2604}
2605
a8170e5e 2606void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2607{
2608 val = cpu_to_be64(val);
2609 cpu_physical_memory_write(addr, &val, 8);
2610}
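/* Illustrative sketch (not part of the original file): the ld*_phys/st*_phys
 * helpers above pair naturally for a read-modify-write of a device register
 * with a fixed endianness.  The register address and bit mask passed in are
 * hypothetical. */
static void example_set_le_register_bits(hwaddr reg_addr, uint32_t bits)
{
    uint32_t val = ldl_le_phys(reg_addr);

    stl_le_phys(reg_addr, val | bits);
}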
2611
5e2972fd 2612/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2613int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2614 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2615{
2616 int l;
a8170e5e 2617 hwaddr phys_addr;
9b3c35e0 2618 target_ulong page;
13eb76e0
FB
2619
2620 while (len > 0) {
2621 page = addr & TARGET_PAGE_MASK;
f17ec444 2622 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2623 /* if no physical page mapped, return an error */
2624 if (phys_addr == -1)
2625 return -1;
2626 l = (page + TARGET_PAGE_SIZE) - addr;
2627 if (l > len)
2628 l = len;
5e2972fd 2629 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2630 if (is_write)
2631 cpu_physical_memory_write_rom(phys_addr, buf, l);
2632 else
5e2972fd 2633 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2634 len -= l;
2635 buf += l;
2636 addr += l;
2637 }
2638 return 0;
2639}
a68fe89c 2640#endif
13eb76e0 2641
8e4a424b
BS
2642#if !defined(CONFIG_USER_ONLY)
2643
2644/*
2645 * A helper function for the _utterly broken_ virtio device model to find out if
2646 * it's running on a big endian machine. Don't do this at home kids!
2647 */
2648bool virtio_is_big_endian(void);
2649bool virtio_is_big_endian(void)
2650{
2651#if defined(TARGET_WORDS_BIGENDIAN)
2652 return true;
2653#else
2654 return false;
2655#endif
2656}
2657
2658#endif
2659
76f35538 2660#ifndef CONFIG_USER_ONLY
a8170e5e 2661bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2662{
5c8a00ce 2663 MemoryRegion *mr;
149f54b5 2664 hwaddr l = 1;
76f35538 2665
5c8a00ce
PB
2666 mr = address_space_translate(&address_space_memory,
2667 phys_addr, &phys_addr, &l, false);
76f35538 2668
5c8a00ce
PB
2669 return !(memory_region_is_ram(mr) ||
2670 memory_region_is_romd(mr));
76f35538 2671}
bd2fa51f
MH
2672
2673void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2674{
2675 RAMBlock *block;
2676
2677 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2678 func(block->host, block->offset, block->length, opaque);
2679 }
2680}
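/* Illustrative sketch (not part of the original file): a callback with the
 * argument order used by the call above (host pointer, ram_addr_t offset,
 * length, opaque) that simply accumulates the total size of all RAM blocks.
 * Typical use would be: uint64_t total = 0;
 * qemu_ram_foreach_block(example_count_ram, &total); */
static void example_count_ram(void *host, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    (void)host;
    (void)offset;
    *total += length;
}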
ec3f8c99 2681#endif