54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004 4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c 22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
cc9e98cb 29#include "hw/qdev.h"
1de7afc9 30#include "qemu/osdep.h"
9c17d615 31#include "sysemu/kvm.h"
2ff3de68 32#include "sysemu/sysemu.h"
0d09e41a 33#include "hw/xen/xen.h"
1de7afc9 34#include "qemu/timer.h"
35#include "qemu/config-file.h"
75a34036 36#include "qemu/error-report.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a 40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
220c3ebd 52#include "exec/ram_addr.h"
582b55a9 53#include "qemu/cache-utils.h"
67d95c15 54
b35ba30f 55#include "qemu/range.h"
56
db7b5426 57//#define DEBUG_SUBPAGE
1196be37 58
e2eef170 59#if !defined(CONFIG_USER_ONLY)
981fdf23 60static bool in_migration;
94a6b54f 61
a3161038 62RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a 63
64static MemoryRegion *system_memory;
309cb471 65static MemoryRegion *system_io;
62152b8a 66
f6790af6 67AddressSpace address_space_io;
68AddressSpace address_space_memory;
2673a5da 69
0844e007 70MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 71static MemoryRegion io_mem_unassigned;
0e0df1e2 72
e2eef170 73#endif
9fa3e853 74
bdc44640 75struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601 76/* current CPU in the current thread. It is only valid inside
77 cpu_exec() */
4917cf44 78DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 79/* 0 = Do not count executed instructions.
bf20dc07 80 1 = Precise instruction counting.
2e70f6ef 81 2 = Adaptive rate instruction counting. */
5708fc66 82int use_icount;
6a00d601 83
e2eef170 84#if !defined(CONFIG_USER_ONLY)
4346ae3e 85
1db8abb1 86typedef struct PhysPageEntry PhysPageEntry;
87
88struct PhysPageEntry {
9736e55b 89 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 90 uint32_t skip : 6;
9736e55b 91 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 92 uint32_t ptr : 26;
1db8abb1 93};
94
8b795765 95#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
96
03f49957 97/* Size of the L2 (and L3, etc) page tables. */
57271d63 98#define ADDR_SPACE_BITS 64
03f49957 99
026736ce 100#define P_L2_BITS 9
03f49957 101#define P_L2_SIZE (1 << P_L2_BITS)
102
103#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
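/* Worked example, assuming a target with 4 KiB pages (TARGET_PAGE_BITS == 12):
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, i.e. six levels of 512-entry
 * (P_L2_SIZE) tables are enough to cover the full 64-bit address space.
 */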
104
105typedef PhysPageEntry Node[P_L2_SIZE];
0475d94f 106
53cb28cb 107typedef struct PhysPageMap {
108 unsigned sections_nb;
109 unsigned sections_nb_alloc;
110 unsigned nodes_nb;
111 unsigned nodes_nb_alloc;
112 Node *nodes;
113 MemoryRegionSection *sections;
114} PhysPageMap;
115
1db8abb1 116struct AddressSpaceDispatch {
117 /* This is a multi-level map on the physical address space.
118 * The bottom level has pointers to MemoryRegionSections.
119 */
120 PhysPageEntry phys_map;
53cb28cb 121 PhysPageMap map;
acc9d80b 122 AddressSpace *as;
1db8abb1 123};
124
90260c6c 125#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
126typedef struct subpage_t {
127 MemoryRegion iomem;
acc9d80b 128 AddressSpace *as;
90260c6c 129 hwaddr base;
130 uint16_t sub_section[TARGET_PAGE_SIZE];
131} subpage_t;
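/* Note: sub_section[] holds one section index per byte offset within the
 * page; SUBPAGE_IDX() strips the page-aligned bits, so
 * address_space_lookup_region() can index straight into this array when a
 * region does not start or end on a page boundary.
 */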
132
b41aac4f 133#define PHYS_SECTION_UNASSIGNED 0
134#define PHYS_SECTION_NOTDIRTY 1
135#define PHYS_SECTION_ROM 2
136#define PHYS_SECTION_WATCH 3
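/* These fixed indices depend on the order in which mem_begin() registers the
 * dummy sections; mem_begin() asserts that each phys_section_add() call
 * returns the matching PHYS_SECTION_* value.
 */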
5312bd8b 137
e2eef170 138static void io_mem_init(void);
62152b8a 139static void memory_map_init(void);
09daed84 140static void tcg_commit(MemoryListener *listener);
e2eef170 141
1ec9b909 142static MemoryRegion io_mem_watch;
6658ffb8 143#endif
fd6ce8f6 144
6d9a1304 145#if !defined(CONFIG_USER_ONLY)
d6f2ea22 146
53cb28cb 147static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 148{
53cb28cb 149 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
150 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
151 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
152 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 153 }
f7bf5461 154}
155
53cb28cb 156static uint32_t phys_map_node_alloc(PhysPageMap *map)
f7bf5461 157{
158 unsigned i;
8b795765 159 uint32_t ret;
f7bf5461 160
53cb28cb 161 ret = map->nodes_nb++;
f7bf5461 162 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 163 assert(ret != map->nodes_nb_alloc);
03f49957 164 for (i = 0; i < P_L2_SIZE; ++i) {
53cb28cb 165 map->nodes[ret][i].skip = 1;
166 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 167 }
f7bf5461 168 return ret;
d6f2ea22 169}
170
53cb28cb 171static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
172 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 173 int level)
f7bf5461 174{
175 PhysPageEntry *p;
176 int i;
03f49957 177 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 178
9736e55b 179 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
53cb28cb 180 lp->ptr = phys_map_node_alloc(map);
181 p = map->nodes[lp->ptr];
f7bf5461 182 if (level == 0) {
03f49957 183 for (i = 0; i < P_L2_SIZE; i++) {
9736e55b 184 p[i].skip = 0;
b41aac4f 185 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 186 }
67c4d23c 187 }
f7bf5461 188 } else {
53cb28cb 189 p = map->nodes[lp->ptr];
92e873b9 190 }
03f49957 191 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 192
03f49957 193 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 194 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 195 lp->skip = 0;
c19e8800 196 lp->ptr = leaf;
07f07b31 197 *index += step;
198 *nb -= step;
2999097b 199 } else {
53cb28cb 200 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b 201 }
202 ++lp;
f7bf5461 203 }
204}
205
ac1970fb 206static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 207 hwaddr index, hwaddr nb,
2999097b 208 uint16_t leaf)
f7bf5461 209{
2999097b 210 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 211 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 212
53cb28cb 213 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9 214}
215
b35ba30f 216/* Compact a non leaf page entry. Simply detect that the entry has a single child,
217 * and update our entry so we can skip it and go directly to the destination.
218 */
219static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
220{
221 unsigned valid_ptr = P_L2_SIZE;
222 int valid = 0;
223 PhysPageEntry *p;
224 int i;
225
226 if (lp->ptr == PHYS_MAP_NODE_NIL) {
227 return;
228 }
229
230 p = nodes[lp->ptr];
231 for (i = 0; i < P_L2_SIZE; i++) {
232 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
233 continue;
234 }
235
236 valid_ptr = i;
237 valid++;
238 if (p[i].skip) {
239 phys_page_compact(&p[i], nodes, compacted);
240 }
241 }
242
243 /* We can only compress if there's only one child. */
244 if (valid != 1) {
245 return;
246 }
247
248 assert(valid_ptr < P_L2_SIZE);
249
250 /* Don't compress if it won't fit in the # of bits we have. */
251 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
252 return;
253 }
254
255 lp->ptr = p[valid_ptr].ptr;
256 if (!p[valid_ptr].skip) {
257 /* If our only child is a leaf, make this a leaf. */
258 /* By design, we should have made this node a leaf to begin with so we
259 * should never reach here.
260 * But since it's so simple to handle this, let's do it just in case we
261 * change this rule.
262 */
263 lp->skip = 0;
264 } else {
265 lp->skip += p[valid_ptr].skip;
266 }
267}
268
269static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
270{
271 DECLARE_BITMAP(compacted, nodes_nb);
272
273 if (d->phys_map.skip) {
53cb28cb 274 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
275 }
276}
277
97115a8d 278static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 279 Node *nodes, MemoryRegionSection *sections)
92e873b9 280{
31ab2b4a 281 PhysPageEntry *p;
97115a8d 282 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 283 int i;
f1f6e3b8 284
9736e55b 285 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 286 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 287 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 288 }
9affd6fc 289 p = nodes[lp.ptr];
03f49957 290 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 291 }
b35ba30f
MT
292
293 if (sections[lp.ptr].size.hi ||
294 range_covers_byte(sections[lp.ptr].offset_within_address_space,
295 sections[lp.ptr].size.lo, addr)) {
296 return &sections[lp.ptr];
297 } else {
298 return &sections[PHYS_SECTION_UNASSIGNED];
299 }
f3705d53
AK
300}
301
e5548617
BS
302bool memory_region_is_unassigned(MemoryRegion *mr)
303{
2a8e7499 304 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 305 && mr != &io_mem_watch;
fd6ce8f6 306}
149f54b5 307
c7086b4a 308static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
309 hwaddr addr,
310 bool resolve_subpage)
9f029603 311{
90260c6c
JK
312 MemoryRegionSection *section;
313 subpage_t *subpage;
314
53cb28cb 315 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
316 if (resolve_subpage && section->mr->subpage) {
317 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 318 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
319 }
320 return section;
9f029603
JK
321}
322
90260c6c 323static MemoryRegionSection *
c7086b4a 324address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 325 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
326{
327 MemoryRegionSection *section;
a87f3954 328 Int128 diff;
149f54b5 329
c7086b4a 330 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
331 /* Compute offset within MemoryRegionSection */
332 addr -= section->offset_within_address_space;
333
334 /* Compute offset within MemoryRegion */
335 *xlat = addr + section->offset_within_region;
336
337 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 338 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
339 return section;
340}
90260c6c 341
a87f3954
PB
342static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
343{
344 if (memory_region_is_ram(mr)) {
345 return !(is_write && mr->readonly);
346 }
347 if (memory_region_is_romd(mr)) {
348 return !is_write;
349 }
350
351 return false;
352}
353
5c8a00ce
PB
354MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
355 hwaddr *xlat, hwaddr *plen,
356 bool is_write)
90260c6c 357{
30951157
AK
358 IOMMUTLBEntry iotlb;
359 MemoryRegionSection *section;
360 MemoryRegion *mr;
361 hwaddr len = *plen;
362
363 for (;;) {
a87f3954 364 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157
AK
365 mr = section->mr;
366
367 if (!mr->iommu_ops) {
368 break;
369 }
370
371 iotlb = mr->iommu_ops->translate(mr, addr);
372 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
373 | (addr & iotlb.addr_mask));
374 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
375 if (!(iotlb.perm & (1 << is_write))) {
376 mr = &io_mem_unassigned;
377 break;
378 }
379
380 as = iotlb.target_as;
381 }
382
fe680d0d 383 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954
PB
384 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
385 len = MIN(page, len);
386 }
387
30951157
AK
388 *plen = len;
389 *xlat = addr;
390 return mr;
90260c6c
JK
391}
392
393MemoryRegionSection *
394address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
395 hwaddr *plen)
396{
30951157 397 MemoryRegionSection *section;
c7086b4a 398 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
30951157
AK
399
400 assert(!section->mr->iommu_ops);
401 return section;
90260c6c 402}
5b6dd868 403#endif
fd6ce8f6 404
5b6dd868 405void cpu_exec_init_all(void)
fdbb84d1 406{
5b6dd868 407#if !defined(CONFIG_USER_ONLY)
b2a8658e 408 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
409 memory_map_init();
410 io_mem_init();
fdbb84d1 411#endif
5b6dd868 412}
fdbb84d1 413
b170fce3 414#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
415
416static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 417{
259186a7 418 CPUState *cpu = opaque;
a513fe19 419
5b6dd868
BS
420 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
421 version_id is increased. */
259186a7 422 cpu->interrupt_request &= ~0x01;
c01a71c1 423 tlb_flush(cpu, 1);
5b6dd868
BS
424
425 return 0;
a513fe19 426}
7501267e 427
1a1562f5 428const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
429 .name = "cpu_common",
430 .version_id = 1,
431 .minimum_version_id = 1,
5b6dd868 432 .post_load = cpu_common_post_load,
35d08458 433 .fields = (VMStateField[]) {
259186a7
AF
434 VMSTATE_UINT32(halted, CPUState),
435 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868
BS
436 VMSTATE_END_OF_LIST()
437 }
438};
1a1562f5 439
5b6dd868 440#endif
ea041c0e 441
38d8f5c8 442CPUState *qemu_get_cpu(int index)
ea041c0e 443{
bdc44640 444 CPUState *cpu;
ea041c0e 445
bdc44640 446 CPU_FOREACH(cpu) {
55e5c285 447 if (cpu->cpu_index == index) {
bdc44640 448 return cpu;
55e5c285 449 }
ea041c0e 450 }
5b6dd868 451
bdc44640 452 return NULL;
ea041c0e
FB
453}
454
09daed84
EI
455#if !defined(CONFIG_USER_ONLY)
456void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
457{
458 /* We only support one address space per cpu at the moment. */
459 assert(cpu->as == as);
460
461 if (cpu->tcg_as_listener) {
462 memory_listener_unregister(cpu->tcg_as_listener);
463 } else {
464 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
465 }
466 cpu->tcg_as_listener->commit = tcg_commit;
467 memory_listener_register(cpu->tcg_as_listener, as);
468}
469#endif
470
5b6dd868 471void cpu_exec_init(CPUArchState *env)
ea041c0e 472{
5b6dd868 473 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 474 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 475 CPUState *some_cpu;
5b6dd868
BS
476 int cpu_index;
477
478#if defined(CONFIG_USER_ONLY)
479 cpu_list_lock();
480#endif
5b6dd868 481 cpu_index = 0;
bdc44640 482 CPU_FOREACH(some_cpu) {
5b6dd868
BS
483 cpu_index++;
484 }
55e5c285 485 cpu->cpu_index = cpu_index;
1b1ed8dc 486 cpu->numa_node = 0;
f0c3c505 487 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 488 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 489#ifndef CONFIG_USER_ONLY
09daed84 490 cpu->as = &address_space_memory;
5b6dd868
BS
491 cpu->thread_id = qemu_get_thread_id();
492#endif
bdc44640 493 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
494#if defined(CONFIG_USER_ONLY)
495 cpu_list_unlock();
496#endif
e0d47944
AF
497 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
498 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
499 }
5b6dd868 500#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
501 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
502 cpu_save, cpu_load, env);
b170fce3 503 assert(cc->vmsd == NULL);
e0d47944 504 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 505#endif
b170fce3
AF
506 if (cc->vmsd != NULL) {
507 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
508 }
ea041c0e
FB
509}
510
1fddef4b 511#if defined(TARGET_HAS_ICE)
94df27fd 512#if defined(CONFIG_USER_ONLY)
00b941e5 513static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
514{
515 tb_invalidate_phys_page_range(pc, pc + 1, 0);
516}
517#else
00b941e5 518static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 519{
e8262a1b
MF
520 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
521 if (phys != -1) {
09daed84 522 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 523 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 524 }
1e7855a5 525}
c27004ec 526#endif
94df27fd 527#endif /* TARGET_HAS_ICE */
d720b93d 528
c527ee8f 529#if defined(CONFIG_USER_ONLY)
75a34036 530void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
531
532{
533}
534
75a34036 535int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
536 int flags, CPUWatchpoint **watchpoint)
537{
538 return -ENOSYS;
539}
540#else
6658ffb8 541/* Add a watchpoint. */
75a34036 542int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 543 int flags, CPUWatchpoint **watchpoint)
6658ffb8 544{
75a34036 545 vaddr len_mask = ~(len - 1);
c0ce998e 546 CPUWatchpoint *wp;
6658ffb8 547
b4051334 548 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
549 if ((len & (len - 1)) || (addr & ~len_mask) ||
550 len == 0 || len > TARGET_PAGE_SIZE) {
75a34036
AF
551 error_report("tried to set invalid watchpoint at %"
552 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
553 return -EINVAL;
554 }
7267c094 555 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
556
557 wp->vaddr = addr;
b4051334 558 wp->len_mask = len_mask;
a1d1bb31
AL
559 wp->flags = flags;
560
2dc9f411 561 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
562 if (flags & BP_GDB) {
563 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
564 } else {
565 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
566 }
6658ffb8 567
31b030d4 568 tlb_flush_page(cpu, addr);
a1d1bb31
AL
569
570 if (watchpoint)
571 *watchpoint = wp;
572 return 0;
6658ffb8
PB
573}
574
a1d1bb31 575/* Remove a specific watchpoint. */
75a34036 576int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 577 int flags)
6658ffb8 578{
75a34036 579 vaddr len_mask = ~(len - 1);
a1d1bb31 580 CPUWatchpoint *wp;
6658ffb8 581
ff4700b0 582 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
b4051334 583 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 584 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 585 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
586 return 0;
587 }
588 }
a1d1bb31 589 return -ENOENT;
6658ffb8
PB
590}
591
a1d1bb31 592/* Remove a specific watchpoint by reference. */
75a34036 593void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 594{
ff4700b0 595 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 596
31b030d4 597 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 598
7267c094 599 g_free(watchpoint);
a1d1bb31
AL
600}
601
602/* Remove all matching watchpoints. */
75a34036 603void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 604{
c0ce998e 605 CPUWatchpoint *wp, *next;
a1d1bb31 606
ff4700b0 607 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
608 if (wp->flags & mask) {
609 cpu_watchpoint_remove_by_ref(cpu, wp);
610 }
c0ce998e 611 }
7d03f82f 612}
c527ee8f 613#endif
7d03f82f 614
a1d1bb31 615/* Add a breakpoint. */
b3310ab3 616int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 617 CPUBreakpoint **breakpoint)
4c3a88a2 618{
1fddef4b 619#if defined(TARGET_HAS_ICE)
c0ce998e 620 CPUBreakpoint *bp;
3b46e624 621
7267c094 622 bp = g_malloc(sizeof(*bp));
4c3a88a2 623
a1d1bb31
AL
624 bp->pc = pc;
625 bp->flags = flags;
626
2dc9f411 627 /* keep all GDB-injected breakpoints in front */
00b941e5 628 if (flags & BP_GDB) {
f0c3c505 629 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 630 } else {
f0c3c505 631 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 632 }
3b46e624 633
f0c3c505 634 breakpoint_invalidate(cpu, pc);
a1d1bb31 635
00b941e5 636 if (breakpoint) {
a1d1bb31 637 *breakpoint = bp;
00b941e5 638 }
4c3a88a2
FB
639 return 0;
640#else
a1d1bb31 641 return -ENOSYS;
4c3a88a2
FB
642#endif
643}
644
a1d1bb31 645/* Remove a specific breakpoint. */
b3310ab3 646int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 647{
7d03f82f 648#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
649 CPUBreakpoint *bp;
650
f0c3c505 651 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 652 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 653 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
654 return 0;
655 }
7d03f82f 656 }
a1d1bb31
AL
657 return -ENOENT;
658#else
659 return -ENOSYS;
7d03f82f
EI
660#endif
661}
662
a1d1bb31 663/* Remove a specific breakpoint by reference. */
b3310ab3 664void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 665{
1fddef4b 666#if defined(TARGET_HAS_ICE)
f0c3c505
AF
667 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
668
669 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 670
7267c094 671 g_free(breakpoint);
a1d1bb31
AL
672#endif
673}
674
675/* Remove all matching breakpoints. */
b3310ab3 676void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31
AL
677{
678#if defined(TARGET_HAS_ICE)
c0ce998e 679 CPUBreakpoint *bp, *next;
a1d1bb31 680
f0c3c505 681 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
682 if (bp->flags & mask) {
683 cpu_breakpoint_remove_by_ref(cpu, bp);
684 }
c0ce998e 685 }
4c3a88a2
FB
686#endif
687}
688
c33a346e
FB
689/* enable or disable single step mode. EXCP_DEBUG is returned by the
690 CPU loop after each instruction */
3825b28f 691void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 692{
1fddef4b 693#if defined(TARGET_HAS_ICE)
ed2803da
AF
694 if (cpu->singlestep_enabled != enabled) {
695 cpu->singlestep_enabled = enabled;
696 if (kvm_enabled()) {
38e478ec 697 kvm_update_guest_debug(cpu, 0);
ed2803da 698 } else {
ccbb4d44 699 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 700 /* XXX: only flush what is necessary */
38e478ec 701 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
702 tb_flush(env);
703 }
c33a346e
FB
704 }
705#endif
706}
707
a47dddd7 708void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
709{
710 va_list ap;
493ae1f0 711 va_list ap2;
7501267e
FB
712
713 va_start(ap, fmt);
493ae1f0 714 va_copy(ap2, ap);
7501267e
FB
715 fprintf(stderr, "qemu: fatal: ");
716 vfprintf(stderr, fmt, ap);
717 fprintf(stderr, "\n");
878096ee 718 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
719 if (qemu_log_enabled()) {
720 qemu_log("qemu: fatal: ");
721 qemu_log_vprintf(fmt, ap2);
722 qemu_log("\n");
a0762859 723 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 724 qemu_log_flush();
93fcfe39 725 qemu_log_close();
924edcae 726 }
493ae1f0 727 va_end(ap2);
f9373291 728 va_end(ap);
fd052bf6
RV
729#if defined(CONFIG_USER_ONLY)
730 {
731 struct sigaction act;
732 sigfillset(&act.sa_mask);
733 act.sa_handler = SIG_DFL;
734 sigaction(SIGABRT, &act, NULL);
735 }
736#endif
7501267e
FB
737 abort();
738}
739
0124311e 740#if !defined(CONFIG_USER_ONLY)
041603fe
PB
741static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
742{
743 RAMBlock *block;
744
745 /* The list is protected by the iothread lock here. */
746 block = ram_list.mru_block;
747 if (block && addr - block->offset < block->length) {
748 goto found;
749 }
750 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
751 if (addr - block->offset < block->length) {
752 goto found;
753 }
754 }
755
756 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
757 abort();
758
759found:
760 ram_list.mru_block = block;
761 return block;
762}
763
a2f4d5be 764static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 765{
041603fe 766 ram_addr_t start1;
a2f4d5be
JQ
767 RAMBlock *block;
768 ram_addr_t end;
769
770 end = TARGET_PAGE_ALIGN(start + length);
771 start &= TARGET_PAGE_MASK;
d24981d3 772
041603fe
PB
773 block = qemu_get_ram_block(start);
774 assert(block == qemu_get_ram_block(end - 1));
775 start1 = (uintptr_t)block->host + (start - block->offset);
776 cpu_tlb_reset_dirty_all(start1, length);
d24981d3
JQ
777}
778
5579c7f3 779/* Note: start and end must be within the same ram block. */
a2f4d5be 780void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 781 unsigned client)
1ccde1cb 782{
1ccde1cb
FB
783 if (length == 0)
784 return;
ace694cc 785 cpu_physical_memory_clear_dirty_range(start, length, client);
f23db169 786
d24981d3 787 if (tcg_enabled()) {
a2f4d5be 788 tlb_reset_dirty_range_all(start, length);
5579c7f3 789 }
1ccde1cb
FB
790}
791
981fdf23 792static void cpu_physical_memory_set_dirty_tracking(bool enable)
74576198
AL
793{
794 in_migration = enable;
74576198
AL
795}
796
bb0e627a 797hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
798 MemoryRegionSection *section,
799 target_ulong vaddr,
800 hwaddr paddr, hwaddr xlat,
801 int prot,
802 target_ulong *address)
e5548617 803{
a8170e5e 804 hwaddr iotlb;
e5548617
BS
805 CPUWatchpoint *wp;
806
cc5bea60 807 if (memory_region_is_ram(section->mr)) {
e5548617
BS
808 /* Normal RAM. */
809 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 810 + xlat;
e5548617 811 if (!section->readonly) {
b41aac4f 812 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 813 } else {
b41aac4f 814 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
815 }
816 } else {
1b3fb98f 817 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 818 iotlb += xlat;
e5548617
BS
819 }
820
821 /* Make accesses to pages with watchpoints go via the
822 watchpoint trap routines. */
ff4700b0 823 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
e5548617
BS
824 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
825 /* Avoid trapping reads of pages with a write breakpoint. */
826 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 827 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
828 *address |= TLB_MMIO;
829 break;
830 }
831 }
832 }
833
834 return iotlb;
835}
9fa3e853
FB
836#endif /* defined(CONFIG_USER_ONLY) */
837
e2eef170 838#if !defined(CONFIG_USER_ONLY)
8da3ff18 839
c227f099 840static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 841 uint16_t section);
acc9d80b 842static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 843
575ddeb4 844static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
91138037
MA
845
846/*
847 * Set a custom physical guest memory allocator.
848 * Accelerators with unusual needs may need this. Hopefully, we can
849 * get rid of it eventually.
850 */
575ddeb4 851void phys_mem_set_alloc(void *(*alloc)(size_t))
91138037
MA
852{
853 phys_mem_alloc = alloc;
854}
855
53cb28cb
MA
856static uint16_t phys_section_add(PhysPageMap *map,
857 MemoryRegionSection *section)
5312bd8b 858{
68f3f65b
PB
859 /* The physical section number is ORed with a page-aligned
860 * pointer to produce the iotlb entries. Thus it should
861 * never overflow into the page-aligned value.
862 */
53cb28cb 863 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 864
53cb28cb
MA
865 if (map->sections_nb == map->sections_nb_alloc) {
866 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
867 map->sections = g_renew(MemoryRegionSection, map->sections,
868 map->sections_nb_alloc);
5312bd8b 869 }
53cb28cb 870 map->sections[map->sections_nb] = *section;
dfde4e6e 871 memory_region_ref(section->mr);
53cb28cb 872 return map->sections_nb++;
5312bd8b
AK
873}
874
058bc4b5
PB
875static void phys_section_destroy(MemoryRegion *mr)
876{
dfde4e6e
PB
877 memory_region_unref(mr);
878
058bc4b5
PB
879 if (mr->subpage) {
880 subpage_t *subpage = container_of(mr, subpage_t, iomem);
881 memory_region_destroy(&subpage->iomem);
882 g_free(subpage);
883 }
884}
885
6092666e 886static void phys_sections_free(PhysPageMap *map)
5312bd8b 887{
9affd6fc
PB
888 while (map->sections_nb > 0) {
889 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
890 phys_section_destroy(section->mr);
891 }
9affd6fc
PB
892 g_free(map->sections);
893 g_free(map->nodes);
5312bd8b
AK
894}
895
ac1970fb 896static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
897{
898 subpage_t *subpage;
a8170e5e 899 hwaddr base = section->offset_within_address_space
0f0cb164 900 & TARGET_PAGE_MASK;
97115a8d 901 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 902 d->map.nodes, d->map.sections);
0f0cb164
AK
903 MemoryRegionSection subsection = {
904 .offset_within_address_space = base,
052e87b0 905 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 906 };
a8170e5e 907 hwaddr start, end;
0f0cb164 908
f3705d53 909 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 910
f3705d53 911 if (!(existing->mr->subpage)) {
acc9d80b 912 subpage = subpage_init(d->as, base);
3be91e86 913 subsection.address_space = d->as;
0f0cb164 914 subsection.mr = &subpage->iomem;
ac1970fb 915 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 916 phys_section_add(&d->map, &subsection));
0f0cb164 917 } else {
f3705d53 918 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
919 }
920 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 921 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
922 subpage_register(subpage, start, end,
923 phys_section_add(&d->map, section));
0f0cb164
AK
924}
925
926
052e87b0
PB
927static void register_multipage(AddressSpaceDispatch *d,
928 MemoryRegionSection *section)
33417e70 929{
a8170e5e 930 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 931 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
932 uint64_t num_pages = int128_get64(int128_rshift(section->size,
933 TARGET_PAGE_BITS));
dd81124b 934
733d5ef5
PB
935 assert(num_pages);
936 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
937}
938
ac1970fb 939static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 940{
89ae337a 941 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 942 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 943 MemoryRegionSection now = *section, remain = *section;
052e87b0 944 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 945
733d5ef5
PB
946 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
947 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
948 - now.offset_within_address_space;
949
052e87b0 950 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 951 register_subpage(d, &now);
733d5ef5 952 } else {
052e87b0 953 now.size = int128_zero();
733d5ef5 954 }
052e87b0
PB
955 while (int128_ne(remain.size, now.size)) {
956 remain.size = int128_sub(remain.size, now.size);
957 remain.offset_within_address_space += int128_get64(now.size);
958 remain.offset_within_region += int128_get64(now.size);
69b67646 959 now = remain;
052e87b0 960 if (int128_lt(remain.size, page_size)) {
733d5ef5 961 register_subpage(d, &now);
88266249 962 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 963 now.size = page_size;
ac1970fb 964 register_subpage(d, &now);
69b67646 965 } else {
052e87b0 966 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 967 register_multipage(d, &now);
69b67646 968 }
0f0cb164
AK
969 }
970}
971
62a2744c
SY
972void qemu_flush_coalesced_mmio_buffer(void)
973{
974 if (kvm_enabled())
975 kvm_flush_coalesced_mmio_buffer();
976}
977
b2a8658e
UD
978void qemu_mutex_lock_ramlist(void)
979{
980 qemu_mutex_lock(&ram_list.mutex);
981}
982
983void qemu_mutex_unlock_ramlist(void)
984{
985 qemu_mutex_unlock(&ram_list.mutex);
986}
987
e1e84ba0 988#ifdef __linux__
c902760f
MT
989
990#include <sys/vfs.h>
991
992#define HUGETLBFS_MAGIC 0x958458f6
993
994static long gethugepagesize(const char *path)
995{
996 struct statfs fs;
997 int ret;
998
999 do {
9742bf26 1000 ret = statfs(path, &fs);
c902760f
MT
1001 } while (ret != 0 && errno == EINTR);
1002
1003 if (ret != 0) {
9742bf26
YT
1004 perror(path);
1005 return 0;
c902760f
MT
1006 }
1007
1008 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1009 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1010
1011 return fs.f_bsize;
1012}
1013
ef36fa14
MT
1014static sigjmp_buf sigjump;
1015
1016static void sigbus_handler(int signal)
1017{
1018 siglongjmp(sigjump, 1);
1019}
1020
04b16653
AW
1021static void *file_ram_alloc(RAMBlock *block,
1022 ram_addr_t memory,
1023 const char *path)
c902760f
MT
1024{
1025 char *filename;
8ca761f6
PF
1026 char *sanitized_name;
1027 char *c;
c902760f
MT
1028 void *area;
1029 int fd;
c902760f
MT
1030 unsigned long hpagesize;
1031
1032 hpagesize = gethugepagesize(path);
1033 if (!hpagesize) {
f9a49dfa 1034 goto error;
c902760f
MT
1035 }
1036
1037 if (memory < hpagesize) {
1038 return NULL;
1039 }
1040
1041 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1042 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
f9a49dfa 1043 goto error;
c902760f
MT
1044 }
1045
8ca761f6
PF
1046 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1047 sanitized_name = g_strdup(block->mr->name);
1048 for (c = sanitized_name; *c != '\0'; c++) {
1049 if (*c == '/')
1050 *c = '_';
1051 }
1052
1053 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1054 sanitized_name);
1055 g_free(sanitized_name);
c902760f
MT
1056
1057 fd = mkstemp(filename);
1058 if (fd < 0) {
9742bf26 1059 perror("unable to create backing store for hugepages");
e4ada482 1060 g_free(filename);
f9a49dfa 1061 goto error;
c902760f
MT
1062 }
1063 unlink(filename);
e4ada482 1064 g_free(filename);
c902760f
MT
1065
1066 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1067
1068 /*
1069 * ftruncate is not supported by hugetlbfs in older
1070 * hosts, so don't bother bailing out on errors.
1071 * If anything goes wrong with it under other filesystems,
1072 * mmap will fail.
1073 */
1074 if (ftruncate(fd, memory))
9742bf26 1075 perror("ftruncate");
c902760f 1076
c902760f 1077 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
c902760f 1078 if (area == MAP_FAILED) {
9742bf26
YT
1079 perror("file_ram_alloc: can't mmap RAM pages");
1080 close(fd);
f9a49dfa 1081 goto error;
c902760f 1082 }
ef36fa14
MT
1083
1084 if (mem_prealloc) {
1085 int ret, i;
1086 struct sigaction act, oldact;
1087 sigset_t set, oldset;
1088
1089 memset(&act, 0, sizeof(act));
1090 act.sa_handler = &sigbus_handler;
1091 act.sa_flags = 0;
1092
1093 ret = sigaction(SIGBUS, &act, &oldact);
1094 if (ret) {
1095 perror("file_ram_alloc: failed to install signal handler");
1096 exit(1);
1097 }
1098
1099 /* unblock SIGBUS */
1100 sigemptyset(&set);
1101 sigaddset(&set, SIGBUS);
1102 pthread_sigmask(SIG_UNBLOCK, &set, &oldset);
1103
1104 if (sigsetjmp(sigjump, 1)) {
1105 fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
1106 exit(1);
1107 }
1108
1109 /* MAP_POPULATE silently ignores failures */
2ba82852 1110 for (i = 0; i < (memory/hpagesize); i++) {
ef36fa14
MT
1111 memset(area + (hpagesize*i), 0, 1);
1112 }
1113
1114 ret = sigaction(SIGBUS, &oldact, NULL);
1115 if (ret) {
1116 perror("file_ram_alloc: failed to reinstall signal handler");
1117 exit(1);
1118 }
1119
1120 pthread_sigmask(SIG_SETMASK, &oldset, NULL);
1121 }
1122
04b16653 1123 block->fd = fd;
c902760f 1124 return area;
f9a49dfa
MT
1125
1126error:
1127 if (mem_prealloc) {
1128 exit(1);
1129 }
1130 return NULL;
c902760f 1131}
e1e84ba0
MA
1132#else
1133static void *file_ram_alloc(RAMBlock *block,
1134 ram_addr_t memory,
1135 const char *path)
1136{
1137 fprintf(stderr, "-mem-path not supported on this host\n");
1138 exit(1);
1139}
c902760f
MT
1140#endif
1141
d17b5288 1142static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1143{
1144 RAMBlock *block, *next_block;
3e837b2c 1145 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1146
49cd9ac6
SH
1147 assert(size != 0); /* it would hand out same offset multiple times */
1148
a3161038 1149 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1150 return 0;
1151
a3161038 1152 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1153 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1154
1155 end = block->offset + block->length;
1156
a3161038 1157 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1158 if (next_block->offset >= end) {
1159 next = MIN(next, next_block->offset);
1160 }
1161 }
1162 if (next - end >= size && next - end < mingap) {
3e837b2c 1163 offset = end;
04b16653
AW
1164 mingap = next - end;
1165 }
1166 }
3e837b2c
AW
1167
1168 if (offset == RAM_ADDR_MAX) {
1169 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1170 (uint64_t)size);
1171 abort();
1172 }
1173
04b16653
AW
1174 return offset;
1175}
1176
652d7ec2 1177ram_addr_t last_ram_offset(void)
d17b5288
AW
1178{
1179 RAMBlock *block;
1180 ram_addr_t last = 0;
1181
a3161038 1182 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1183 last = MAX(last, block->offset + block->length);
1184
1185 return last;
1186}
1187
ddb97f1d
JB
1188static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1189{
1190 int ret;
ddb97f1d
JB
1191
1192 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2ff3de68
MA
1193 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1194 "dump-guest-core", true)) {
ddb97f1d
JB
1195 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1196 if (ret) {
1197 perror("qemu_madvise");
1198 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1199 "but dump_guest_core=off specified\n");
1200 }
1201 }
1202}
1203
20cfe881 1204static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1205{
20cfe881 1206 RAMBlock *block;
84b89d78 1207
a3161038 1208 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1209 if (block->offset == addr) {
20cfe881 1210 return block;
c5705a77
AK
1211 }
1212 }
20cfe881
HT
1213
1214 return NULL;
1215}
1216
1217void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1218{
1219 RAMBlock *new_block = find_ram_block(addr);
1220 RAMBlock *block;
1221
c5705a77
AK
1222 assert(new_block);
1223 assert(!new_block->idstr[0]);
84b89d78 1224
09e5ab63
AL
1225 if (dev) {
1226 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1227 if (id) {
1228 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1229 g_free(id);
84b89d78
CM
1230 }
1231 }
1232 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1233
b2a8658e
UD
1234 /* This assumes the iothread lock is taken here too. */
1235 qemu_mutex_lock_ramlist();
a3161038 1236 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1237 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1238 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1239 new_block->idstr);
1240 abort();
1241 }
1242 }
b2a8658e 1243 qemu_mutex_unlock_ramlist();
c5705a77
AK
1244}
1245
20cfe881
HT
1246void qemu_ram_unset_idstr(ram_addr_t addr)
1247{
1248 RAMBlock *block = find_ram_block(addr);
1249
1250 if (block) {
1251 memset(block->idstr, 0, sizeof(block->idstr));
1252 }
1253}
1254
8490fc78
LC
1255static int memory_try_enable_merging(void *addr, size_t len)
1256{
2ff3de68 1257 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1258 /* disabled by the user */
1259 return 0;
1260 }
1261
1262 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1263}
1264
e1c57ab8 1265static ram_addr_t ram_block_add(RAMBlock *new_block)
c5705a77 1266{
e1c57ab8 1267 RAMBlock *block;
2152f5ca
JQ
1268 ram_addr_t old_ram_size, new_ram_size;
1269
1270 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1271
b2a8658e
UD
1272 /* This assumes the iothread lock is taken here too. */
1273 qemu_mutex_lock_ramlist();
e1c57ab8
PB
1274 new_block->offset = find_ram_offset(new_block->length);
1275
1276 if (!new_block->host) {
1277 if (xen_enabled()) {
1278 xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
1279 } else {
1280 new_block->host = phys_mem_alloc(new_block->length);
39228250
MA
1281 if (!new_block->host) {
1282 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1283 new_block->mr->name, strerror(errno));
1284 exit(1);
1285 }
e1c57ab8 1286 memory_try_enable_merging(new_block->host, new_block->length);
6977dfe6 1287 }
c902760f 1288 }
94a6b54f 1289
abb26d63
PB
1290 /* Keep the list sorted from biggest to smallest block. */
1291 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1292 if (block->length < new_block->length) {
1293 break;
1294 }
1295 }
1296 if (block) {
1297 QTAILQ_INSERT_BEFORE(block, new_block, next);
1298 } else {
1299 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1300 }
0d6d3c87 1301 ram_list.mru_block = NULL;
94a6b54f 1302
f798b07f 1303 ram_list.version++;
b2a8658e 1304 qemu_mutex_unlock_ramlist();
f798b07f 1305
2152f5ca
JQ
1306 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1307
1308 if (new_ram_size > old_ram_size) {
1ab4c8ce
JQ
1309 int i;
1310 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1311 ram_list.dirty_memory[i] =
1312 bitmap_zero_extend(ram_list.dirty_memory[i],
1313 old_ram_size, new_ram_size);
1314 }
2152f5ca 1315 }
e1c57ab8 1316 cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
94a6b54f 1317
e1c57ab8
PB
1318 qemu_ram_setup_dump(new_block->host, new_block->length);
1319 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
1320 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
ddb97f1d 1321
e1c57ab8
PB
1322 if (kvm_enabled()) {
1323 kvm_setup_guest_memory(new_block->host, new_block->length);
1324 }
6f0437e8 1325
94a6b54f
PB
1326 return new_block->offset;
1327}
e9a1ab19 1328
e1c57ab8
PB
1329ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1330 const char *mem_path)
1331{
1332 RAMBlock *new_block;
1333
1334 if (xen_enabled()) {
1335 fprintf(stderr, "-mem-path not supported with Xen\n");
1336 exit(1);
1337 }
1338
1339 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1340 /*
1341 * file_ram_alloc() needs to allocate just like
1342 * phys_mem_alloc, but we haven't bothered to provide
1343 * a hook there.
1344 */
1345 fprintf(stderr,
1346 "-mem-path not supported with this accelerator\n");
1347 exit(1);
1348 }
1349
1350 size = TARGET_PAGE_ALIGN(size);
1351 new_block = g_malloc0(sizeof(*new_block));
1352 new_block->mr = mr;
1353 new_block->length = size;
1354 new_block->host = file_ram_alloc(new_block, size, mem_path);
1355 return ram_block_add(new_block);
1356}
1357
1358ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1359 MemoryRegion *mr)
1360{
1361 RAMBlock *new_block;
1362
1363 size = TARGET_PAGE_ALIGN(size);
1364 new_block = g_malloc0(sizeof(*new_block));
1365 new_block->mr = mr;
1366 new_block->length = size;
1367 new_block->fd = -1;
1368 new_block->host = host;
1369 if (host) {
1370 new_block->flags |= RAM_PREALLOC_MASK;
1371 }
1372 return ram_block_add(new_block);
1373}
1374
c5705a77 1375ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1376{
c5705a77 1377 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1378}
1379
1f2e98b6
AW
1380void qemu_ram_free_from_ptr(ram_addr_t addr)
1381{
1382 RAMBlock *block;
1383
b2a8658e
UD
1384 /* This assumes the iothread lock is taken here too. */
1385 qemu_mutex_lock_ramlist();
a3161038 1386 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1387 if (addr == block->offset) {
a3161038 1388 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1389 ram_list.mru_block = NULL;
f798b07f 1390 ram_list.version++;
7267c094 1391 g_free(block);
b2a8658e 1392 break;
1f2e98b6
AW
1393 }
1394 }
b2a8658e 1395 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1396}
1397
c227f099 1398void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1399{
04b16653
AW
1400 RAMBlock *block;
1401
b2a8658e
UD
1402 /* This assumes the iothread lock is taken here too. */
1403 qemu_mutex_lock_ramlist();
a3161038 1404 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1405 if (addr == block->offset) {
a3161038 1406 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1407 ram_list.mru_block = NULL;
f798b07f 1408 ram_list.version++;
cd19cfa2
HY
1409 if (block->flags & RAM_PREALLOC_MASK) {
1410 ;
dfeaf2ab
MA
1411 } else if (xen_enabled()) {
1412 xen_invalidate_map_cache_entry(block->host);
089f3f76 1413#ifndef _WIN32
3435f395
MA
1414 } else if (block->fd >= 0) {
1415 munmap(block->host, block->length);
1416 close(block->fd);
089f3f76 1417#endif
04b16653 1418 } else {
dfeaf2ab 1419 qemu_anon_ram_free(block->host, block->length);
04b16653 1420 }
7267c094 1421 g_free(block);
b2a8658e 1422 break;
04b16653
AW
1423 }
1424 }
b2a8658e 1425 qemu_mutex_unlock_ramlist();
04b16653 1426
e9a1ab19
FB
1427}
1428
cd19cfa2
HY
1429#ifndef _WIN32
1430void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1431{
1432 RAMBlock *block;
1433 ram_addr_t offset;
1434 int flags;
1435 void *area, *vaddr;
1436
a3161038 1437 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1438 offset = addr - block->offset;
1439 if (offset < block->length) {
1440 vaddr = block->host + offset;
1441 if (block->flags & RAM_PREALLOC_MASK) {
1442 ;
dfeaf2ab
MA
1443 } else if (xen_enabled()) {
1444 abort();
cd19cfa2
HY
1445 } else {
1446 flags = MAP_FIXED;
1447 munmap(vaddr, length);
3435f395 1448 if (block->fd >= 0) {
cd19cfa2 1449#ifdef MAP_POPULATE
3435f395
MA
1450 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1451 MAP_PRIVATE;
fd28aa13 1452#else
3435f395 1453 flags |= MAP_PRIVATE;
cd19cfa2 1454#endif
3435f395
MA
1455 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1456 flags, block->fd, offset);
cd19cfa2 1457 } else {
2eb9fbaa
MA
1458 /*
1459 * Remap needs to match alloc. Accelerators that
1460 * set phys_mem_alloc never remap. If they did,
1461 * we'd need a remap hook here.
1462 */
1463 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1464
cd19cfa2
HY
1465 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1466 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1467 flags, -1, 0);
cd19cfa2
HY
1468 }
1469 if (area != vaddr) {
f15fbc4b
AP
1470 fprintf(stderr, "Could not remap addr: "
1471 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1472 length, addr);
1473 exit(1);
1474 }
8490fc78 1475 memory_try_enable_merging(vaddr, length);
ddb97f1d 1476 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1477 }
1478 return;
1479 }
1480 }
1481}
1482#endif /* !_WIN32 */
1483
1b5ec234
PB
1484/* Return a host pointer to ram allocated with qemu_ram_alloc.
1485 With the exception of the softmmu code in this file, this should
1486 only be used for local memory (e.g. video ram) that the device owns,
1487 and knows it isn't going to access beyond the end of the block.
1488
1489 It should not be used for general purpose DMA.
1490 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1491 */
1492void *qemu_get_ram_ptr(ram_addr_t addr)
1493{
1494 RAMBlock *block = qemu_get_ram_block(addr);
1495
0d6d3c87
PB
1496 if (xen_enabled()) {
1497 /* We need to check if the requested address is in the RAM
1498 * because we don't want to map the entire memory in QEMU.
1499 * In that case just map until the end of the page.
1500 */
1501 if (block->offset == 0) {
1502 return xen_map_cache(addr, 0, 0);
1503 } else if (block->host == NULL) {
1504 block->host =
1505 xen_map_cache(block->offset, block->length, 1);
1506 }
1507 }
1508 return block->host + (addr - block->offset);
dc828ca1
PB
1509}
1510
38bee5dc
SS
1511/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1512 * but takes a size argument */
cb85f7ab 1513static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1514{
8ab934f9
SS
1515 if (*size == 0) {
1516 return NULL;
1517 }
868bb33f 1518 if (xen_enabled()) {
e41d7c69 1519 return xen_map_cache(addr, *size, 1);
868bb33f 1520 } else {
38bee5dc
SS
1521 RAMBlock *block;
1522
a3161038 1523 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1524 if (addr - block->offset < block->length) {
1525 if (addr - block->offset + *size > block->length)
1526 *size = block->length - addr + block->offset;
1527 return block->host + (addr - block->offset);
1528 }
1529 }
1530
1531 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1532 abort();
38bee5dc
SS
1533 }
1534}
1535
7443b437
PB
1536/* Some of the softmmu routines need to translate from a host pointer
1537 (typically a TLB entry) back to a ram offset. */
1b5ec234 1538MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1539{
94a6b54f
PB
1540 RAMBlock *block;
1541 uint8_t *host = ptr;
1542
868bb33f 1543 if (xen_enabled()) {
e41d7c69 1544 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1545 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1546 }
1547
23887b79
PB
1548 block = ram_list.mru_block;
1549 if (block && block->host && host - block->host < block->length) {
1550 goto found;
1551 }
1552
a3161038 1553 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
1554 /* This case happens when the block is not mapped. */
1555 if (block->host == NULL) {
1556 continue;
1557 }
f471a17e 1558 if (host - block->host < block->length) {
23887b79 1559 goto found;
f471a17e 1560 }
94a6b54f 1561 }
432d268c 1562
1b5ec234 1563 return NULL;
23887b79
PB
1564
1565found:
1566 *ram_addr = block->offset + (host - block->host);
1b5ec234 1567 return block->mr;
e890261f 1568}
f471a17e 1569
a8170e5e 1570static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1571 uint64_t val, unsigned size)
9fa3e853 1572{
52159192 1573 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1574 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1575 }
0e0df1e2
AK
1576 switch (size) {
1577 case 1:
1578 stb_p(qemu_get_ram_ptr(ram_addr), val);
1579 break;
1580 case 2:
1581 stw_p(qemu_get_ram_ptr(ram_addr), val);
1582 break;
1583 case 4:
1584 stl_p(qemu_get_ram_ptr(ram_addr), val);
1585 break;
1586 default:
1587 abort();
3a7d929e 1588 }
52159192
JQ
1589 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
1590 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
f23db169
FB
1591 /* we remove the notdirty callback only if the code has been
1592 flushed */
a2cd8c85 1593 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1594 CPUArchState *env = current_cpu->env_ptr;
93afeade 1595 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1596 }
9fa3e853
FB
1597}
1598
b018ddf6
PB
1599static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1600 unsigned size, bool is_write)
1601{
1602 return is_write;
1603}
1604
0e0df1e2 1605static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1606 .write = notdirty_mem_write,
b018ddf6 1607 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1608 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1609};
1610
0f459d16 1611/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1612static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1613{
93afeade
AF
1614 CPUState *cpu = current_cpu;
1615 CPUArchState *env = cpu->env_ptr;
06d55cc1 1616 target_ulong pc, cs_base;
0f459d16 1617 target_ulong vaddr;
a1d1bb31 1618 CPUWatchpoint *wp;
06d55cc1 1619 int cpu_flags;
0f459d16 1620
ff4700b0 1621 if (cpu->watchpoint_hit) {
06d55cc1
AL
1622 /* We re-entered the check after replacing the TB. Now raise
1623 * the debug interrupt so that it will trigger after the
1624 * current instruction. */
93afeade 1625 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1626 return;
1627 }
93afeade 1628 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1629 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
b4051334
AL
1630 if ((vaddr == (wp->vaddr & len_mask) ||
1631 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28 1632 wp->flags |= BP_WATCHPOINT_HIT;
ff4700b0
AF
1633 if (!cpu->watchpoint_hit) {
1634 cpu->watchpoint_hit = wp;
239c51a5 1635 tb_check_watchpoint(cpu);
6e140f28 1636 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1637 cpu->exception_index = EXCP_DEBUG;
5638d180 1638 cpu_loop_exit(cpu);
6e140f28
AL
1639 } else {
1640 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1641 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1642 cpu_resume_from_signal(cpu, NULL);
6e140f28 1643 }
06d55cc1 1644 }
6e140f28
AL
1645 } else {
1646 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1647 }
1648 }
1649}
1650
6658ffb8
PB
1651/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1652 so these check for a hit then pass through to the normal out-of-line
1653 phys routines. */
a8170e5e 1654static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1655 unsigned size)
6658ffb8 1656{
1ec9b909
AK
1657 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1658 switch (size) {
2c17449b 1659 case 1: return ldub_phys(&address_space_memory, addr);
41701aa4 1660 case 2: return lduw_phys(&address_space_memory, addr);
fdfba1a2 1661 case 4: return ldl_phys(&address_space_memory, addr);
1ec9b909
AK
1662 default: abort();
1663 }
6658ffb8
PB
1664}
1665
a8170e5e 1666static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1667 uint64_t val, unsigned size)
6658ffb8 1668{
1ec9b909
AK
1669 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1670 switch (size) {
67364150 1671 case 1:
db3be60d 1672 stb_phys(&address_space_memory, addr, val);
67364150
MF
1673 break;
1674 case 2:
5ce5944d 1675 stw_phys(&address_space_memory, addr, val);
67364150
MF
1676 break;
1677 case 4:
ab1da857 1678 stl_phys(&address_space_memory, addr, val);
67364150 1679 break;
1ec9b909
AK
1680 default: abort();
1681 }
6658ffb8
PB
1682}
1683
1ec9b909
AK
1684static const MemoryRegionOps watch_mem_ops = {
1685 .read = watch_mem_read,
1686 .write = watch_mem_write,
1687 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1688};
6658ffb8 1689
a8170e5e 1690static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1691 unsigned len)
db7b5426 1692{
acc9d80b
JK
1693 subpage_t *subpage = opaque;
1694 uint8_t buf[4];
791af8c8 1695
db7b5426 1696#if defined(DEBUG_SUBPAGE)
016e9d62 1697 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1698 subpage, len, addr);
db7b5426 1699#endif
acc9d80b
JK
1700 address_space_read(subpage->as, addr + subpage->base, buf, len);
1701 switch (len) {
1702 case 1:
1703 return ldub_p(buf);
1704 case 2:
1705 return lduw_p(buf);
1706 case 4:
1707 return ldl_p(buf);
1708 default:
1709 abort();
1710 }
db7b5426
BS
1711}
1712
a8170e5e 1713static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1714 uint64_t value, unsigned len)
db7b5426 1715{
acc9d80b
JK
1716 subpage_t *subpage = opaque;
1717 uint8_t buf[4];
1718
db7b5426 1719#if defined(DEBUG_SUBPAGE)
016e9d62 1720 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1721 " value %"PRIx64"\n",
1722 __func__, subpage, len, addr, value);
db7b5426 1723#endif
acc9d80b
JK
1724 switch (len) {
1725 case 1:
1726 stb_p(buf, value);
1727 break;
1728 case 2:
1729 stw_p(buf, value);
1730 break;
1731 case 4:
1732 stl_p(buf, value);
1733 break;
1734 default:
1735 abort();
1736 }
1737 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1738}
1739
c353e4cc 1740static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 1741 unsigned len, bool is_write)
c353e4cc 1742{
acc9d80b 1743 subpage_t *subpage = opaque;
c353e4cc 1744#if defined(DEBUG_SUBPAGE)
016e9d62 1745 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 1746 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1747#endif
1748
acc9d80b 1749 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 1750 len, is_write);
c353e4cc
PB
1751}
1752
70c68e44
AK
1753static const MemoryRegionOps subpage_ops = {
1754 .read = subpage_read,
1755 .write = subpage_write,
c353e4cc 1756 .valid.accepts = subpage_accepts,
70c68e44 1757 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1758};
1759
c227f099 1760static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1761 uint16_t section)
db7b5426
BS
1762{
1763 int idx, eidx;
1764
1765 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1766 return -1;
1767 idx = SUBPAGE_IDX(start);
1768 eidx = SUBPAGE_IDX(end);
1769#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1770 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1771 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1772#endif
db7b5426 1773 for (; idx <= eidx; idx++) {
5312bd8b 1774 mmio->sub_section[idx] = section;
db7b5426
BS
1775 }
1776
1777 return 0;
1778}
1779
acc9d80b 1780static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1781{
c227f099 1782 subpage_t *mmio;
db7b5426 1783
7267c094 1784 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1785
acc9d80b 1786 mmio->as = as;
1eec614b 1787 mmio->base = base;
2c9b15ca 1788 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
70c68e44 1789 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1790 mmio->iomem.subpage = true;
db7b5426 1791#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1792 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1793 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1794#endif
b41aac4f 1795 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1796
1797 return mmio;
1798}
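/* A subpage splits a single target page among several MemoryRegionSections:
 * subpage_register() records which section owns each byte range, and the
 * subpage_read()/subpage_write() callbacks above simply bounce the access
 * back into the owning AddressSpace at subpage->base + addr.  A minimal
 * sketch of the registration step (the offsets and the section index 5 are
 * arbitrary examples, not values QEMU itself uses here):
 *
 *     subpage_t *sp = subpage_init(as, page_base);
 *     subpage_register(sp, 0x000, 0x0ff, 5);   // bytes 0x000-0x0ff -> section 5
 *     // the remainder of the page stays PHYS_SECTION_UNASSIGNED
 */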
1799
a656e22f
PC
1800static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1801 MemoryRegion *mr)
5312bd8b 1802{
a656e22f 1803 assert(as);
5312bd8b 1804 MemoryRegionSection section = {
a656e22f 1805 .address_space = as,
5312bd8b
AK
1806 .mr = mr,
1807 .offset_within_address_space = 0,
1808 .offset_within_region = 0,
052e87b0 1809 .size = int128_2_64(),
5312bd8b
AK
1810 };
1811
53cb28cb 1812 return phys_section_add(map, &section);
5312bd8b
AK
1813}
1814
77717094 1815MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1816{
77717094 1817 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1818}
1819
e9179ce1
AK
1820static void io_mem_init(void)
1821{
2c9b15ca
PB
1822 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1823 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
0e0df1e2 1824 "unassigned", UINT64_MAX);
2c9b15ca 1825 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
0e0df1e2 1826 "notdirty", UINT64_MAX);
2c9b15ca 1827 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1ec9b909 1828 "watch", UINT64_MAX);
e9179ce1
AK
1829}
1830
ac1970fb 1831static void mem_begin(MemoryListener *listener)
00752703
PB
1832{
1833 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1834 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1835 uint16_t n;
1836
a656e22f 1837 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1838 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1839 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1840 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1841 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1842 assert(n == PHYS_SECTION_ROM);
a656e22f 1843 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1844 assert(n == PHYS_SECTION_WATCH);
00752703 1845
9736e55b 1846 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1847 d->as = as;
1848 as->next_dispatch = d;
1849}
1850
1851static void mem_commit(MemoryListener *listener)
ac1970fb 1852{
89ae337a 1853 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1854 AddressSpaceDispatch *cur = as->dispatch;
1855 AddressSpaceDispatch *next = as->next_dispatch;
1856
53cb28cb 1857 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 1858
0475d94f 1859 as->dispatch = next;
b41aac4f 1860
53cb28cb
MA
1861 if (cur) {
1862 phys_sections_free(&cur->map);
1863 g_free(cur);
1864 }
9affd6fc
PB
1865}
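/* mem_begin()/mem_commit() double-buffer the per-AddressSpace dispatch tree:
 * begin() builds a fresh AddressSpaceDispatch in as->next_dispatch, the
 * region_add/region_nop callbacks (mem_add) populate it section by section,
 * and commit() compacts the radix tree, swaps it into as->dispatch and frees
 * the previous one.  Roughly, one flatview update drives the listener like
 * this (an informal sketch of the callback order, not an exact trace):
 *
 *     mem_begin(listener);            // allocate as->next_dispatch
 *     mem_add(listener, &section);    // once per MemoryRegionSection
 *     ...
 *     mem_commit(listener);           // compact, publish, free the old map
 */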
1866
1d71148e 1867static void tcg_commit(MemoryListener *listener)
50c1e149 1868{
182735ef 1869 CPUState *cpu;
117712c3
AK
1870
 1871 /* Since each CPU stores RAM addresses in its TLB cache, we must
 1872 reset the modified entries. */
 1873 /* XXX: slow! */
bdc44640 1874 CPU_FOREACH(cpu) {
33bde2e1
EI
1875 /* FIXME: Disentangle the cpu.h circular files deps so we can
1876 directly get the right CPU from listener. */
1877 if (cpu->tcg_as_listener != listener) {
1878 continue;
1879 }
00c8cb0a 1880 tlb_flush(cpu, 1);
117712c3 1881 }
50c1e149
AK
1882}
1883
93632747
AK
1884static void core_log_global_start(MemoryListener *listener)
1885{
981fdf23 1886 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
1887}
1888
1889static void core_log_global_stop(MemoryListener *listener)
1890{
981fdf23 1891 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
1892}
1893
93632747 1894static MemoryListener core_memory_listener = {
93632747
AK
1895 .log_global_start = core_log_global_start,
1896 .log_global_stop = core_log_global_stop,
ac1970fb 1897 .priority = 1,
93632747
AK
1898};
1899
ac1970fb
AK
1900void address_space_init_dispatch(AddressSpace *as)
1901{
00752703 1902 as->dispatch = NULL;
89ae337a 1903 as->dispatch_listener = (MemoryListener) {
ac1970fb 1904 .begin = mem_begin,
00752703 1905 .commit = mem_commit,
ac1970fb
AK
1906 .region_add = mem_add,
1907 .region_nop = mem_add,
1908 .priority = 0,
1909 };
89ae337a 1910 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1911}
1912
83f3c251
AK
1913void address_space_destroy_dispatch(AddressSpace *as)
1914{
1915 AddressSpaceDispatch *d = as->dispatch;
1916
89ae337a 1917 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1918 g_free(d);
1919 as->dispatch = NULL;
1920}
1921
62152b8a
AK
1922static void memory_map_init(void)
1923{
7267c094 1924 system_memory = g_malloc(sizeof(*system_memory));
03f49957 1925
57271d63 1926 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 1927 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1928
7267c094 1929 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
1930 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1931 65536);
7dca8043 1932 address_space_init(&address_space_io, system_io, "I/O");
93632747 1933
f6790af6 1934 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
1935}
1936
1937MemoryRegion *get_system_memory(void)
1938{
1939 return system_memory;
1940}
1941
309cb471
AK
1942MemoryRegion *get_system_io(void)
1943{
1944 return system_io;
1945}
1946
e2eef170
PB
1947#endif /* !defined(CONFIG_USER_ONLY) */
1948
13eb76e0
FB
1949/* physical memory access (slow version, mainly for debug) */
1950#if defined(CONFIG_USER_ONLY)
f17ec444 1951int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 1952 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1953{
1954 int l, flags;
1955 target_ulong page;
53a5960a 1956 void * p;
13eb76e0
FB
1957
1958 while (len > 0) {
1959 page = addr & TARGET_PAGE_MASK;
1960 l = (page + TARGET_PAGE_SIZE) - addr;
1961 if (l > len)
1962 l = len;
1963 flags = page_get_flags(page);
1964 if (!(flags & PAGE_VALID))
a68fe89c 1965 return -1;
13eb76e0
FB
1966 if (is_write) {
1967 if (!(flags & PAGE_WRITE))
a68fe89c 1968 return -1;
579a97f7 1969 /* XXX: this code should not depend on lock_user */
72fb7daa 1970 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1971 return -1;
72fb7daa
AJ
1972 memcpy(p, buf, l);
1973 unlock_user(p, addr, l);
13eb76e0
FB
1974 } else {
1975 if (!(flags & PAGE_READ))
a68fe89c 1976 return -1;
579a97f7 1977 /* XXX: this code should not depend on lock_user */
72fb7daa 1978 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1979 return -1;
72fb7daa 1980 memcpy(buf, p, l);
5b257578 1981 unlock_user(p, addr, 0);
13eb76e0
FB
1982 }
1983 len -= l;
1984 buf += l;
1985 addr += l;
1986 }
a68fe89c 1987 return 0;
13eb76e0 1988}
8df1cd07 1989
13eb76e0 1990#else
51d7a9eb 1991
a8170e5e
AK
1992static void invalidate_and_set_dirty(hwaddr addr,
1993 hwaddr length)
51d7a9eb 1994{
a2cd8c85 1995 if (cpu_physical_memory_is_clean(addr)) {
51d7a9eb
AP
1996 /* invalidate code */
1997 tb_invalidate_phys_page_range(addr, addr + length, 0);
1998 /* set dirty bit */
52159192
JQ
1999 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
2000 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
51d7a9eb 2001 }
e226939d 2002 xen_modified_memory(addr, length);
51d7a9eb
AP
2003}
2004
23326164 2005static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2006{
e1622f4b 2007 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2008
2009 /* Regions are assumed to support 1-4 byte accesses unless
2010 otherwise specified. */
23326164
RH
2011 if (access_size_max == 0) {
2012 access_size_max = 4;
2013 }
2014
2015 /* Bound the maximum access by the alignment of the address. */
2016 if (!mr->ops->impl.unaligned) {
2017 unsigned align_size_max = addr & -addr;
2018 if (align_size_max != 0 && align_size_max < access_size_max) {
2019 access_size_max = align_size_max;
2020 }
82f2563f 2021 }
23326164
RH
2022
2023 /* Don't attempt accesses larger than the maximum. */
2024 if (l > access_size_max) {
2025 l = access_size_max;
82f2563f 2026 }
098178f2
PB
2027 if (l & (l - 1)) {
2028 l = 1 << (qemu_fls(l) - 1);
2029 }
23326164
RH
2030
2031 return l;
82f2563f
PB
2032}
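/* Worked example: with .valid.max_access_size = 4 and no unaligned support,
 * an 8-byte request at addr 0x1006 (an arbitrary example address) is clamped
 * like this:
 *
 *     access_size_max = 4;                 // from valid.max_access_size
 *     align_size_max  = 0x1006 & -0x1006;  // == 2, lowest set bit of addr
 *     access_size_max = 2;                 // bounded by the alignment
 *     l               = 2;                 // 8 > 2, and 2 is a power of two
 *
 * address_space_rw() below then splits the transfer into 2-byte accesses.
 */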
2033
fd8aaa76 2034bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2035 int len, bool is_write)
13eb76e0 2036{
149f54b5 2037 hwaddr l;
13eb76e0 2038 uint8_t *ptr;
791af8c8 2039 uint64_t val;
149f54b5 2040 hwaddr addr1;
5c8a00ce 2041 MemoryRegion *mr;
fd8aaa76 2042 bool error = false;
3b46e624 2043
13eb76e0 2044 while (len > 0) {
149f54b5 2045 l = len;
5c8a00ce 2046 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2047
13eb76e0 2048 if (is_write) {
5c8a00ce
PB
2049 if (!memory_access_is_direct(mr, is_write)) {
2050 l = memory_access_size(mr, l, addr1);
4917cf44 2051 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2052 potential bugs */
23326164
RH
2053 switch (l) {
2054 case 8:
2055 /* 64 bit write access */
2056 val = ldq_p(buf);
2057 error |= io_mem_write(mr, addr1, val, 8);
2058 break;
2059 case 4:
1c213d19 2060 /* 32 bit write access */
c27004ec 2061 val = ldl_p(buf);
5c8a00ce 2062 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2063 break;
2064 case 2:
1c213d19 2065 /* 16 bit write access */
c27004ec 2066 val = lduw_p(buf);
5c8a00ce 2067 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2068 break;
2069 case 1:
1c213d19 2070 /* 8 bit write access */
c27004ec 2071 val = ldub_p(buf);
5c8a00ce 2072 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2073 break;
2074 default:
2075 abort();
13eb76e0 2076 }
2bbfa05d 2077 } else {
5c8a00ce 2078 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2079 /* RAM case */
5579c7f3 2080 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2081 memcpy(ptr, buf, l);
51d7a9eb 2082 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2083 }
2084 } else {
5c8a00ce 2085 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2086 /* I/O case */
5c8a00ce 2087 l = memory_access_size(mr, l, addr1);
23326164
RH
2088 switch (l) {
2089 case 8:
2090 /* 64 bit read access */
2091 error |= io_mem_read(mr, addr1, &val, 8);
2092 stq_p(buf, val);
2093 break;
2094 case 4:
13eb76e0 2095 /* 32 bit read access */
5c8a00ce 2096 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2097 stl_p(buf, val);
23326164
RH
2098 break;
2099 case 2:
13eb76e0 2100 /* 16 bit read access */
5c8a00ce 2101 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2102 stw_p(buf, val);
23326164
RH
2103 break;
2104 case 1:
1c213d19 2105 /* 8 bit read access */
5c8a00ce 2106 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2107 stb_p(buf, val);
23326164
RH
2108 break;
2109 default:
2110 abort();
13eb76e0
FB
2111 }
2112 } else {
2113 /* RAM case */
5c8a00ce 2114 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2115 memcpy(buf, ptr, l);
13eb76e0
FB
2116 }
2117 }
2118 len -= l;
2119 buf += l;
2120 addr += l;
2121 }
fd8aaa76
PB
2122
2123 return error;
13eb76e0 2124}
8df1cd07 2125
fd8aaa76 2126bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2127 const uint8_t *buf, int len)
2128{
fd8aaa76 2129 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2130}
2131
fd8aaa76 2132bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2133{
fd8aaa76 2134 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2135}
2136
2137
a8170e5e 2138void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2139 int len, int is_write)
2140{
fd8aaa76 2141 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2142}
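/* Typical use from a device model that needs a bounce copy of guest memory
 * (0x1000 is an arbitrary example address):
 *
 *     uint8_t buf[16];
 *     cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);   // read from guest
 *     cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);   // write to guest
 *
 * Devices that sit behind an IOMMU or have their own AddressSpace should
 * call address_space_rw() on that address space rather than the global one.
 */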
2143
582b55a9
AG
2144enum write_rom_type {
2145 WRITE_DATA,
2146 FLUSH_CACHE,
2147};
2148
2a221651 2149static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2150 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2151{
149f54b5 2152 hwaddr l;
d0ecd2aa 2153 uint8_t *ptr;
149f54b5 2154 hwaddr addr1;
5c8a00ce 2155 MemoryRegion *mr;
3b46e624 2156
d0ecd2aa 2157 while (len > 0) {
149f54b5 2158 l = len;
2a221651 2159 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2160
5c8a00ce
PB
2161 if (!(memory_region_is_ram(mr) ||
2162 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2163 /* do nothing */
2164 } else {
5c8a00ce 2165 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2166 /* ROM/RAM case */
5579c7f3 2167 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2168 switch (type) {
2169 case WRITE_DATA:
2170 memcpy(ptr, buf, l);
2171 invalidate_and_set_dirty(addr1, l);
2172 break;
2173 case FLUSH_CACHE:
2174 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2175 break;
2176 }
d0ecd2aa
FB
2177 }
2178 len -= l;
2179 buf += l;
2180 addr += l;
2181 }
2182}
2183
582b55a9 2184/* used for ROM loading: can write in RAM and ROM */
2a221651 2185void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2186 const uint8_t *buf, int len)
2187{
2a221651 2188 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2189}
2190
2191void cpu_flush_icache_range(hwaddr start, int len)
2192{
2193 /*
2194 * This function should do the same thing as an icache flush that was
2195 * triggered from within the guest. For TCG we are always cache coherent,
2196 * so there is no need to flush anything. For KVM / Xen we need to flush
2197 * the host's instruction cache at least.
2198 */
2199 if (tcg_enabled()) {
2200 return;
2201 }
2202
2a221651
EI
2203 cpu_physical_memory_write_rom_internal(&address_space_memory,
2204 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2205}
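/* Typical ROM-loading call, e.g. from board or firmware setup code (the
 * address and the blob/blob_size names are illustrative only):
 *
 *     cpu_physical_memory_write_rom(&address_space_memory,
 *                                   0xfff00000, blob, blob_size);
 *
 * Unlike a plain cpu_physical_memory_write(), this path also fills regions
 * that are currently in ROM/ROMD mode, which ordinary writes leave untouched.
 */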
2206
6d16c2f8 2207typedef struct {
d3e71559 2208 MemoryRegion *mr;
6d16c2f8 2209 void *buffer;
a8170e5e
AK
2210 hwaddr addr;
2211 hwaddr len;
6d16c2f8
AL
2212} BounceBuffer;
2213
2214static BounceBuffer bounce;
2215
ba223c29
AL
2216typedef struct MapClient {
2217 void *opaque;
2218 void (*callback)(void *opaque);
72cf2d4f 2219 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2220} MapClient;
2221
72cf2d4f
BS
2222static QLIST_HEAD(map_client_list, MapClient) map_client_list
2223 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2224
2225void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2226{
7267c094 2227 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2228
2229 client->opaque = opaque;
2230 client->callback = callback;
72cf2d4f 2231 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2232 return client;
2233}
2234
8b9c99d9 2235static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2236{
2237 MapClient *client = (MapClient *)_client;
2238
72cf2d4f 2239 QLIST_REMOVE(client, link);
7267c094 2240 g_free(client);
ba223c29
AL
2241}
2242
2243static void cpu_notify_map_clients(void)
2244{
2245 MapClient *client;
2246
72cf2d4f
BS
2247 while (!QLIST_EMPTY(&map_client_list)) {
2248 client = QLIST_FIRST(&map_client_list);
ba223c29 2249 client->callback(client->opaque);
34d5e948 2250 cpu_unregister_map_client(client);
ba223c29
AL
2251 }
2252}
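/* The map-client list is the retry protocol for address_space_map(): if the
 * single bounce buffer is busy the map returns NULL, the caller registers a
 * callback, and cpu_notify_map_clients() (run from address_space_unmap())
 * invokes it once the buffer is free again.  Sketch of a caller, where
 * retry_cb and my_state are hypothetical names:
 *
 *     void *p = address_space_map(as, addr, &len, is_write);
 *     if (!p) {
 *         cpu_register_map_client(my_state, retry_cb);   // retry_cb redoes the map
 *         return;
 *     }
 */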
2253
51644ab7
PB
2254bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2255{
5c8a00ce 2256 MemoryRegion *mr;
51644ab7
PB
2257 hwaddr l, xlat;
2258
2259 while (len > 0) {
2260 l = len;
5c8a00ce
PB
2261 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2262 if (!memory_access_is_direct(mr, is_write)) {
2263 l = memory_access_size(mr, l, addr);
2264 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2265 return false;
2266 }
2267 }
2268
2269 len -= l;
2270 addr += l;
2271 }
2272 return true;
2273}
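/* Callers use this as a cheap pre-flight check before committing to a
 * transfer, e.g. (desc_addr/desc_len are hypothetical):
 *
 *     if (!address_space_access_valid(as, desc_addr, desc_len, false)) {
 *         return;   // flag an error instead of issuing the access
 *     }
 */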
2274
6d16c2f8
AL
2275/* Map a physical memory region into a host virtual address.
2276 * May map a subset of the requested range, given by and returned in *plen.
2277 * May return NULL if resources needed to perform the mapping are exhausted.
2278 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2279 * Use cpu_register_map_client() to know when retrying the map operation is
2280 * likely to succeed.
6d16c2f8 2281 */
ac1970fb 2282void *address_space_map(AddressSpace *as,
a8170e5e
AK
2283 hwaddr addr,
2284 hwaddr *plen,
ac1970fb 2285 bool is_write)
6d16c2f8 2286{
a8170e5e 2287 hwaddr len = *plen;
e3127ae0
PB
2288 hwaddr done = 0;
2289 hwaddr l, xlat, base;
2290 MemoryRegion *mr, *this_mr;
2291 ram_addr_t raddr;
6d16c2f8 2292
e3127ae0
PB
2293 if (len == 0) {
2294 return NULL;
2295 }
38bee5dc 2296
e3127ae0
PB
2297 l = len;
2298 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2299 if (!memory_access_is_direct(mr, is_write)) {
2300 if (bounce.buffer) {
2301 return NULL;
6d16c2f8 2302 }
e85d9db5
KW
2303 /* Avoid unbounded allocations */
2304 l = MIN(l, TARGET_PAGE_SIZE);
2305 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2306 bounce.addr = addr;
2307 bounce.len = l;
d3e71559
PB
2308
2309 memory_region_ref(mr);
2310 bounce.mr = mr;
e3127ae0
PB
2311 if (!is_write) {
2312 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2313 }
6d16c2f8 2314
e3127ae0
PB
2315 *plen = l;
2316 return bounce.buffer;
2317 }
2318
2319 base = xlat;
2320 raddr = memory_region_get_ram_addr(mr);
2321
2322 for (;;) {
6d16c2f8
AL
2323 len -= l;
2324 addr += l;
e3127ae0
PB
2325 done += l;
2326 if (len == 0) {
2327 break;
2328 }
2329
2330 l = len;
2331 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2332 if (this_mr != mr || xlat != base + done) {
2333 break;
2334 }
6d16c2f8 2335 }
e3127ae0 2336
d3e71559 2337 memory_region_ref(mr);
e3127ae0
PB
2338 *plen = done;
2339 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2340}
2341
ac1970fb 2342/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2343 * Will also mark the memory as dirty if is_write == 1. access_len gives
2344 * the amount of memory that was actually read or written by the caller.
2345 */
a8170e5e
AK
2346void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2347 int is_write, hwaddr access_len)
6d16c2f8
AL
2348{
2349 if (buffer != bounce.buffer) {
d3e71559
PB
2350 MemoryRegion *mr;
2351 ram_addr_t addr1;
2352
2353 mr = qemu_ram_addr_from_host(buffer, &addr1);
2354 assert(mr != NULL);
6d16c2f8 2355 if (is_write) {
6d16c2f8
AL
2356 while (access_len) {
2357 unsigned l;
2358 l = TARGET_PAGE_SIZE;
2359 if (l > access_len)
2360 l = access_len;
51d7a9eb 2361 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2362 addr1 += l;
2363 access_len -= l;
2364 }
2365 }
868bb33f 2366 if (xen_enabled()) {
e41d7c69 2367 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2368 }
d3e71559 2369 memory_region_unref(mr);
6d16c2f8
AL
2370 return;
2371 }
2372 if (is_write) {
ac1970fb 2373 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2374 }
f8a83245 2375 qemu_vfree(bounce.buffer);
6d16c2f8 2376 bounce.buffer = NULL;
d3e71559 2377 memory_region_unref(bounce.mr);
ba223c29 2378 cpu_notify_map_clients();
6d16c2f8 2379}
d0ecd2aa 2380
a8170e5e
AK
2381void *cpu_physical_memory_map(hwaddr addr,
2382 hwaddr *plen,
ac1970fb
AK
2383 int is_write)
2384{
2385 return address_space_map(&address_space_memory, addr, plen, is_write);
2386}
2387
a8170e5e
AK
2388void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2389 int is_write, hwaddr access_len)
ac1970fb
AK
2390{
2391 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2392}
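/* Canonical zero-copy pattern built on the two wrappers above (sizes and the
 * write direction are example choices; a real caller would fall back to
 * cpu_physical_memory_rw() when the mapping fails or comes back short):
 *
 *     hwaddr plen = size;
 *     void *p = cpu_physical_memory_map(addr, &plen, 1);    // map for writing
 *     if (p) {
 *         memcpy(p, data, plen);                            // touch guest RAM directly
 *         cpu_physical_memory_unmap(p, plen, 1, plen);      // marks the range dirty
 *     }
 */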
2393
8df1cd07 2394/* warning: addr must be aligned */
fdfba1a2 2395static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2396 enum device_endian endian)
8df1cd07 2397{
8df1cd07 2398 uint8_t *ptr;
791af8c8 2399 uint64_t val;
5c8a00ce 2400 MemoryRegion *mr;
149f54b5
PB
2401 hwaddr l = 4;
2402 hwaddr addr1;
8df1cd07 2403
fdfba1a2 2404 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2405 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2406 /* I/O case */
5c8a00ce 2407 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2408#if defined(TARGET_WORDS_BIGENDIAN)
2409 if (endian == DEVICE_LITTLE_ENDIAN) {
2410 val = bswap32(val);
2411 }
2412#else
2413 if (endian == DEVICE_BIG_ENDIAN) {
2414 val = bswap32(val);
2415 }
2416#endif
8df1cd07
FB
2417 } else {
2418 /* RAM case */
5c8a00ce 2419 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2420 & TARGET_PAGE_MASK)
149f54b5 2421 + addr1);
1e78bcc1
AG
2422 switch (endian) {
2423 case DEVICE_LITTLE_ENDIAN:
2424 val = ldl_le_p(ptr);
2425 break;
2426 case DEVICE_BIG_ENDIAN:
2427 val = ldl_be_p(ptr);
2428 break;
2429 default:
2430 val = ldl_p(ptr);
2431 break;
2432 }
8df1cd07
FB
2433 }
2434 return val;
2435}
2436
fdfba1a2 2437uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2438{
fdfba1a2 2439 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2440}
2441
fdfba1a2 2442uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2443{
fdfba1a2 2444 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2445}
2446
fdfba1a2 2447uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2448{
fdfba1a2 2449 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2450}
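/* Device emulation uses the endian-specific variants to read guest-visible
 * structures that have a fixed byte order regardless of the host, e.g.
 * (desc_addr is a hypothetical guest-physical descriptor address):
 *
 *     uint32_t flags = ldl_le_phys(&address_space_memory, desc_addr);
 */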
2451
84b7b8e7 2452/* warning: addr must be aligned */
2c17449b 2453static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2454 enum device_endian endian)
84b7b8e7 2455{
84b7b8e7
FB
2456 uint8_t *ptr;
2457 uint64_t val;
5c8a00ce 2458 MemoryRegion *mr;
149f54b5
PB
2459 hwaddr l = 8;
2460 hwaddr addr1;
84b7b8e7 2461
2c17449b 2462 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2463 false);
2464 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2465 /* I/O case */
5c8a00ce 2466 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2467#if defined(TARGET_WORDS_BIGENDIAN)
2468 if (endian == DEVICE_LITTLE_ENDIAN) {
2469 val = bswap64(val);
2470 }
2471#else
2472 if (endian == DEVICE_BIG_ENDIAN) {
2473 val = bswap64(val);
2474 }
84b7b8e7
FB
2475#endif
2476 } else {
2477 /* RAM case */
5c8a00ce 2478 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2479 & TARGET_PAGE_MASK)
149f54b5 2480 + addr1);
1e78bcc1
AG
2481 switch (endian) {
2482 case DEVICE_LITTLE_ENDIAN:
2483 val = ldq_le_p(ptr);
2484 break;
2485 case DEVICE_BIG_ENDIAN:
2486 val = ldq_be_p(ptr);
2487 break;
2488 default:
2489 val = ldq_p(ptr);
2490 break;
2491 }
84b7b8e7
FB
2492 }
2493 return val;
2494}
2495
2c17449b 2496uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2497{
2c17449b 2498 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2499}
2500
2c17449b 2501uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2502{
2c17449b 2503 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2504}
2505
2c17449b 2506uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2507{
2c17449b 2508 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2509}
2510
aab33094 2511/* XXX: optimize */
2c17449b 2512uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2513{
2514 uint8_t val;
2c17449b 2515 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2516 return val;
2517}
2518
733f0b02 2519/* warning: addr must be aligned */
41701aa4 2520static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2521 enum device_endian endian)
aab33094 2522{
733f0b02
MT
2523 uint8_t *ptr;
2524 uint64_t val;
5c8a00ce 2525 MemoryRegion *mr;
149f54b5
PB
2526 hwaddr l = 2;
2527 hwaddr addr1;
733f0b02 2528
41701aa4 2529 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2530 false);
2531 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2532 /* I/O case */
5c8a00ce 2533 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2534#if defined(TARGET_WORDS_BIGENDIAN)
2535 if (endian == DEVICE_LITTLE_ENDIAN) {
2536 val = bswap16(val);
2537 }
2538#else
2539 if (endian == DEVICE_BIG_ENDIAN) {
2540 val = bswap16(val);
2541 }
2542#endif
733f0b02
MT
2543 } else {
2544 /* RAM case */
5c8a00ce 2545 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2546 & TARGET_PAGE_MASK)
149f54b5 2547 + addr1);
1e78bcc1
AG
2548 switch (endian) {
2549 case DEVICE_LITTLE_ENDIAN:
2550 val = lduw_le_p(ptr);
2551 break;
2552 case DEVICE_BIG_ENDIAN:
2553 val = lduw_be_p(ptr);
2554 break;
2555 default:
2556 val = lduw_p(ptr);
2557 break;
2558 }
733f0b02
MT
2559 }
2560 return val;
aab33094
FB
2561}
2562
41701aa4 2563uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2564{
41701aa4 2565 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2566}
2567
41701aa4 2568uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2569{
41701aa4 2570 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2571}
2572
41701aa4 2573uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2574{
41701aa4 2575 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2576}
2577
8df1cd07
FB
2578/* warning: addr must be aligned. The ram page is not marked as dirty
2579 and the code inside is not invalidated. It is useful if the dirty
2580 bits are used to track modified PTEs */
2198a121 2581void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2582{
8df1cd07 2583 uint8_t *ptr;
5c8a00ce 2584 MemoryRegion *mr;
149f54b5
PB
2585 hwaddr l = 4;
2586 hwaddr addr1;
8df1cd07 2587
2198a121 2588 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2589 true);
2590 if (l < 4 || !memory_access_is_direct(mr, true)) {
2591 io_mem_write(mr, addr1, val, 4);
8df1cd07 2592 } else {
5c8a00ce 2593 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2594 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2595 stl_p(ptr, val);
74576198
AL
2596
2597 if (unlikely(in_migration)) {
a2cd8c85 2598 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2599 /* invalidate code */
2600 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2601 /* set dirty bit */
52159192
JQ
2602 cpu_physical_memory_set_dirty_flag(addr1,
2603 DIRTY_MEMORY_MIGRATION);
2604 cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
74576198
AL
2605 }
2606 }
8df1cd07
FB
2607 }
2608}
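/* The typical user is a target's software MMU walk updating accessed/dirty
 * bits in a guest page-table entry; routing the store through
 * stl_phys_notdirty() keeps that bookkeeping write from invalidating any
 * translated code on the page.  Sketch (pte_addr, pte and PTE_ACCESSED are
 * hypothetical, target-specific names):
 *
 *     pte |= PTE_ACCESSED;
 *     stl_phys_notdirty(cs->as, pte_addr, pte);
 */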
2609
2610/* warning: addr must be aligned */
ab1da857
EI
2611static inline void stl_phys_internal(AddressSpace *as,
2612 hwaddr addr, uint32_t val,
1e78bcc1 2613 enum device_endian endian)
8df1cd07 2614{
8df1cd07 2615 uint8_t *ptr;
5c8a00ce 2616 MemoryRegion *mr;
149f54b5
PB
2617 hwaddr l = 4;
2618 hwaddr addr1;
8df1cd07 2619
ab1da857 2620 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2621 true);
2622 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2623#if defined(TARGET_WORDS_BIGENDIAN)
2624 if (endian == DEVICE_LITTLE_ENDIAN) {
2625 val = bswap32(val);
2626 }
2627#else
2628 if (endian == DEVICE_BIG_ENDIAN) {
2629 val = bswap32(val);
2630 }
2631#endif
5c8a00ce 2632 io_mem_write(mr, addr1, val, 4);
8df1cd07 2633 } else {
8df1cd07 2634 /* RAM case */
5c8a00ce 2635 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2636 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2637 switch (endian) {
2638 case DEVICE_LITTLE_ENDIAN:
2639 stl_le_p(ptr, val);
2640 break;
2641 case DEVICE_BIG_ENDIAN:
2642 stl_be_p(ptr, val);
2643 break;
2644 default:
2645 stl_p(ptr, val);
2646 break;
2647 }
51d7a9eb 2648 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2649 }
2650}
2651
ab1da857 2652void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2653{
ab1da857 2654 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2655}
2656
ab1da857 2657void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2658{
ab1da857 2659 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2660}
2661
ab1da857 2662void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2663{
ab1da857 2664 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2665}
2666
aab33094 2667/* XXX: optimize */
db3be60d 2668void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2669{
2670 uint8_t v = val;
db3be60d 2671 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2672}
2673
733f0b02 2674/* warning: addr must be aligned */
5ce5944d
EI
2675static inline void stw_phys_internal(AddressSpace *as,
2676 hwaddr addr, uint32_t val,
1e78bcc1 2677 enum device_endian endian)
aab33094 2678{
733f0b02 2679 uint8_t *ptr;
5c8a00ce 2680 MemoryRegion *mr;
149f54b5
PB
2681 hwaddr l = 2;
2682 hwaddr addr1;
733f0b02 2683
5ce5944d 2684 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2685 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2686#if defined(TARGET_WORDS_BIGENDIAN)
2687 if (endian == DEVICE_LITTLE_ENDIAN) {
2688 val = bswap16(val);
2689 }
2690#else
2691 if (endian == DEVICE_BIG_ENDIAN) {
2692 val = bswap16(val);
2693 }
2694#endif
5c8a00ce 2695 io_mem_write(mr, addr1, val, 2);
733f0b02 2696 } else {
733f0b02 2697 /* RAM case */
5c8a00ce 2698 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2699 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2700 switch (endian) {
2701 case DEVICE_LITTLE_ENDIAN:
2702 stw_le_p(ptr, val);
2703 break;
2704 case DEVICE_BIG_ENDIAN:
2705 stw_be_p(ptr, val);
2706 break;
2707 default:
2708 stw_p(ptr, val);
2709 break;
2710 }
51d7a9eb 2711 invalidate_and_set_dirty(addr1, 2);
733f0b02 2712 }
aab33094
FB
2713}
2714
5ce5944d 2715void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2716{
5ce5944d 2717 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2718}
2719
5ce5944d 2720void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2721{
5ce5944d 2722 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2723}
2724
5ce5944d 2725void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2726{
5ce5944d 2727 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2728}
2729
aab33094 2730/* XXX: optimize */
f606604f 2731void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2732{
2733 val = tswap64(val);
f606604f 2734 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2735}
2736
f606604f 2737void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2738{
2739 val = cpu_to_le64(val);
f606604f 2740 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2741}
2742
f606604f 2743void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2744{
2745 val = cpu_to_be64(val);
f606604f 2746 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2747}
2748
5e2972fd 2749/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2750int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2751 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2752{
2753 int l;
a8170e5e 2754 hwaddr phys_addr;
9b3c35e0 2755 target_ulong page;
13eb76e0
FB
2756
2757 while (len > 0) {
2758 page = addr & TARGET_PAGE_MASK;
f17ec444 2759 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2760 /* if no physical page mapped, return an error */
2761 if (phys_addr == -1)
2762 return -1;
2763 l = (page + TARGET_PAGE_SIZE) - addr;
2764 if (l > len)
2765 l = len;
5e2972fd 2766 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2767 if (is_write) {
2768 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2769 } else {
2770 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2771 }
13eb76e0
FB
2772 len -= l;
2773 buf += l;
2774 addr += l;
2775 }
2776 return 0;
2777}
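/* This is the accessor the gdbstub and the monitor use for guest *virtual*
 * addresses: each page is translated with cpu_get_phys_page_debug() and the
 * bytes are then moved with the physical-memory helpers above, e.g.
 * (pc is just an example source address):
 *
 *     uint8_t insn[4];
 *     cpu_memory_rw_debug(cpu, pc, insn, sizeof(insn), 0);
 */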
a68fe89c 2778#endif
13eb76e0 2779
8e4a424b
BS
2780#if !defined(CONFIG_USER_ONLY)
2781
2782/*
2783 * A helper function for the _utterly broken_ virtio device model to find out if
2784 * it's running on a big endian machine. Don't do this at home kids!
2785 */
2786bool virtio_is_big_endian(void);
2787bool virtio_is_big_endian(void)
2788{
2789#if defined(TARGET_WORDS_BIGENDIAN)
2790 return true;
2791#else
2792 return false;
2793#endif
2794}
2795
2796#endif
2797
76f35538 2798#ifndef CONFIG_USER_ONLY
a8170e5e 2799bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2800{
5c8a00ce 2801 MemoryRegion *mr;
149f54b5 2802 hwaddr l = 1;
76f35538 2803
5c8a00ce
PB
2804 mr = address_space_translate(&address_space_memory,
2805 phys_addr, &phys_addr, &l, false);
76f35538 2806
5c8a00ce
PB
2807 return !(memory_region_is_ram(mr) ||
2808 memory_region_is_romd(mr));
76f35538 2809}
bd2fa51f
MH
2810
2811void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2812{
2813 RAMBlock *block;
2814
2815 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2816 func(block->host, block->offset, block->length, opaque);
2817 }
2818}
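/* The callback receives each block's host pointer, guest ram_addr_t offset
 * and length, matching the call above.  Sketch of a caller (count_bytes and
 * the running total are hypothetical):
 *
 *     static void count_bytes(void *host, ram_addr_t offset,
 *                             ram_addr_t length, void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(count_bytes, &total);
 */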
ec3f8c99 2819#endif