/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
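
/* With ADDR_SPACE_BITS = 64, 9-bit levels and a 4K-page target
 * (TARGET_PAGE_BITS = 12), P_L2_LEVELS works out to
 * ((64 - 12 - 1) / 9) + 1 = 6, i.e. a six-level radix tree whose leaf
 * entries point at MemoryRegionSections.
 */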

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

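/* phys_page_set_level() walks the tree top-down: at each level a slot covers
 * "step" pages, and a sub-range that is step-aligned and at least step pages
 * long is recorded as a leaf right there; anything smaller or misaligned
 * recurses one level down.  For example, a well-aligned 512-page section
 * occupies a single level-1 entry, while a 3-page section ends up as three
 * level-0 leaves.
 */
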
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

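/* After compaction a node whose only child is another non-leaf node absorbs
 * the child's skip count, so phys_page_find() can drop several radix levels
 * in a single step instead of dereferencing every intermediate node.
 */
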
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

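/* The range_covers_byte() check at the end of phys_page_find() compensates
 * for compaction: a lookup may land on a leaf whose section does not actually
 * cover addr, in which case the address is treated as unassigned.
 */
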
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

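/* address_space_translate() may iterate through a chain of IOMMUs: each
 * translation can redirect the access into another AddressSpace, where the
 * lookup is repeated, and *plen is clamped at every hop so the caller never
 * sees a contiguous range larger than the smallest translation unit crossed.
 */
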
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        } , {
            /* empty */
        }
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
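
/* Example: a 4-byte watchpoint at 0x1000 (wpend 0x1003) overlaps a 2-byte
 * access at 0x1002 (addrend 0x1003), so the function returns true; an access
 * starting at 0x1004 lies above wpend and returns false.  Comparing the
 * inclusive end addresses keeps this correct even when a range ends at the
 * very top of the address space.
 */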

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

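/* ram_list.mru_block acts as a single-entry cache: lookups that hit the most
 * recently used RAMBlock (the common case) avoid walking the whole block
 * list under RCU.
 */
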
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range_type(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
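
/* The returned iotlb value is overloaded: for RAM it is the page's ram_addr_t
 * ORed with a special section index (notdirty or ROM); for MMIO it is the
 * index of the MemoryRegionSection plus the offset within it; for watched
 * pages it selects the watchpoint section so the access takes the slow path.
 */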
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

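/* mem_add() carves a MemoryRegionSection into at most three parts: an
 * unaligned head and tail that go through register_subpage(), and a
 * page-aligned middle that register_multipage() maps as whole pages.  A
 * section that neither starts nor ends on a page boundary therefore produces
 * two subpages plus one multipage run.
 */
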
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = NULL;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        error_report("%s", error_get_pretty(*errp));
        exit(1);
    }
    return NULL;
}
#endif

/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

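/* find_ram_offset() is a best-fit search over the ram_addr_t space: for every
 * existing block it measures the gap up to the next block and picks the
 * smallest gap that still fits the requested size, which keeps the offset
 * space compact as blocks of different sizes come and go.
 */
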
ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to
 * the resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = TARGET_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}

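/* Resizing only moves used_length within the preallocated max_length; the
 * host mapping itself is never remapped here.  Dirty bits for the old range
 * are cleared and the new range is marked dirty so migration sees the block
 * consistently after the resize.
 */
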
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}

#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif

static
ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                                   uint64_t length,
                                                   void *host),
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = TARGET_PAGE_ALIGN(size);
    max_size = TARGET_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            g_free_rcu(block, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            ptr = xen_map_cache(addr, 0, 0);
            goto unlock;
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    ptr = ramblock_ptr(block, addr - block->offset);

unlock:
    rcu_read_unlock();
    return ptr;
}

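/* The Xen special case above exists because guest RAM is not mapped into
 * QEMU's address space up front under Xen: the main RAM block (offset 0) is
 * mapped one page at a time through the map cache, while other blocks are
 * mapped in full on first use.
 */
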
38bee5dc 1724/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
ae3a7047 1725 * but takes a size argument.
0dc3f44a
MD
1726 *
1727 * By the time this function returns, the returned pointer is not protected
1728 * by RCU anymore. If the caller is not within an RCU critical section and
1729 * does not hold the iothread lock, it must have other means of protecting the
1730 * pointer, such as a reference to the region that includes the incoming
1731 * ram_addr_t.
ae3a7047 1732 */
cb85f7ab 1733static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1734{
ae3a7047 1735 void *ptr;
8ab934f9
SS
1736 if (*size == 0) {
1737 return NULL;
1738 }
868bb33f 1739 if (xen_enabled()) {
e41d7c69 1740 return xen_map_cache(addr, *size, 1);
868bb33f 1741 } else {
38bee5dc 1742 RAMBlock *block;
0dc3f44a
MD
1743 rcu_read_lock();
1744 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5
MT
1745 if (addr - block->offset < block->max_length) {
1746 if (addr - block->offset + *size > block->max_length)
1747 *size = block->max_length - addr + block->offset;
ae3a7047 1748 ptr = ramblock_ptr(block, addr - block->offset);
0dc3f44a 1749 rcu_read_unlock();
ae3a7047 1750 return ptr;
38bee5dc
SS
1751 }
1752 }
1753
1754 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1755 abort();
38bee5dc
SS
1756 }
1757}
1758
7443b437 1759/* Some of the softmmu routines need to translate from a host pointer
ae3a7047
MD
1760 * (typically a TLB entry) back to a ram offset.
1761 *
1762 * By the time this function returns, the returned pointer is not protected
1763 * by RCU anymore. If the caller is not within an RCU critical section and
1764 * does not hold the iothread lock, it must have other means of protecting the
1765 * pointer, such as a reference to the region that includes the incoming
1766 * ram_addr_t.
1767 */
1b5ec234 1768MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1769{
94a6b54f
PB
1770 RAMBlock *block;
1771 uint8_t *host = ptr;
ae3a7047 1772 MemoryRegion *mr;
94a6b54f 1773
868bb33f 1774 if (xen_enabled()) {
0dc3f44a 1775 rcu_read_lock();
e41d7c69 1776 *ram_addr = xen_ram_addr_from_mapcache(ptr);
ae3a7047 1777 mr = qemu_get_ram_block(*ram_addr)->mr;
0dc3f44a 1778 rcu_read_unlock();
ae3a7047 1779 return mr;
712c2b41
SS
1780 }
1781
0dc3f44a
MD
1782 rcu_read_lock();
1783 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 1784 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1785 goto found;
1786 }
1787
0dc3f44a 1788 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
432d268c
JN
1789         /* This case occurs when the block is not mapped. */
1790 if (block->host == NULL) {
1791 continue;
1792 }
9b8424d5 1793 if (host - block->host < block->max_length) {
23887b79 1794 goto found;
f471a17e 1795 }
94a6b54f 1796 }
432d268c 1797
0dc3f44a 1798 rcu_read_unlock();
1b5ec234 1799 return NULL;
23887b79
PB
1800
1801found:
1802 *ram_addr = block->offset + (host - block->host);
ae3a7047 1803 mr = block->mr;
0dc3f44a 1804 rcu_read_unlock();
ae3a7047 1805 return mr;
e890261f 1806}
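
/* Illustrative sketch (assumption, not part of the original exec.c):
 * translating a host pointer (e.g. the host address stored in a TLB
 * entry) back to a ram_addr_t.  A NULL MemoryRegion means the pointer
 * does not point into guest RAM.  The helper name is made up for this
 * example. */
static bool host_ptr_to_ram_addr(void *host, ram_addr_t *out)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr = qemu_ram_addr_from_host(host, &ram_addr);

    if (!mr) {
        return false;
    }
    *out = ram_addr;
    return true;
}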
f471a17e 1807
a8170e5e 1808static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1809 uint64_t val, unsigned size)
9fa3e853 1810{
52159192 1811 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1812 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1813 }
0e0df1e2
AK
1814 switch (size) {
1815 case 1:
1816 stb_p(qemu_get_ram_ptr(ram_addr), val);
1817 break;
1818 case 2:
1819 stw_p(qemu_get_ram_ptr(ram_addr), val);
1820 break;
1821 case 4:
1822 stl_p(qemu_get_ram_ptr(ram_addr), val);
1823 break;
1824 default:
1825 abort();
3a7d929e 1826 }
6886867e 1827 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
f23db169
FB
1828 /* we remove the notdirty callback only if the code has been
1829 flushed */
a2cd8c85 1830 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1831 CPUArchState *env = current_cpu->env_ptr;
93afeade 1832 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1833 }
9fa3e853
FB
1834}
1835
b018ddf6
PB
1836static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1837 unsigned size, bool is_write)
1838{
1839 return is_write;
1840}
1841
0e0df1e2 1842static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1843 .write = notdirty_mem_write,
b018ddf6 1844 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1845 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1846};
1847
0f459d16 1848/* Generate a debug exception if a watchpoint has been hit. */
66b9b43c 1849static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
0f459d16 1850{
93afeade
AF
1851 CPUState *cpu = current_cpu;
1852 CPUArchState *env = cpu->env_ptr;
06d55cc1 1853 target_ulong pc, cs_base;
0f459d16 1854 target_ulong vaddr;
a1d1bb31 1855 CPUWatchpoint *wp;
06d55cc1 1856 int cpu_flags;
0f459d16 1857
ff4700b0 1858 if (cpu->watchpoint_hit) {
06d55cc1
AL
1859 /* We re-entered the check after replacing the TB. Now raise
1860              * the debug interrupt so that it will trigger after the
1861 * current instruction. */
93afeade 1862 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1863 return;
1864 }
93afeade 1865 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1866 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1867 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1868 && (wp->flags & flags)) {
08225676
PM
1869 if (flags == BP_MEM_READ) {
1870 wp->flags |= BP_WATCHPOINT_HIT_READ;
1871 } else {
1872 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1873 }
1874 wp->hitaddr = vaddr;
66b9b43c 1875 wp->hitattrs = attrs;
ff4700b0
AF
1876 if (!cpu->watchpoint_hit) {
1877 cpu->watchpoint_hit = wp;
239c51a5 1878 tb_check_watchpoint(cpu);
6e140f28 1879 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1880 cpu->exception_index = EXCP_DEBUG;
5638d180 1881 cpu_loop_exit(cpu);
6e140f28
AL
1882 } else {
1883 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1884 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1885 cpu_resume_from_signal(cpu, NULL);
6e140f28 1886 }
06d55cc1 1887 }
6e140f28
AL
1888 } else {
1889 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1890 }
1891 }
1892}
1893
6658ffb8
PB
1894/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1895 so these check for a hit then pass through to the normal out-of-line
1896 phys routines. */
66b9b43c
PM
1897static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1898 unsigned size, MemTxAttrs attrs)
6658ffb8 1899{
66b9b43c
PM
1900 MemTxResult res;
1901 uint64_t data;
1902
1903 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
1ec9b909 1904 switch (size) {
66b9b43c
PM
1905 case 1:
1906 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
1907 break;
1908 case 2:
1909 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
1910 break;
1911 case 4:
1912 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
1913 break;
1ec9b909
AK
1914 default: abort();
1915 }
66b9b43c
PM
1916 *pdata = data;
1917 return res;
6658ffb8
PB
1918}
1919
66b9b43c
PM
1920static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1921 uint64_t val, unsigned size,
1922 MemTxAttrs attrs)
6658ffb8 1923{
66b9b43c
PM
1924 MemTxResult res;
1925
1926 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1ec9b909 1927 switch (size) {
67364150 1928 case 1:
66b9b43c 1929 address_space_stb(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1930 break;
1931 case 2:
66b9b43c 1932 address_space_stw(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1933 break;
1934 case 4:
66b9b43c 1935 address_space_stl(&address_space_memory, addr, val, attrs, &res);
67364150 1936 break;
1ec9b909
AK
1937 default: abort();
1938 }
66b9b43c 1939 return res;
6658ffb8
PB
1940}
1941
1ec9b909 1942static const MemoryRegionOps watch_mem_ops = {
66b9b43c
PM
1943 .read_with_attrs = watch_mem_read,
1944 .write_with_attrs = watch_mem_write,
1ec9b909 1945 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1946};
6658ffb8 1947
f25a49e0
PM
1948static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1949 unsigned len, MemTxAttrs attrs)
db7b5426 1950{
acc9d80b 1951 subpage_t *subpage = opaque;
ff6cff75 1952 uint8_t buf[8];
5c9eb028 1953 MemTxResult res;
791af8c8 1954
db7b5426 1955#if defined(DEBUG_SUBPAGE)
016e9d62 1956 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1957 subpage, len, addr);
db7b5426 1958#endif
5c9eb028
PM
1959 res = address_space_read(subpage->as, addr + subpage->base,
1960 attrs, buf, len);
1961 if (res) {
1962 return res;
f25a49e0 1963 }
acc9d80b
JK
1964 switch (len) {
1965 case 1:
f25a49e0
PM
1966 *data = ldub_p(buf);
1967 return MEMTX_OK;
acc9d80b 1968 case 2:
f25a49e0
PM
1969 *data = lduw_p(buf);
1970 return MEMTX_OK;
acc9d80b 1971 case 4:
f25a49e0
PM
1972 *data = ldl_p(buf);
1973 return MEMTX_OK;
ff6cff75 1974 case 8:
f25a49e0
PM
1975 *data = ldq_p(buf);
1976 return MEMTX_OK;
acc9d80b
JK
1977 default:
1978 abort();
1979 }
db7b5426
BS
1980}
1981
f25a49e0
PM
1982static MemTxResult subpage_write(void *opaque, hwaddr addr,
1983 uint64_t value, unsigned len, MemTxAttrs attrs)
db7b5426 1984{
acc9d80b 1985 subpage_t *subpage = opaque;
ff6cff75 1986 uint8_t buf[8];
acc9d80b 1987
db7b5426 1988#if defined(DEBUG_SUBPAGE)
016e9d62 1989 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1990 " value %"PRIx64"\n",
1991 __func__, subpage, len, addr, value);
db7b5426 1992#endif
acc9d80b
JK
1993 switch (len) {
1994 case 1:
1995 stb_p(buf, value);
1996 break;
1997 case 2:
1998 stw_p(buf, value);
1999 break;
2000 case 4:
2001 stl_p(buf, value);
2002 break;
ff6cff75
PB
2003 case 8:
2004 stq_p(buf, value);
2005 break;
acc9d80b
JK
2006 default:
2007 abort();
2008 }
5c9eb028
PM
2009 return address_space_write(subpage->as, addr + subpage->base,
2010 attrs, buf, len);
db7b5426
BS
2011}
2012
c353e4cc 2013static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 2014 unsigned len, bool is_write)
c353e4cc 2015{
acc9d80b 2016 subpage_t *subpage = opaque;
c353e4cc 2017#if defined(DEBUG_SUBPAGE)
016e9d62 2018 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 2019 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
2020#endif
2021
acc9d80b 2022 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 2023 len, is_write);
c353e4cc
PB
2024}
2025
70c68e44 2026static const MemoryRegionOps subpage_ops = {
f25a49e0
PM
2027 .read_with_attrs = subpage_read,
2028 .write_with_attrs = subpage_write,
ff6cff75
PB
2029 .impl.min_access_size = 1,
2030 .impl.max_access_size = 8,
2031 .valid.min_access_size = 1,
2032 .valid.max_access_size = 8,
c353e4cc 2033 .valid.accepts = subpage_accepts,
70c68e44 2034 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
2035};
2036
c227f099 2037static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2038 uint16_t section)
db7b5426
BS
2039{
2040 int idx, eidx;
2041
2042 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2043 return -1;
2044 idx = SUBPAGE_IDX(start);
2045 eidx = SUBPAGE_IDX(end);
2046#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2047 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2048 __func__, mmio, start, end, idx, eidx, section);
db7b5426 2049#endif
db7b5426 2050 for (; idx <= eidx; idx++) {
5312bd8b 2051 mmio->sub_section[idx] = section;
db7b5426
BS
2052 }
2053
2054 return 0;
2055}
2056
acc9d80b 2057static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 2058{
c227f099 2059 subpage_t *mmio;
db7b5426 2060
7267c094 2061 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 2062
acc9d80b 2063 mmio->as = as;
1eec614b 2064 mmio->base = base;
2c9b15ca 2065 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 2066 NULL, TARGET_PAGE_SIZE);
b3b00c78 2067 mmio->iomem.subpage = true;
db7b5426 2068#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2069 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2070 mmio, base, TARGET_PAGE_SIZE);
db7b5426 2071#endif
b41aac4f 2072 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
2073
2074 return mmio;
2075}
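
/* Illustrative note (not part of the original exec.c): subpages are
 * needed when one target page is split between sections - say a 4 KiB
 * page whose first 2 KiB belong to a device region and whose last 2 KiB
 * are RAM.  The dispatch code then builds one subpage_t for that page,
 * subpage_register() records a distinct section id for each half, and
 * subpage_read()/subpage_write() forward every access to the right
 * section through the owning address space. */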
2076
a656e22f
PC
2077static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2078 MemoryRegion *mr)
5312bd8b 2079{
a656e22f 2080 assert(as);
5312bd8b 2081 MemoryRegionSection section = {
a656e22f 2082 .address_space = as,
5312bd8b
AK
2083 .mr = mr,
2084 .offset_within_address_space = 0,
2085 .offset_within_region = 0,
052e87b0 2086 .size = int128_2_64(),
5312bd8b
AK
2087 };
2088
53cb28cb 2089 return phys_section_add(map, &section);
5312bd8b
AK
2090}
2091
9d82b5a7 2092MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
aa102231 2093{
79e2b9ae
PB
2094 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2095 MemoryRegionSection *sections = d->map.sections;
9d82b5a7
PB
2096
2097 return sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
2098}
2099
e9179ce1
AK
2100static void io_mem_init(void)
2101{
1f6245e5 2102 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 2103 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 2104 NULL, UINT64_MAX);
2c9b15ca 2105 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 2106 NULL, UINT64_MAX);
2c9b15ca 2107 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 2108 NULL, UINT64_MAX);
e9179ce1
AK
2109}
2110
ac1970fb 2111static void mem_begin(MemoryListener *listener)
00752703
PB
2112{
2113 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
2114 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2115 uint16_t n;
2116
a656e22f 2117 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 2118 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 2119 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 2120 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 2121 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 2122 assert(n == PHYS_SECTION_ROM);
a656e22f 2123 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 2124 assert(n == PHYS_SECTION_WATCH);
00752703 2125
9736e55b 2126 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
2127 d->as = as;
2128 as->next_dispatch = d;
2129}
2130
79e2b9ae
PB
2131static void address_space_dispatch_free(AddressSpaceDispatch *d)
2132{
2133 phys_sections_free(&d->map);
2134 g_free(d);
2135}
2136
00752703 2137static void mem_commit(MemoryListener *listener)
ac1970fb 2138{
89ae337a 2139 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2140 AddressSpaceDispatch *cur = as->dispatch;
2141 AddressSpaceDispatch *next = as->next_dispatch;
2142
53cb28cb 2143 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2144
79e2b9ae 2145 atomic_rcu_set(&as->dispatch, next);
53cb28cb 2146 if (cur) {
79e2b9ae 2147 call_rcu(cur, address_space_dispatch_free, rcu);
53cb28cb 2148 }
9affd6fc
PB
2149}
2150
1d71148e 2151static void tcg_commit(MemoryListener *listener)
50c1e149 2152{
182735ef 2153 CPUState *cpu;
117712c3
AK
2154
2155 /* since each CPU stores ram addresses in its TLB cache, we must
2156 reset the modified entries */
2157 /* XXX: slow ! */
bdc44640 2158 CPU_FOREACH(cpu) {
33bde2e1
EI
2159 /* FIXME: Disentangle the cpu.h circular files deps so we can
2160 directly get the right CPU from listener. */
2161 if (cpu->tcg_as_listener != listener) {
2162 continue;
2163 }
76e5c76f 2164 cpu_reload_memory_map(cpu);
117712c3 2165 }
50c1e149
AK
2166}
2167
93632747
AK
2168static void core_log_global_start(MemoryListener *listener)
2169{
981fdf23 2170 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
2171}
2172
2173static void core_log_global_stop(MemoryListener *listener)
2174{
981fdf23 2175 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
2176}
2177
93632747 2178static MemoryListener core_memory_listener = {
93632747
AK
2179 .log_global_start = core_log_global_start,
2180 .log_global_stop = core_log_global_stop,
ac1970fb 2181 .priority = 1,
93632747
AK
2182};
2183
ac1970fb
AK
2184void address_space_init_dispatch(AddressSpace *as)
2185{
00752703 2186 as->dispatch = NULL;
89ae337a 2187 as->dispatch_listener = (MemoryListener) {
ac1970fb 2188 .begin = mem_begin,
00752703 2189 .commit = mem_commit,
ac1970fb
AK
2190 .region_add = mem_add,
2191 .region_nop = mem_add,
2192 .priority = 0,
2193 };
89ae337a 2194 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2195}
2196
6e48e8f9
PB
2197void address_space_unregister(AddressSpace *as)
2198{
2199 memory_listener_unregister(&as->dispatch_listener);
2200}
2201
83f3c251
AK
2202void address_space_destroy_dispatch(AddressSpace *as)
2203{
2204 AddressSpaceDispatch *d = as->dispatch;
2205
79e2b9ae
PB
2206 atomic_rcu_set(&as->dispatch, NULL);
2207 if (d) {
2208 call_rcu(d, address_space_dispatch_free, rcu);
2209 }
83f3c251
AK
2210}
2211
62152b8a
AK
2212static void memory_map_init(void)
2213{
7267c094 2214 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2215
57271d63 2216 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2217 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2218
7267c094 2219 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2220 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2221 65536);
7dca8043 2222 address_space_init(&address_space_io, system_io, "I/O");
93632747 2223
f6790af6 2224 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
2225}
2226
2227MemoryRegion *get_system_memory(void)
2228{
2229 return system_memory;
2230}
2231
309cb471
AK
2232MemoryRegion *get_system_io(void)
2233{
2234 return system_io;
2235}
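
/* Illustrative sketch (assumption, not part of the original exec.c):
 * board code typically maps an already-initialized MemoryRegion into the
 * global system memory returned by get_system_memory().  The helper name
 * and base-address parameter are made up for this example;
 * memory_region_add_subregion() comes from the memory API headers. */
static void map_board_ram(MemoryRegion *ram, hwaddr base)
{
    memory_region_add_subregion(get_system_memory(), base, ram);
}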
2236
e2eef170
PB
2237#endif /* !defined(CONFIG_USER_ONLY) */
2238
13eb76e0
FB
2239/* physical memory access (slow version, mainly for debug) */
2240#if defined(CONFIG_USER_ONLY)
f17ec444 2241int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2242 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2243{
2244 int l, flags;
2245 target_ulong page;
53a5960a 2246 void * p;
13eb76e0
FB
2247
2248 while (len > 0) {
2249 page = addr & TARGET_PAGE_MASK;
2250 l = (page + TARGET_PAGE_SIZE) - addr;
2251 if (l > len)
2252 l = len;
2253 flags = page_get_flags(page);
2254 if (!(flags & PAGE_VALID))
a68fe89c 2255 return -1;
13eb76e0
FB
2256 if (is_write) {
2257 if (!(flags & PAGE_WRITE))
a68fe89c 2258 return -1;
579a97f7 2259 /* XXX: this code should not depend on lock_user */
72fb7daa 2260 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2261 return -1;
72fb7daa
AJ
2262 memcpy(p, buf, l);
2263 unlock_user(p, addr, l);
13eb76e0
FB
2264 } else {
2265 if (!(flags & PAGE_READ))
a68fe89c 2266 return -1;
579a97f7 2267 /* XXX: this code should not depend on lock_user */
72fb7daa 2268 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2269 return -1;
72fb7daa 2270 memcpy(buf, p, l);
5b257578 2271 unlock_user(p, addr, 0);
13eb76e0
FB
2272 }
2273 len -= l;
2274 buf += l;
2275 addr += l;
2276 }
a68fe89c 2277 return 0;
13eb76e0 2278}
8df1cd07 2279
13eb76e0 2280#else
51d7a9eb 2281
a8170e5e
AK
2282static void invalidate_and_set_dirty(hwaddr addr,
2283 hwaddr length)
51d7a9eb 2284{
f874bf90
PM
2285 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2286 tb_invalidate_phys_range(addr, addr + length, 0);
6886867e 2287 cpu_physical_memory_set_dirty_range_nocode(addr, length);
51d7a9eb 2288 }
e226939d 2289 xen_modified_memory(addr, length);
51d7a9eb
AP
2290}
2291
23326164 2292static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2293{
e1622f4b 2294 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2295
2296 /* Regions are assumed to support 1-4 byte accesses unless
2297 otherwise specified. */
23326164
RH
2298 if (access_size_max == 0) {
2299 access_size_max = 4;
2300 }
2301
2302 /* Bound the maximum access by the alignment of the address. */
2303 if (!mr->ops->impl.unaligned) {
2304 unsigned align_size_max = addr & -addr;
2305 if (align_size_max != 0 && align_size_max < access_size_max) {
2306 access_size_max = align_size_max;
2307 }
82f2563f 2308 }
23326164
RH
2309
2310 /* Don't attempt accesses larger than the maximum. */
2311 if (l > access_size_max) {
2312 l = access_size_max;
82f2563f 2313 }
098178f2
PB
2314 if (l & (l - 1)) {
2315 l = 1 << (qemu_fls(l) - 1);
2316 }
23326164
RH
2317
2318 return l;
82f2563f
PB
2319}
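
/* Illustrative worked example (not part of the original exec.c): for a
 * region with valid.max_access_size == 4 and impl.unaligned == false, a
 * request of l == 8 at an address ending in ...6 is first capped at 4 by
 * the region, then at 2 by the address alignment (0x...6 & -0x...6 == 2),
 * and stays at 2 since that is already a power of two. */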
2320
5c9eb028
PM
2321MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2322 uint8_t *buf, int len, bool is_write)
13eb76e0 2323{
149f54b5 2324 hwaddr l;
13eb76e0 2325 uint8_t *ptr;
791af8c8 2326 uint64_t val;
149f54b5 2327 hwaddr addr1;
5c8a00ce 2328 MemoryRegion *mr;
3b643495 2329 MemTxResult result = MEMTX_OK;
3b46e624 2330
41063e1e 2331 rcu_read_lock();
13eb76e0 2332 while (len > 0) {
149f54b5 2333 l = len;
5c8a00ce 2334 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2335
13eb76e0 2336 if (is_write) {
5c8a00ce
PB
2337 if (!memory_access_is_direct(mr, is_write)) {
2338 l = memory_access_size(mr, l, addr1);
4917cf44 2339 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2340 potential bugs */
23326164
RH
2341 switch (l) {
2342 case 8:
2343 /* 64 bit write access */
2344 val = ldq_p(buf);
3b643495
PM
2345 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2346 attrs);
23326164
RH
2347 break;
2348 case 4:
1c213d19 2349 /* 32 bit write access */
c27004ec 2350 val = ldl_p(buf);
3b643495
PM
2351 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2352 attrs);
23326164
RH
2353 break;
2354 case 2:
1c213d19 2355 /* 16 bit write access */
c27004ec 2356 val = lduw_p(buf);
3b643495
PM
2357 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2358 attrs);
23326164
RH
2359 break;
2360 case 1:
1c213d19 2361 /* 8 bit write access */
c27004ec 2362 val = ldub_p(buf);
3b643495
PM
2363 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2364 attrs);
23326164
RH
2365 break;
2366 default:
2367 abort();
13eb76e0 2368 }
2bbfa05d 2369 } else {
5c8a00ce 2370 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2371 /* RAM case */
5579c7f3 2372 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2373 memcpy(ptr, buf, l);
51d7a9eb 2374 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2375 }
2376 } else {
5c8a00ce 2377 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2378 /* I/O case */
5c8a00ce 2379 l = memory_access_size(mr, l, addr1);
23326164
RH
2380 switch (l) {
2381 case 8:
2382 /* 64 bit read access */
3b643495
PM
2383 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2384 attrs);
23326164
RH
2385 stq_p(buf, val);
2386 break;
2387 case 4:
13eb76e0 2388 /* 32 bit read access */
3b643495
PM
2389 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2390 attrs);
c27004ec 2391 stl_p(buf, val);
23326164
RH
2392 break;
2393 case 2:
13eb76e0 2394 /* 16 bit read access */
3b643495
PM
2395 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2396 attrs);
c27004ec 2397 stw_p(buf, val);
23326164
RH
2398 break;
2399 case 1:
1c213d19 2400 /* 8 bit read access */
3b643495
PM
2401 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2402 attrs);
c27004ec 2403 stb_p(buf, val);
23326164
RH
2404 break;
2405 default:
2406 abort();
13eb76e0
FB
2407 }
2408 } else {
2409 /* RAM case */
5c8a00ce 2410 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2411 memcpy(buf, ptr, l);
13eb76e0
FB
2412 }
2413 }
2414 len -= l;
2415 buf += l;
2416 addr += l;
2417 }
41063e1e 2418 rcu_read_unlock();
fd8aaa76 2419
3b643495 2420 return result;
13eb76e0 2421}
8df1cd07 2422
5c9eb028
PM
2423MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2424 const uint8_t *buf, int len)
ac1970fb 2425{
5c9eb028 2426 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
ac1970fb
AK
2427}
2428
5c9eb028
PM
2429MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2430 uint8_t *buf, int len)
ac1970fb 2431{
5c9eb028 2432 return address_space_rw(as, addr, attrs, buf, len, false);
ac1970fb
AK
2433}
2434
2435
a8170e5e 2436void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2437 int len, int is_write)
2438{
5c9eb028
PM
2439 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2440 buf, len, is_write);
ac1970fb
AK
2441}
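
/* Illustrative sketch (assumption, not part of the original exec.c): a
 * device model doing a checked DMA write into guest memory through the
 * attrs-aware API instead of discarding the MemTxResult.  The helper
 * name is made up for this example. */
static bool dma_write_checked(AddressSpace *as, hwaddr addr,
                              const uint8_t *buf, int len)
{
    return address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
                               buf, len) == MEMTX_OK;
}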
2442
582b55a9
AG
2443enum write_rom_type {
2444 WRITE_DATA,
2445 FLUSH_CACHE,
2446};
2447
2a221651 2448static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2449 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2450{
149f54b5 2451 hwaddr l;
d0ecd2aa 2452 uint8_t *ptr;
149f54b5 2453 hwaddr addr1;
5c8a00ce 2454 MemoryRegion *mr;
3b46e624 2455
41063e1e 2456 rcu_read_lock();
d0ecd2aa 2457 while (len > 0) {
149f54b5 2458 l = len;
2a221651 2459 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2460
5c8a00ce
PB
2461 if (!(memory_region_is_ram(mr) ||
2462 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2463 /* do nothing */
2464 } else {
5c8a00ce 2465 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2466 /* ROM/RAM case */
5579c7f3 2467 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2468 switch (type) {
2469 case WRITE_DATA:
2470 memcpy(ptr, buf, l);
2471 invalidate_and_set_dirty(addr1, l);
2472 break;
2473 case FLUSH_CACHE:
2474 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2475 break;
2476 }
d0ecd2aa
FB
2477 }
2478 len -= l;
2479 buf += l;
2480 addr += l;
2481 }
41063e1e 2482 rcu_read_unlock();
d0ecd2aa
FB
2483}
2484
582b55a9 2485/* used for ROM loading : can write in RAM and ROM */
2a221651 2486void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2487 const uint8_t *buf, int len)
2488{
2a221651 2489 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2490}
2491
2492void cpu_flush_icache_range(hwaddr start, int len)
2493{
2494 /*
2495 * This function should do the same thing as an icache flush that was
2496 * triggered from within the guest. For TCG we are always cache coherent,
2497 * so there is no need to flush anything. For KVM / Xen we need to flush
2498 * the host's instruction cache at least.
2499 */
2500 if (tcg_enabled()) {
2501 return;
2502 }
2503
2a221651
EI
2504 cpu_physical_memory_write_rom_internal(&address_space_memory,
2505 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2506}
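
/* Illustrative sketch (assumption, not part of the original exec.c):
 * board/reset code copying a firmware blob into ROM and keeping the host
 * instruction cache coherent for KVM/Xen.  The helper name, blob and
 * base address are made up for this example. */
static void load_firmware_blob(const uint8_t *blob, int size, hwaddr base)
{
    cpu_physical_memory_write_rom(&address_space_memory, base, blob, size);
    cpu_flush_icache_range(base, size);
}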
2507
6d16c2f8 2508typedef struct {
d3e71559 2509 MemoryRegion *mr;
6d16c2f8 2510 void *buffer;
a8170e5e
AK
2511 hwaddr addr;
2512 hwaddr len;
c2cba0ff 2513 bool in_use;
6d16c2f8
AL
2514} BounceBuffer;
2515
2516static BounceBuffer bounce;
2517
ba223c29 2518typedef struct MapClient {
e95205e1 2519 QEMUBH *bh;
72cf2d4f 2520 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2521} MapClient;
2522
38e047b5 2523QemuMutex map_client_list_lock;
72cf2d4f
BS
2524static QLIST_HEAD(map_client_list, MapClient) map_client_list
2525 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29 2526
e95205e1
FZ
2527static void cpu_unregister_map_client_do(MapClient *client)
2528{
2529 QLIST_REMOVE(client, link);
2530 g_free(client);
2531}
2532
33b6c2ed
FZ
2533static void cpu_notify_map_clients_locked(void)
2534{
2535 MapClient *client;
2536
2537 while (!QLIST_EMPTY(&map_client_list)) {
2538 client = QLIST_FIRST(&map_client_list);
e95205e1
FZ
2539 qemu_bh_schedule(client->bh);
2540 cpu_unregister_map_client_do(client);
33b6c2ed
FZ
2541 }
2542}
2543
e95205e1 2544void cpu_register_map_client(QEMUBH *bh)
ba223c29 2545{
7267c094 2546 MapClient *client = g_malloc(sizeof(*client));
ba223c29 2547
38e047b5 2548 qemu_mutex_lock(&map_client_list_lock);
e95205e1 2549 client->bh = bh;
72cf2d4f 2550 QLIST_INSERT_HEAD(&map_client_list, client, link);
33b6c2ed
FZ
2551 if (!atomic_read(&bounce.in_use)) {
2552 cpu_notify_map_clients_locked();
2553 }
38e047b5 2554 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2555}
2556
38e047b5 2557void cpu_exec_init_all(void)
ba223c29 2558{
38e047b5
FZ
2559 qemu_mutex_init(&ram_list.mutex);
2560 memory_map_init();
2561 io_mem_init();
2562 qemu_mutex_init(&map_client_list_lock);
ba223c29
AL
2563}
2564
e95205e1 2565void cpu_unregister_map_client(QEMUBH *bh)
ba223c29
AL
2566{
2567 MapClient *client;
2568
e95205e1
FZ
2569 qemu_mutex_lock(&map_client_list_lock);
2570 QLIST_FOREACH(client, &map_client_list, link) {
2571 if (client->bh == bh) {
2572 cpu_unregister_map_client_do(client);
2573 break;
2574 }
ba223c29 2575 }
e95205e1 2576 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2577}
2578
2579static void cpu_notify_map_clients(void)
2580{
38e047b5 2581 qemu_mutex_lock(&map_client_list_lock);
33b6c2ed 2582 cpu_notify_map_clients_locked();
38e047b5 2583 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2584}
2585
51644ab7
PB
2586bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2587{
5c8a00ce 2588 MemoryRegion *mr;
51644ab7
PB
2589 hwaddr l, xlat;
2590
41063e1e 2591 rcu_read_lock();
51644ab7
PB
2592 while (len > 0) {
2593 l = len;
5c8a00ce
PB
2594 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2595 if (!memory_access_is_direct(mr, is_write)) {
2596 l = memory_access_size(mr, l, addr);
2597 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2598 return false;
2599 }
2600 }
2601
2602 len -= l;
2603 addr += l;
2604 }
41063e1e 2605 rcu_read_unlock();
51644ab7
PB
2606 return true;
2607}
2608
6d16c2f8
AL
2609/* Map a physical memory region into a host virtual address.
2610 * May map a subset of the requested range, given by and returned in *plen.
2611 * May return NULL if resources needed to perform the mapping are exhausted.
2612 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2613 * Use cpu_register_map_client() to know when retrying the map operation is
2614 * likely to succeed.
6d16c2f8 2615 */
ac1970fb 2616void *address_space_map(AddressSpace *as,
a8170e5e
AK
2617 hwaddr addr,
2618 hwaddr *plen,
ac1970fb 2619 bool is_write)
6d16c2f8 2620{
a8170e5e 2621 hwaddr len = *plen;
e3127ae0
PB
2622 hwaddr done = 0;
2623 hwaddr l, xlat, base;
2624 MemoryRegion *mr, *this_mr;
2625 ram_addr_t raddr;
6d16c2f8 2626
e3127ae0
PB
2627 if (len == 0) {
2628 return NULL;
2629 }
38bee5dc 2630
e3127ae0 2631 l = len;
41063e1e 2632 rcu_read_lock();
e3127ae0 2633 mr = address_space_translate(as, addr, &xlat, &l, is_write);
41063e1e 2634
e3127ae0 2635 if (!memory_access_is_direct(mr, is_write)) {
c2cba0ff 2636 if (atomic_xchg(&bounce.in_use, true)) {
41063e1e 2637 rcu_read_unlock();
e3127ae0 2638 return NULL;
6d16c2f8 2639 }
e85d9db5
KW
2640 /* Avoid unbounded allocations */
2641 l = MIN(l, TARGET_PAGE_SIZE);
2642 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2643 bounce.addr = addr;
2644 bounce.len = l;
d3e71559
PB
2645
2646 memory_region_ref(mr);
2647 bounce.mr = mr;
e3127ae0 2648 if (!is_write) {
5c9eb028
PM
2649 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2650 bounce.buffer, l);
8ab934f9 2651 }
6d16c2f8 2652
41063e1e 2653 rcu_read_unlock();
e3127ae0
PB
2654 *plen = l;
2655 return bounce.buffer;
2656 }
2657
2658 base = xlat;
2659 raddr = memory_region_get_ram_addr(mr);
2660
2661 for (;;) {
6d16c2f8
AL
2662 len -= l;
2663 addr += l;
e3127ae0
PB
2664 done += l;
2665 if (len == 0) {
2666 break;
2667 }
2668
2669 l = len;
2670 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2671 if (this_mr != mr || xlat != base + done) {
2672 break;
2673 }
6d16c2f8 2674 }
e3127ae0 2675
d3e71559 2676 memory_region_ref(mr);
41063e1e 2677 rcu_read_unlock();
e3127ae0
PB
2678 *plen = done;
2679 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2680}
2681
ac1970fb 2682/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2683 * Will also mark the memory as dirty if is_write == 1. access_len gives
2684 * the amount of memory that was actually read or written by the caller.
2685 */
a8170e5e
AK
2686void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2687 int is_write, hwaddr access_len)
6d16c2f8
AL
2688{
2689 if (buffer != bounce.buffer) {
d3e71559
PB
2690 MemoryRegion *mr;
2691 ram_addr_t addr1;
2692
2693 mr = qemu_ram_addr_from_host(buffer, &addr1);
2694 assert(mr != NULL);
6d16c2f8 2695 if (is_write) {
6886867e 2696 invalidate_and_set_dirty(addr1, access_len);
6d16c2f8 2697 }
868bb33f 2698 if (xen_enabled()) {
e41d7c69 2699 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2700 }
d3e71559 2701 memory_region_unref(mr);
6d16c2f8
AL
2702 return;
2703 }
2704 if (is_write) {
5c9eb028
PM
2705 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2706 bounce.buffer, access_len);
6d16c2f8 2707 }
f8a83245 2708 qemu_vfree(bounce.buffer);
6d16c2f8 2709 bounce.buffer = NULL;
d3e71559 2710 memory_region_unref(bounce.mr);
c2cba0ff 2711 atomic_mb_set(&bounce.in_use, false);
ba223c29 2712 cpu_notify_map_clients();
6d16c2f8 2713}
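
/* Illustrative sketch (assumption, not part of the original exec.c): the
 * map/use/unmap pattern for zero-copy access to guest memory.  The
 * mapping may cover less than requested (*plen is shrunk) and may fail
 * while the bounce buffer is busy, so both cases are handled.  The
 * helper name and the memset payload are made up for this example. */
static bool fill_guest_range(AddressSpace *as, hwaddr addr, hwaddr len,
                             uint8_t pattern)
{
    while (len > 0) {
        hwaddr plen = len;
        void *p = address_space_map(as, addr, &plen, true);

        if (!p || plen == 0) {
            /* out of resources; cpu_register_map_client() can be used
             * to be notified when a retry is likely to succeed */
            return false;
        }
        memset(p, pattern, plen);
        address_space_unmap(as, p, plen, true, plen);
        addr += plen;
        len -= plen;
    }
    return true;
}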
d0ecd2aa 2714
a8170e5e
AK
2715void *cpu_physical_memory_map(hwaddr addr,
2716 hwaddr *plen,
ac1970fb
AK
2717 int is_write)
2718{
2719 return address_space_map(&address_space_memory, addr, plen, is_write);
2720}
2721
a8170e5e
AK
2722void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2723 int is_write, hwaddr access_len)
ac1970fb
AK
2724{
2725 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2726}
2727
8df1cd07 2728/* warning: addr must be aligned */
50013115
PM
2729static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2730 MemTxAttrs attrs,
2731 MemTxResult *result,
2732 enum device_endian endian)
8df1cd07 2733{
8df1cd07 2734 uint8_t *ptr;
791af8c8 2735 uint64_t val;
5c8a00ce 2736 MemoryRegion *mr;
149f54b5
PB
2737 hwaddr l = 4;
2738 hwaddr addr1;
50013115 2739 MemTxResult r;
8df1cd07 2740
41063e1e 2741 rcu_read_lock();
fdfba1a2 2742 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2743 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2744 /* I/O case */
50013115 2745 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
1e78bcc1
AG
2746#if defined(TARGET_WORDS_BIGENDIAN)
2747 if (endian == DEVICE_LITTLE_ENDIAN) {
2748 val = bswap32(val);
2749 }
2750#else
2751 if (endian == DEVICE_BIG_ENDIAN) {
2752 val = bswap32(val);
2753 }
2754#endif
8df1cd07
FB
2755 } else {
2756 /* RAM case */
5c8a00ce 2757 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2758 & TARGET_PAGE_MASK)
149f54b5 2759 + addr1);
1e78bcc1
AG
2760 switch (endian) {
2761 case DEVICE_LITTLE_ENDIAN:
2762 val = ldl_le_p(ptr);
2763 break;
2764 case DEVICE_BIG_ENDIAN:
2765 val = ldl_be_p(ptr);
2766 break;
2767 default:
2768 val = ldl_p(ptr);
2769 break;
2770 }
50013115
PM
2771 r = MEMTX_OK;
2772 }
2773 if (result) {
2774 *result = r;
8df1cd07 2775 }
41063e1e 2776 rcu_read_unlock();
8df1cd07
FB
2777 return val;
2778}
2779
50013115
PM
2780uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2781 MemTxAttrs attrs, MemTxResult *result)
2782{
2783 return address_space_ldl_internal(as, addr, attrs, result,
2784 DEVICE_NATIVE_ENDIAN);
2785}
2786
2787uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2788 MemTxAttrs attrs, MemTxResult *result)
2789{
2790 return address_space_ldl_internal(as, addr, attrs, result,
2791 DEVICE_LITTLE_ENDIAN);
2792}
2793
2794uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2795 MemTxAttrs attrs, MemTxResult *result)
2796{
2797 return address_space_ldl_internal(as, addr, attrs, result,
2798 DEVICE_BIG_ENDIAN);
2799}
2800
fdfba1a2 2801uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2802{
50013115 2803 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2804}
2805
fdfba1a2 2806uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2807{
50013115 2808 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2809}
2810
fdfba1a2 2811uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2812{
50013115 2813 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2814}
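
/* Illustrative sketch (assumption, not part of the original exec.c):
 * reading a little-endian 32-bit descriptor word from guest memory while
 * checking the transaction result rather than discarding it.  The helper
 * name is made up for this example. */
static bool read_desc_word(AddressSpace *as, hwaddr desc, uint32_t *val)
{
    MemTxResult r;

    *val = address_space_ldl_le(as, desc, MEMTXATTRS_UNSPECIFIED, &r);
    return r == MEMTX_OK;
}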
2815
84b7b8e7 2816/* warning: addr must be aligned */
50013115
PM
2817static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2818 MemTxAttrs attrs,
2819 MemTxResult *result,
2820 enum device_endian endian)
84b7b8e7 2821{
84b7b8e7
FB
2822 uint8_t *ptr;
2823 uint64_t val;
5c8a00ce 2824 MemoryRegion *mr;
149f54b5
PB
2825 hwaddr l = 8;
2826 hwaddr addr1;
50013115 2827 MemTxResult r;
84b7b8e7 2828
41063e1e 2829 rcu_read_lock();
2c17449b 2830 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2831 false);
2832 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2833 /* I/O case */
50013115 2834 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
968a5627
PB
2835#if defined(TARGET_WORDS_BIGENDIAN)
2836 if (endian == DEVICE_LITTLE_ENDIAN) {
2837 val = bswap64(val);
2838 }
2839#else
2840 if (endian == DEVICE_BIG_ENDIAN) {
2841 val = bswap64(val);
2842 }
84b7b8e7
FB
2843#endif
2844 } else {
2845 /* RAM case */
5c8a00ce 2846 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2847 & TARGET_PAGE_MASK)
149f54b5 2848 + addr1);
1e78bcc1
AG
2849 switch (endian) {
2850 case DEVICE_LITTLE_ENDIAN:
2851 val = ldq_le_p(ptr);
2852 break;
2853 case DEVICE_BIG_ENDIAN:
2854 val = ldq_be_p(ptr);
2855 break;
2856 default:
2857 val = ldq_p(ptr);
2858 break;
2859 }
50013115
PM
2860 r = MEMTX_OK;
2861 }
2862 if (result) {
2863 *result = r;
84b7b8e7 2864 }
41063e1e 2865 rcu_read_unlock();
84b7b8e7
FB
2866 return val;
2867}
2868
50013115
PM
2869uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2870 MemTxAttrs attrs, MemTxResult *result)
2871{
2872 return address_space_ldq_internal(as, addr, attrs, result,
2873 DEVICE_NATIVE_ENDIAN);
2874}
2875
2876uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2877 MemTxAttrs attrs, MemTxResult *result)
2878{
2879 return address_space_ldq_internal(as, addr, attrs, result,
2880 DEVICE_LITTLE_ENDIAN);
2881}
2882
2883uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2884 MemTxAttrs attrs, MemTxResult *result)
2885{
2886 return address_space_ldq_internal(as, addr, attrs, result,
2887 DEVICE_BIG_ENDIAN);
2888}
2889
2c17449b 2890uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2891{
50013115 2892 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2893}
2894
2c17449b 2895uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2896{
50013115 2897 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2898}
2899
2c17449b 2900uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2901{
50013115 2902 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2903}
2904
aab33094 2905/* XXX: optimize */
50013115
PM
2906uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
2907 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
2908{
2909 uint8_t val;
50013115
PM
2910 MemTxResult r;
2911
2912 r = address_space_rw(as, addr, attrs, &val, 1, 0);
2913 if (result) {
2914 *result = r;
2915 }
aab33094
FB
2916 return val;
2917}
2918
50013115
PM
2919uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2920{
2921 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2922}
2923
733f0b02 2924/* warning: addr must be aligned */
50013115
PM
2925static inline uint32_t address_space_lduw_internal(AddressSpace *as,
2926 hwaddr addr,
2927 MemTxAttrs attrs,
2928 MemTxResult *result,
2929 enum device_endian endian)
aab33094 2930{
733f0b02
MT
2931 uint8_t *ptr;
2932 uint64_t val;
5c8a00ce 2933 MemoryRegion *mr;
149f54b5
PB
2934 hwaddr l = 2;
2935 hwaddr addr1;
50013115 2936 MemTxResult r;
733f0b02 2937
41063e1e 2938 rcu_read_lock();
41701aa4 2939 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2940 false);
2941 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2942 /* I/O case */
50013115 2943 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
1e78bcc1
AG
2944#if defined(TARGET_WORDS_BIGENDIAN)
2945 if (endian == DEVICE_LITTLE_ENDIAN) {
2946 val = bswap16(val);
2947 }
2948#else
2949 if (endian == DEVICE_BIG_ENDIAN) {
2950 val = bswap16(val);
2951 }
2952#endif
733f0b02
MT
2953 } else {
2954 /* RAM case */
5c8a00ce 2955 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2956 & TARGET_PAGE_MASK)
149f54b5 2957 + addr1);
1e78bcc1
AG
2958 switch (endian) {
2959 case DEVICE_LITTLE_ENDIAN:
2960 val = lduw_le_p(ptr);
2961 break;
2962 case DEVICE_BIG_ENDIAN:
2963 val = lduw_be_p(ptr);
2964 break;
2965 default:
2966 val = lduw_p(ptr);
2967 break;
2968 }
50013115
PM
2969 r = MEMTX_OK;
2970 }
2971 if (result) {
2972 *result = r;
733f0b02 2973 }
41063e1e 2974 rcu_read_unlock();
733f0b02 2975 return val;
aab33094
FB
2976}
2977
50013115
PM
2978uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
2979 MemTxAttrs attrs, MemTxResult *result)
2980{
2981 return address_space_lduw_internal(as, addr, attrs, result,
2982 DEVICE_NATIVE_ENDIAN);
2983}
2984
2985uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
2986 MemTxAttrs attrs, MemTxResult *result)
2987{
2988 return address_space_lduw_internal(as, addr, attrs, result,
2989 DEVICE_LITTLE_ENDIAN);
2990}
2991
2992uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
2993 MemTxAttrs attrs, MemTxResult *result)
2994{
2995 return address_space_lduw_internal(as, addr, attrs, result,
2996 DEVICE_BIG_ENDIAN);
2997}
2998
41701aa4 2999uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3000{
50013115 3001 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3002}
3003
41701aa4 3004uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3005{
50013115 3006 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3007}
3008
41701aa4 3009uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3010{
50013115 3011 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3012}
3013
8df1cd07
FB
3014 /* warning: addr must be aligned. The ram page is not marked as dirty
3015    and the code inside is not invalidated. It is useful if the dirty
3016    bits are used to track modified PTEs */
50013115
PM
3017void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3018 MemTxAttrs attrs, MemTxResult *result)
8df1cd07 3019{
8df1cd07 3020 uint8_t *ptr;
5c8a00ce 3021 MemoryRegion *mr;
149f54b5
PB
3022 hwaddr l = 4;
3023 hwaddr addr1;
50013115 3024 MemTxResult r;
8df1cd07 3025
41063e1e 3026 rcu_read_lock();
2198a121 3027 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3028 true);
3029 if (l < 4 || !memory_access_is_direct(mr, true)) {
50013115 3030 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3031 } else {
5c8a00ce 3032 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3033 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3034 stl_p(ptr, val);
74576198
AL
3035
3036 if (unlikely(in_migration)) {
a2cd8c85 3037 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
3038 /* invalidate code */
3039 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3040 /* set dirty bit */
6886867e 3041 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
74576198
AL
3042 }
3043 }
50013115
PM
3044 r = MEMTX_OK;
3045 }
3046 if (result) {
3047 *result = r;
8df1cd07 3048 }
41063e1e 3049 rcu_read_unlock();
8df1cd07
FB
3050}
3051
50013115
PM
3052void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3053{
3054 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3055}
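
/* Illustrative sketch (assumption, not part of the original exec.c): the
 * typical user of the notdirty store is an MMU walk helper updating
 * accessed/dirty bits in a guest page-table entry, where marking the RAM
 * page dirty and invalidating TBs on every walk would be wasteful.  The
 * helper name and mask parameter are made up for this example. */
static void pte_set_accessed(AddressSpace *as, hwaddr pte_addr,
                             uint32_t pte, uint32_t accessed_mask)
{
    stl_phys_notdirty(as, pte_addr, pte | accessed_mask);
}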
3056
8df1cd07 3057/* warning: addr must be aligned */
50013115
PM
3058static inline void address_space_stl_internal(AddressSpace *as,
3059 hwaddr addr, uint32_t val,
3060 MemTxAttrs attrs,
3061 MemTxResult *result,
3062 enum device_endian endian)
8df1cd07 3063{
8df1cd07 3064 uint8_t *ptr;
5c8a00ce 3065 MemoryRegion *mr;
149f54b5
PB
3066 hwaddr l = 4;
3067 hwaddr addr1;
50013115 3068 MemTxResult r;
8df1cd07 3069
41063e1e 3070 rcu_read_lock();
ab1da857 3071 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3072 true);
3073 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3074#if defined(TARGET_WORDS_BIGENDIAN)
3075 if (endian == DEVICE_LITTLE_ENDIAN) {
3076 val = bswap32(val);
3077 }
3078#else
3079 if (endian == DEVICE_BIG_ENDIAN) {
3080 val = bswap32(val);
3081 }
3082#endif
50013115 3083 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3084 } else {
8df1cd07 3085 /* RAM case */
5c8a00ce 3086 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3087 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3088 switch (endian) {
3089 case DEVICE_LITTLE_ENDIAN:
3090 stl_le_p(ptr, val);
3091 break;
3092 case DEVICE_BIG_ENDIAN:
3093 stl_be_p(ptr, val);
3094 break;
3095 default:
3096 stl_p(ptr, val);
3097 break;
3098 }
51d7a9eb 3099 invalidate_and_set_dirty(addr1, 4);
50013115
PM
3100 r = MEMTX_OK;
3101 }
3102 if (result) {
3103 *result = r;
8df1cd07 3104 }
41063e1e 3105 rcu_read_unlock();
8df1cd07
FB
3106}
3107
50013115
PM
3108void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3109 MemTxAttrs attrs, MemTxResult *result)
3110{
3111 address_space_stl_internal(as, addr, val, attrs, result,
3112 DEVICE_NATIVE_ENDIAN);
3113}
3114
3115void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3116 MemTxAttrs attrs, MemTxResult *result)
3117{
3118 address_space_stl_internal(as, addr, val, attrs, result,
3119 DEVICE_LITTLE_ENDIAN);
3120}
3121
3122void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3123 MemTxAttrs attrs, MemTxResult *result)
3124{
3125 address_space_stl_internal(as, addr, val, attrs, result,
3126 DEVICE_BIG_ENDIAN);
3127}
3128
ab1da857 3129void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3130{
50013115 3131 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3132}
3133
ab1da857 3134void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3135{
50013115 3136 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3137}
3138
ab1da857 3139void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3140{
50013115 3141 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3142}
3143
aab33094 3144/* XXX: optimize */
50013115
PM
3145void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3146 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
3147{
3148 uint8_t v = val;
50013115
PM
3149 MemTxResult r;
3150
3151 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3152 if (result) {
3153 *result = r;
3154 }
3155}
3156
3157void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3158{
3159 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
aab33094
FB
3160}
3161
733f0b02 3162/* warning: addr must be aligned */
50013115
PM
3163static inline void address_space_stw_internal(AddressSpace *as,
3164 hwaddr addr, uint32_t val,
3165 MemTxAttrs attrs,
3166 MemTxResult *result,
3167 enum device_endian endian)
aab33094 3168{
733f0b02 3169 uint8_t *ptr;
5c8a00ce 3170 MemoryRegion *mr;
149f54b5
PB
3171 hwaddr l = 2;
3172 hwaddr addr1;
50013115 3173 MemTxResult r;
733f0b02 3174
41063e1e 3175 rcu_read_lock();
5ce5944d 3176 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 3177 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3178#if defined(TARGET_WORDS_BIGENDIAN)
3179 if (endian == DEVICE_LITTLE_ENDIAN) {
3180 val = bswap16(val);
3181 }
3182#else
3183 if (endian == DEVICE_BIG_ENDIAN) {
3184 val = bswap16(val);
3185 }
3186#endif
50013115 3187 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
733f0b02 3188 } else {
733f0b02 3189 /* RAM case */
5c8a00ce 3190 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 3191 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3192 switch (endian) {
3193 case DEVICE_LITTLE_ENDIAN:
3194 stw_le_p(ptr, val);
3195 break;
3196 case DEVICE_BIG_ENDIAN:
3197 stw_be_p(ptr, val);
3198 break;
3199 default:
3200 stw_p(ptr, val);
3201 break;
3202 }
51d7a9eb 3203 invalidate_and_set_dirty(addr1, 2);
50013115
PM
3204 r = MEMTX_OK;
3205 }
3206 if (result) {
3207 *result = r;
733f0b02 3208 }
41063e1e 3209 rcu_read_unlock();
aab33094
FB
3210}
3211
50013115
PM
3212void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3213 MemTxAttrs attrs, MemTxResult *result)
3214{
3215 address_space_stw_internal(as, addr, val, attrs, result,
3216 DEVICE_NATIVE_ENDIAN);
3217}
3218
3219void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3220 MemTxAttrs attrs, MemTxResult *result)
3221{
3222 address_space_stw_internal(as, addr, val, attrs, result,
3223 DEVICE_LITTLE_ENDIAN);
3224}
3225
3226void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3227 MemTxAttrs attrs, MemTxResult *result)
3228{
3229 address_space_stw_internal(as, addr, val, attrs, result,
3230 DEVICE_BIG_ENDIAN);
3231}
3232
5ce5944d 3233void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3234{
50013115 3235 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3236}
3237
5ce5944d 3238void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3239{
50013115 3240 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3241}
3242
5ce5944d 3243void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3244{
50013115 3245 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3246}
3247
aab33094 3248/* XXX: optimize */
50013115
PM
3249void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3250 MemTxAttrs attrs, MemTxResult *result)
aab33094 3251{
50013115 3252 MemTxResult r;
aab33094 3253 val = tswap64(val);
50013115
PM
3254 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3255 if (result) {
3256 *result = r;
3257 }
aab33094
FB
3258}
3259
50013115
PM
3260void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3261 MemTxAttrs attrs, MemTxResult *result)
1e78bcc1 3262{
50013115 3263 MemTxResult r;
1e78bcc1 3264 val = cpu_to_le64(val);
50013115
PM
3265 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3266 if (result) {
3267 *result = r;
3268 }
3269}
3270void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3271 MemTxAttrs attrs, MemTxResult *result)
3272{
3273 MemTxResult r;
3274 val = cpu_to_be64(val);
3275 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3276 if (result) {
3277 *result = r;
3278 }
3279}
3280
3281void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3282{
3283 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3284}
3285
3286void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3287{
3288 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3289}
3290
f606604f 3291void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1 3292{
50013115 3293 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3294}
3295
5e2972fd 3296/* virtual memory access for debug (includes writing to ROM) */
f17ec444 3297int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 3298 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3299{
3300 int l;
a8170e5e 3301 hwaddr phys_addr;
9b3c35e0 3302 target_ulong page;
13eb76e0
FB
3303
3304 while (len > 0) {
3305 page = addr & TARGET_PAGE_MASK;
f17ec444 3306 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
3307 /* if no physical page mapped, return an error */
3308 if (phys_addr == -1)
3309 return -1;
3310 l = (page + TARGET_PAGE_SIZE) - addr;
3311 if (l > len)
3312 l = len;
5e2972fd 3313 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
3314 if (is_write) {
3315 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3316 } else {
5c9eb028
PM
3317 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3318 buf, l, 0);
2e38847b 3319 }
13eb76e0
FB
3320 len -= l;
3321 buf += l;
3322 addr += l;
3323 }
3324 return 0;
3325}
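
/* Illustrative sketch (assumption, not part of the original exec.c): a
 * debugger-style read of guest-virtual memory (the gdbstub goes through
 * this path).  The per-CPU translation can fail on unmapped pages, which
 * cpu_memory_rw_debug() reports as -1.  The helper name is made up for
 * this example. */
static bool debug_read_u32(CPUState *cpu, target_ulong vaddr, uint32_t *val)
{
    return cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)val,
                               sizeof(*val), 0) == 0;
}
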
a68fe89c 3326#endif
13eb76e0 3327
8e4a424b
BS
3328/*
3329 * A helper function for the _utterly broken_ virtio device model to find out if
3330 * it's running on a big endian machine. Don't do this at home kids!
3331 */
98ed8ecf
GK
3332bool target_words_bigendian(void);
3333bool target_words_bigendian(void)
8e4a424b
BS
3334{
3335#if defined(TARGET_WORDS_BIGENDIAN)
3336 return true;
3337#else
3338 return false;
3339#endif
3340}
3341
76f35538 3342#ifndef CONFIG_USER_ONLY
a8170e5e 3343bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 3344{
5c8a00ce 3345 MemoryRegion*mr;
149f54b5 3346 hwaddr l = 1;
41063e1e 3347 bool res;
76f35538 3348
41063e1e 3349 rcu_read_lock();
5c8a00ce
PB
3350 mr = address_space_translate(&address_space_memory,
3351 phys_addr, &phys_addr, &l, false);
76f35538 3352
41063e1e
PB
3353 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3354 rcu_read_unlock();
3355 return res;
76f35538 3356}
bd2fa51f
MH
3357
3358void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3359{
3360 RAMBlock *block;
3361
0dc3f44a
MD
3362 rcu_read_lock();
3363 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5 3364 func(block->host, block->offset, block->used_length, opaque);
bd2fa51f 3365 }
0dc3f44a 3366 rcu_read_unlock();
bd2fa51f 3367}
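
/* Illustrative sketch (assumption, not part of the original exec.c): a
 * RAMBlockIterFunc callback summing the used length of every RAM block,
 * the way a migration-style walker might.  The callback arguments mirror
 * the func(block->host, block->offset, block->used_length, opaque) call
 * above; the helper names are made up for this example. */
static void sum_ram_cb(void *host_addr, ram_addr_t offset,
                       ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}

static uint64_t total_guest_ram_size(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(sum_ram_cb, &total);
    return total;
}
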
ec3f8c99 3368#endif