[mirror_qemu.git] / exec.c
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c
FB
22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
4485bd26 29#if !defined(CONFIG_USER_ONLY)
47c8ca53 30#include "hw/boards.h"
4485bd26 31#endif
cc9e98cb 32#include "hw/qdev.h"
1de7afc9 33#include "qemu/osdep.h"
9c17d615 34#include "sysemu/kvm.h"
2ff3de68 35#include "sysemu/sysemu.h"
0d09e41a 36#include "hw/xen/xen.h"
1de7afc9
PB
37#include "qemu/timer.h"
38#include "qemu/config-file.h"
75a34036 39#include "qemu/error-report.h"
022c62cb 40#include "exec/memory.h"
9c17d615 41#include "sysemu/dma.h"
022c62cb 42#include "exec/address-spaces.h"
53a5960a
PB
43#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
432d268c 45#else /* !CONFIG_USER_ONLY */
9c17d615 46#include "sysemu/xen-mapcache.h"
6506e4f9 47#include "trace.h"
53a5960a 48#endif
0d6d3c87 49#include "exec/cpu-all.h"
0dc3f44a 50#include "qemu/rcu_queue.h"
022c62cb 51#include "exec/cputlb.h"
5b6dd868 52#include "translate-all.h"
0cac1b66 53
022c62cb 54#include "exec/memory-internal.h"
220c3ebd 55#include "exec/ram_addr.h"
67d95c15 56
b35ba30f
MT
57#include "qemu/range.h"
58
db7b5426 59//#define DEBUG_SUBPAGE
1196be37 60
e2eef170 61#if !defined(CONFIG_USER_ONLY)
0dc3f44a
MD
62/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
63 * are protected by the ramlist lock.
64 */
0d53d9fe 65RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
66
67static MemoryRegion *system_memory;
309cb471 68static MemoryRegion *system_io;
62152b8a 69
f6790af6
AK
70AddressSpace address_space_io;
71AddressSpace address_space_memory;
2673a5da 72
0844e007 73MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 74static MemoryRegion io_mem_unassigned;
0e0df1e2 75
7bd4f430
PB
76/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
77#define RAM_PREALLOC (1 << 0)
78
dbcb8981
PB
79/* RAM is mmap-ed with MAP_SHARED */
80#define RAM_SHARED (1 << 1)
81
62be4e3a
MT
82/* Only a portion of RAM (used_length) is actually used, and migrated.
83 * This used_length size can change across reboots.
84 */
85#define RAM_RESIZEABLE (1 << 2)
86
e2eef170 87#endif
9fa3e853 88
bdc44640 89struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601
FB
90/* current CPU in the current thread. It is only valid inside
91 cpu_exec() */
4917cf44 92DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 93/* 0 = Do not count executed instructions.
bf20dc07 94 1 = Precise instruction counting.
2e70f6ef 95 2 = Adaptive rate instruction counting. */
5708fc66 96int use_icount;
6a00d601 97
e2eef170 98#if !defined(CONFIG_USER_ONLY)
4346ae3e 99
1db8abb1
PB
100typedef struct PhysPageEntry PhysPageEntry;
101
102struct PhysPageEntry {
9736e55b 103 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 104 uint32_t skip : 6;
9736e55b 105 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 106 uint32_t ptr : 26;
1db8abb1
PB
107};
108
8b795765
MT
109#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
110
03f49957 111/* Size of the L2 (and L3, etc) page tables. */
57271d63 112#define ADDR_SPACE_BITS 64
03f49957 113
026736ce 114#define P_L2_BITS 9
03f49957
PB
115#define P_L2_SIZE (1 << P_L2_BITS)
116
117#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
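/* Editor's note, illustrative only (not part of the original source): with the
 * common values ADDR_SPACE_BITS = 64, TARGET_PAGE_BITS = 12 (target dependent)
 * and P_L2_BITS = 9, the level count works out as
 *     P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = (51 / 9) + 1 = 5 + 1 = 6,
 * i.e. a six-level radix tree in which each level consumes 9 bits of the
 * guest page-frame number.
 */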
118
119typedef PhysPageEntry Node[P_L2_SIZE];
0475d94f 120
53cb28cb 121typedef struct PhysPageMap {
79e2b9ae
PB
122 struct rcu_head rcu;
123
53cb28cb
MA
124 unsigned sections_nb;
125 unsigned sections_nb_alloc;
126 unsigned nodes_nb;
127 unsigned nodes_nb_alloc;
128 Node *nodes;
129 MemoryRegionSection *sections;
130} PhysPageMap;
131
1db8abb1 132struct AddressSpaceDispatch {
79e2b9ae
PB
133 struct rcu_head rcu;
134
1db8abb1
PB
135 /* This is a multi-level map on the physical address space.
136 * The bottom level has pointers to MemoryRegionSections.
137 */
138 PhysPageEntry phys_map;
53cb28cb 139 PhysPageMap map;
acc9d80b 140 AddressSpace *as;
1db8abb1
PB
141};
142
90260c6c
JK
143#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
144typedef struct subpage_t {
145 MemoryRegion iomem;
acc9d80b 146 AddressSpace *as;
90260c6c
JK
147 hwaddr base;
148 uint16_t sub_section[TARGET_PAGE_SIZE];
149} subpage_t;
150
b41aac4f
LPF
151#define PHYS_SECTION_UNASSIGNED 0
152#define PHYS_SECTION_NOTDIRTY 1
153#define PHYS_SECTION_ROM 2
154#define PHYS_SECTION_WATCH 3
5312bd8b 155
e2eef170 156static void io_mem_init(void);
62152b8a 157static void memory_map_init(void);
09daed84 158static void tcg_commit(MemoryListener *listener);
e2eef170 159
1ec9b909 160static MemoryRegion io_mem_watch;
6658ffb8 161#endif
fd6ce8f6 162
6d9a1304 163#if !defined(CONFIG_USER_ONLY)
d6f2ea22 164
53cb28cb 165static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 166{
53cb28cb
MA
167 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
168 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
169 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
170 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 171 }
f7bf5461
AK
172}
173
db94604b 174static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
f7bf5461
AK
175{
176 unsigned i;
8b795765 177 uint32_t ret;
db94604b
PB
178 PhysPageEntry e;
179 PhysPageEntry *p;
f7bf5461 180
53cb28cb 181 ret = map->nodes_nb++;
db94604b 182 p = map->nodes[ret];
f7bf5461 183 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 184 assert(ret != map->nodes_nb_alloc);
db94604b
PB
185
186 e.skip = leaf ? 0 : 1;
187 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
03f49957 188 for (i = 0; i < P_L2_SIZE; ++i) {
db94604b 189 memcpy(&p[i], &e, sizeof(e));
d6f2ea22 190 }
f7bf5461 191 return ret;
d6f2ea22
AK
192}
193
53cb28cb
MA
194static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
195 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 196 int level)
f7bf5461
AK
197{
198 PhysPageEntry *p;
03f49957 199 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 200
9736e55b 201 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
db94604b 202 lp->ptr = phys_map_node_alloc(map, level == 0);
92e873b9 203 }
db94604b 204 p = map->nodes[lp->ptr];
03f49957 205 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 206
03f49957 207 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 208 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 209 lp->skip = 0;
c19e8800 210 lp->ptr = leaf;
07f07b31
AK
211 *index += step;
212 *nb -= step;
2999097b 213 } else {
53cb28cb 214 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b
AK
215 }
216 ++lp;
f7bf5461
AK
217 }
218}
219
ac1970fb 220static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 221 hwaddr index, hwaddr nb,
2999097b 222 uint16_t leaf)
f7bf5461 223{
2999097b 224 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 225 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 226
53cb28cb 227 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
228}
229
b35ba30f
MT
 230/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
231 * and update our entry so we can skip it and go directly to the destination.
232 */
233static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
234{
235 unsigned valid_ptr = P_L2_SIZE;
236 int valid = 0;
237 PhysPageEntry *p;
238 int i;
239
240 if (lp->ptr == PHYS_MAP_NODE_NIL) {
241 return;
242 }
243
244 p = nodes[lp->ptr];
245 for (i = 0; i < P_L2_SIZE; i++) {
246 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
247 continue;
248 }
249
250 valid_ptr = i;
251 valid++;
252 if (p[i].skip) {
253 phys_page_compact(&p[i], nodes, compacted);
254 }
255 }
256
257 /* We can only compress if there's only one child. */
258 if (valid != 1) {
259 return;
260 }
261
262 assert(valid_ptr < P_L2_SIZE);
263
264 /* Don't compress if it won't fit in the # of bits we have. */
265 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
266 return;
267 }
268
269 lp->ptr = p[valid_ptr].ptr;
270 if (!p[valid_ptr].skip) {
271 /* If our only child is a leaf, make this a leaf. */
272 /* By design, we should have made this node a leaf to begin with so we
273 * should never reach here.
274 * But since it's so simple to handle this, let's do it just in case we
275 * change this rule.
276 */
277 lp->skip = 0;
278 } else {
279 lp->skip += p[valid_ptr].skip;
280 }
281}
282
283static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
284{
285 DECLARE_BITMAP(compacted, nodes_nb);
286
287 if (d->phys_map.skip) {
53cb28cb 288 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
289 }
290}
291
97115a8d 292static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 293 Node *nodes, MemoryRegionSection *sections)
92e873b9 294{
31ab2b4a 295 PhysPageEntry *p;
97115a8d 296 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 297 int i;
f1f6e3b8 298
9736e55b 299 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 300 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 301 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 302 }
9affd6fc 303 p = nodes[lp.ptr];
03f49957 304 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 305 }
b35ba30f
MT
306
307 if (sections[lp.ptr].size.hi ||
308 range_covers_byte(sections[lp.ptr].offset_within_address_space,
309 sections[lp.ptr].size.lo, addr)) {
310 return &sections[lp.ptr];
311 } else {
312 return &sections[PHYS_SECTION_UNASSIGNED];
313 }
f3705d53
AK
314}
315
e5548617
BS
316bool memory_region_is_unassigned(MemoryRegion *mr)
317{
2a8e7499 318 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 319 && mr != &io_mem_watch;
fd6ce8f6 320}
149f54b5 321
79e2b9ae 322/* Called from RCU critical section */
c7086b4a 323static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
324 hwaddr addr,
325 bool resolve_subpage)
9f029603 326{
90260c6c
JK
327 MemoryRegionSection *section;
328 subpage_t *subpage;
329
53cb28cb 330 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
331 if (resolve_subpage && section->mr->subpage) {
332 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 333 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
334 }
335 return section;
9f029603
JK
336}
337
79e2b9ae 338/* Called from RCU critical section */
90260c6c 339static MemoryRegionSection *
c7086b4a 340address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 341 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
342{
343 MemoryRegionSection *section;
a87f3954 344 Int128 diff;
149f54b5 345
c7086b4a 346 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
347 /* Compute offset within MemoryRegionSection */
348 addr -= section->offset_within_address_space;
349
350 /* Compute offset within MemoryRegion */
351 *xlat = addr + section->offset_within_region;
352
353 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 354 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
355 return section;
356}
90260c6c 357
a87f3954
PB
358static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
359{
360 if (memory_region_is_ram(mr)) {
361 return !(is_write && mr->readonly);
362 }
363 if (memory_region_is_romd(mr)) {
364 return !is_write;
365 }
366
367 return false;
368}
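/* Editor's illustrative sketch (not part of the original source): the memory
 * access helpers use this predicate to pick between a plain host memcpy and
 * the MMIO dispatch path, roughly as in
 *
 *     if (memory_access_is_direct(mr, is_write)) {
 *         ptr = qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + xlat);
 *         memcpy(is_write ? ptr : buf, is_write ? buf : ptr, l);
 *     } else {
 *         // dispatch through the MemoryRegion read/write callbacks
 *     }
 *
 * The variable names above are placeholders; the real callers live in the
 * address_space_rw()/address_space_map() paths and in memory.c.
 */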
369
41063e1e 370/* Called from RCU critical section */
5c8a00ce
PB
371MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
372 hwaddr *xlat, hwaddr *plen,
373 bool is_write)
90260c6c 374{
30951157
AK
375 IOMMUTLBEntry iotlb;
376 MemoryRegionSection *section;
377 MemoryRegion *mr;
30951157
AK
378
379 for (;;) {
79e2b9ae
PB
380 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
381 section = address_space_translate_internal(d, addr, &addr, plen, true);
30951157
AK
382 mr = section->mr;
383
384 if (!mr->iommu_ops) {
385 break;
386 }
387
8d7b8cb9 388 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157
AK
389 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
390 | (addr & iotlb.addr_mask));
23820dbf 391 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
30951157
AK
392 if (!(iotlb.perm & (1 << is_write))) {
393 mr = &io_mem_unassigned;
394 break;
395 }
396
397 as = iotlb.target_as;
398 }
399
fe680d0d 400 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954 401 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
23820dbf 402 *plen = MIN(page, *plen);
a87f3954
PB
403 }
404
30951157
AK
405 *xlat = addr;
406 return mr;
90260c6c
JK
407}
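/* Editor's illustrative sketch (not part of the original source): because
 * address_space_translate() may shrink *plen to what fits in the current
 * MemoryRegionSection (or IOMMU mapping), callers normally loop until the
 * whole request has been covered:
 *
 *     rcu_read_lock();
 *     while (len > 0) {
 *         hwaddr xlat, l = len;
 *         MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l,
 *                                                    is_write);
 *         // access 'l' bytes of 'mr' at offset 'xlat'
 *         addr += l;
 *         buf += l;
 *         len -= l;
 *     }
 *     rcu_read_unlock();
 *
 * This mirrors the shape of address_space_rw(); the names are placeholders.
 */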
408
79e2b9ae 409/* Called from RCU critical section */
90260c6c 410MemoryRegionSection *
9d82b5a7
PB
411address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
412 hwaddr *xlat, hwaddr *plen)
90260c6c 413{
30951157 414 MemoryRegionSection *section;
9d82b5a7
PB
415 section = address_space_translate_internal(cpu->memory_dispatch,
416 addr, xlat, plen, false);
30951157
AK
417
418 assert(!section->mr->iommu_ops);
419 return section;
90260c6c 420}
5b6dd868 421#endif
fd6ce8f6 422
b170fce3 423#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
424
425static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 426{
259186a7 427 CPUState *cpu = opaque;
a513fe19 428
5b6dd868
BS
429 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
430 version_id is increased. */
259186a7 431 cpu->interrupt_request &= ~0x01;
c01a71c1 432 tlb_flush(cpu, 1);
5b6dd868
BS
433
434 return 0;
a513fe19 435}
7501267e 436
6c3bff0e
PD
437static int cpu_common_pre_load(void *opaque)
438{
439 CPUState *cpu = opaque;
440
adee6424 441 cpu->exception_index = -1;
6c3bff0e
PD
442
443 return 0;
444}
445
446static bool cpu_common_exception_index_needed(void *opaque)
447{
448 CPUState *cpu = opaque;
449
adee6424 450 return tcg_enabled() && cpu->exception_index != -1;
6c3bff0e
PD
451}
452
453static const VMStateDescription vmstate_cpu_common_exception_index = {
454 .name = "cpu_common/exception_index",
455 .version_id = 1,
456 .minimum_version_id = 1,
457 .fields = (VMStateField[]) {
458 VMSTATE_INT32(exception_index, CPUState),
459 VMSTATE_END_OF_LIST()
460 }
461};
462
1a1562f5 463const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
464 .name = "cpu_common",
465 .version_id = 1,
466 .minimum_version_id = 1,
6c3bff0e 467 .pre_load = cpu_common_pre_load,
5b6dd868 468 .post_load = cpu_common_post_load,
35d08458 469 .fields = (VMStateField[]) {
259186a7
AF
470 VMSTATE_UINT32(halted, CPUState),
471 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 472 VMSTATE_END_OF_LIST()
6c3bff0e
PD
473 },
474 .subsections = (VMStateSubsection[]) {
475 {
476 .vmsd = &vmstate_cpu_common_exception_index,
477 .needed = cpu_common_exception_index_needed,
478 } , {
479 /* empty */
480 }
5b6dd868
BS
481 }
482};
1a1562f5 483
5b6dd868 484#endif
ea041c0e 485
38d8f5c8 486CPUState *qemu_get_cpu(int index)
ea041c0e 487{
bdc44640 488 CPUState *cpu;
ea041c0e 489
bdc44640 490 CPU_FOREACH(cpu) {
55e5c285 491 if (cpu->cpu_index == index) {
bdc44640 492 return cpu;
55e5c285 493 }
ea041c0e 494 }
5b6dd868 495
bdc44640 496 return NULL;
ea041c0e
FB
497}
498
09daed84
EI
499#if !defined(CONFIG_USER_ONLY)
500void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
501{
502 /* We only support one address space per cpu at the moment. */
503 assert(cpu->as == as);
504
505 if (cpu->tcg_as_listener) {
506 memory_listener_unregister(cpu->tcg_as_listener);
507 } else {
508 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
509 }
510 cpu->tcg_as_listener->commit = tcg_commit;
511 memory_listener_register(cpu->tcg_as_listener, as);
512}
513#endif
514
5b6dd868 515void cpu_exec_init(CPUArchState *env)
ea041c0e 516{
5b6dd868 517 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 518 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 519 CPUState *some_cpu;
5b6dd868
BS
520 int cpu_index;
521
522#if defined(CONFIG_USER_ONLY)
523 cpu_list_lock();
524#endif
5b6dd868 525 cpu_index = 0;
bdc44640 526 CPU_FOREACH(some_cpu) {
5b6dd868
BS
527 cpu_index++;
528 }
55e5c285 529 cpu->cpu_index = cpu_index;
1b1ed8dc 530 cpu->numa_node = 0;
f0c3c505 531 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 532 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 533#ifndef CONFIG_USER_ONLY
09daed84 534 cpu->as = &address_space_memory;
5b6dd868 535 cpu->thread_id = qemu_get_thread_id();
cba70549 536 cpu_reload_memory_map(cpu);
5b6dd868 537#endif
bdc44640 538 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
539#if defined(CONFIG_USER_ONLY)
540 cpu_list_unlock();
541#endif
e0d47944
AF
542 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
543 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
544 }
5b6dd868 545#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
546 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
547 cpu_save, cpu_load, env);
b170fce3 548 assert(cc->vmsd == NULL);
e0d47944 549 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 550#endif
b170fce3
AF
551 if (cc->vmsd != NULL) {
552 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
553 }
ea041c0e
FB
554}
555
94df27fd 556#if defined(CONFIG_USER_ONLY)
00b941e5 557static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
558{
559 tb_invalidate_phys_page_range(pc, pc + 1, 0);
560}
561#else
00b941e5 562static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 563{
e8262a1b
MF
564 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
565 if (phys != -1) {
09daed84 566 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 567 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 568 }
1e7855a5 569}
c27004ec 570#endif
d720b93d 571
c527ee8f 572#if defined(CONFIG_USER_ONLY)
75a34036 573void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
574
575{
576}
577
3ee887e8
PM
578int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
579 int flags)
580{
581 return -ENOSYS;
582}
583
584void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
585{
586}
587
75a34036 588int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
589 int flags, CPUWatchpoint **watchpoint)
590{
591 return -ENOSYS;
592}
593#else
6658ffb8 594/* Add a watchpoint. */
75a34036 595int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 596 int flags, CPUWatchpoint **watchpoint)
6658ffb8 597{
c0ce998e 598 CPUWatchpoint *wp;
6658ffb8 599
05068c0d 600 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 601 if (len == 0 || (addr + len - 1) < addr) {
75a34036
AF
602 error_report("tried to set invalid watchpoint at %"
603 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
604 return -EINVAL;
605 }
7267c094 606 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
607
608 wp->vaddr = addr;
05068c0d 609 wp->len = len;
a1d1bb31
AL
610 wp->flags = flags;
611
2dc9f411 612 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
613 if (flags & BP_GDB) {
614 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
615 } else {
616 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
617 }
6658ffb8 618
31b030d4 619 tlb_flush_page(cpu, addr);
a1d1bb31
AL
620
621 if (watchpoint)
622 *watchpoint = wp;
623 return 0;
6658ffb8
PB
624}
625
a1d1bb31 626/* Remove a specific watchpoint. */
75a34036 627int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 628 int flags)
6658ffb8 629{
a1d1bb31 630 CPUWatchpoint *wp;
6658ffb8 631
ff4700b0 632 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 633 if (addr == wp->vaddr && len == wp->len
6e140f28 634 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 635 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
636 return 0;
637 }
638 }
a1d1bb31 639 return -ENOENT;
6658ffb8
PB
640}
641
a1d1bb31 642/* Remove a specific watchpoint by reference. */
75a34036 643void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 644{
ff4700b0 645 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 646
31b030d4 647 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 648
7267c094 649 g_free(watchpoint);
a1d1bb31
AL
650}
651
652/* Remove all matching watchpoints. */
75a34036 653void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 654{
c0ce998e 655 CPUWatchpoint *wp, *next;
a1d1bb31 656
ff4700b0 657 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
658 if (wp->flags & mask) {
659 cpu_watchpoint_remove_by_ref(cpu, wp);
660 }
c0ce998e 661 }
7d03f82f 662}
05068c0d
PM
663
664/* Return true if this watchpoint address matches the specified
 665 * access (i.e. the address range covered by the watchpoint overlaps
666 * partially or completely with the address range covered by the
667 * access).
668 */
669static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
670 vaddr addr,
671 vaddr len)
672{
673 /* We know the lengths are non-zero, but a little caution is
674 * required to avoid errors in the case where the range ends
675 * exactly at the top of the address space and so addr + len
676 * wraps round to zero.
677 */
678 vaddr wpend = wp->vaddr + wp->len - 1;
679 vaddr addrend = addr + len - 1;
680
681 return !(addr > wpend || wp->vaddr > addrend);
682}
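/* Editor's worked example (illustrative only): a watchpoint with
 * vaddr = 0x1000 and len = 4 gives wpend = 0x1003.  An access at
 * addr = 0x1002 with len = 8 gives addrend = 0x1009; since neither
 * 0x1002 > 0x1003 nor 0x1000 > 0x1009 holds, the ranges overlap and the
 * function returns true.  Working with inclusive end points is what keeps
 * the test correct even when addr + len would wrap past the top of the
 * address space.
 */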
683
c527ee8f 684#endif
7d03f82f 685
a1d1bb31 686/* Add a breakpoint. */
b3310ab3 687int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 688 CPUBreakpoint **breakpoint)
4c3a88a2 689{
c0ce998e 690 CPUBreakpoint *bp;
3b46e624 691
7267c094 692 bp = g_malloc(sizeof(*bp));
4c3a88a2 693
a1d1bb31
AL
694 bp->pc = pc;
695 bp->flags = flags;
696
2dc9f411 697 /* keep all GDB-injected breakpoints in front */
00b941e5 698 if (flags & BP_GDB) {
f0c3c505 699 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 700 } else {
f0c3c505 701 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 702 }
3b46e624 703
f0c3c505 704 breakpoint_invalidate(cpu, pc);
a1d1bb31 705
00b941e5 706 if (breakpoint) {
a1d1bb31 707 *breakpoint = bp;
00b941e5 708 }
4c3a88a2 709 return 0;
4c3a88a2
FB
710}
711
a1d1bb31 712/* Remove a specific breakpoint. */
b3310ab3 713int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 714{
a1d1bb31
AL
715 CPUBreakpoint *bp;
716
f0c3c505 717 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 718 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 719 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
720 return 0;
721 }
7d03f82f 722 }
a1d1bb31 723 return -ENOENT;
7d03f82f
EI
724}
725
a1d1bb31 726/* Remove a specific breakpoint by reference. */
b3310ab3 727void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 728{
f0c3c505
AF
729 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
730
731 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 732
7267c094 733 g_free(breakpoint);
a1d1bb31
AL
734}
735
736/* Remove all matching breakpoints. */
b3310ab3 737void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 738{
c0ce998e 739 CPUBreakpoint *bp, *next;
a1d1bb31 740
f0c3c505 741 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
742 if (bp->flags & mask) {
743 cpu_breakpoint_remove_by_ref(cpu, bp);
744 }
c0ce998e 745 }
4c3a88a2
FB
746}
747
c33a346e
FB
748/* enable or disable single step mode. EXCP_DEBUG is returned by the
749 CPU loop after each instruction */
3825b28f 750void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 751{
ed2803da
AF
752 if (cpu->singlestep_enabled != enabled) {
753 cpu->singlestep_enabled = enabled;
754 if (kvm_enabled()) {
38e478ec 755 kvm_update_guest_debug(cpu, 0);
ed2803da 756 } else {
ccbb4d44 757 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 758 /* XXX: only flush what is necessary */
38e478ec 759 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
760 tb_flush(env);
761 }
c33a346e 762 }
c33a346e
FB
763}
764
a47dddd7 765void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
766{
767 va_list ap;
493ae1f0 768 va_list ap2;
7501267e
FB
769
770 va_start(ap, fmt);
493ae1f0 771 va_copy(ap2, ap);
7501267e
FB
772 fprintf(stderr, "qemu: fatal: ");
773 vfprintf(stderr, fmt, ap);
774 fprintf(stderr, "\n");
878096ee 775 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
776 if (qemu_log_enabled()) {
777 qemu_log("qemu: fatal: ");
778 qemu_log_vprintf(fmt, ap2);
779 qemu_log("\n");
a0762859 780 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 781 qemu_log_flush();
93fcfe39 782 qemu_log_close();
924edcae 783 }
493ae1f0 784 va_end(ap2);
f9373291 785 va_end(ap);
fd052bf6
RV
786#if defined(CONFIG_USER_ONLY)
787 {
788 struct sigaction act;
789 sigfillset(&act.sa_mask);
790 act.sa_handler = SIG_DFL;
791 sigaction(SIGABRT, &act, NULL);
792 }
793#endif
7501267e
FB
794 abort();
795}
796
0124311e 797#if !defined(CONFIG_USER_ONLY)
0dc3f44a 798/* Called from RCU critical section */
041603fe
PB
799static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
800{
801 RAMBlock *block;
802
43771539 803 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 804 if (block && addr - block->offset < block->max_length) {
041603fe
PB
805 goto found;
806 }
0dc3f44a 807 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5 808 if (addr - block->offset < block->max_length) {
041603fe
PB
809 goto found;
810 }
811 }
812
813 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
814 abort();
815
816found:
43771539
PB
817 /* It is safe to write mru_block outside the iothread lock. This
818 * is what happens:
819 *
820 * mru_block = xxx
821 * rcu_read_unlock()
822 * xxx removed from list
823 * rcu_read_lock()
824 * read mru_block
825 * mru_block = NULL;
826 * call_rcu(reclaim_ramblock, xxx);
827 * rcu_read_unlock()
828 *
829 * atomic_rcu_set is not needed here. The block was already published
830 * when it was placed into the list. Here we're just making an extra
831 * copy of the pointer.
832 */
041603fe
PB
833 ram_list.mru_block = block;
834 return block;
835}
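/* Editor's illustrative sketch (not part of the original source): the
 * reader-side pattern the comment above relies on looks roughly like
 *
 *     rcu_read_lock();
 *     block = atomic_rcu_read(&ram_list.mru_block);
 *     if (!block || addr - block->offset >= block->max_length) {
 *         // slow path: walk ram_list.blocks with QLIST_FOREACH_RCU()
 *     }
 *     // use block->offset, block->host, ... while still in the critical section
 *     rcu_read_unlock();
 *
 * Writers only free an unlinked block through call_rcu()/g_free_rcu(), so a
 * stale mru_block pointer read here remains valid until rcu_read_unlock().
 */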
836
a2f4d5be 837static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 838{
041603fe 839 ram_addr_t start1;
a2f4d5be
JQ
840 RAMBlock *block;
841 ram_addr_t end;
842
843 end = TARGET_PAGE_ALIGN(start + length);
844 start &= TARGET_PAGE_MASK;
d24981d3 845
0dc3f44a 846 rcu_read_lock();
041603fe
PB
847 block = qemu_get_ram_block(start);
848 assert(block == qemu_get_ram_block(end - 1));
1240be24 849 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
041603fe 850 cpu_tlb_reset_dirty_all(start1, length);
0dc3f44a 851 rcu_read_unlock();
d24981d3
JQ
852}
853
5579c7f3 854/* Note: start and end must be within the same ram block. */
03eebc9e
SH
855bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
856 ram_addr_t length,
857 unsigned client)
1ccde1cb 858{
03eebc9e
SH
859 unsigned long end, page;
860 bool dirty;
861
862 if (length == 0) {
863 return false;
864 }
f23db169 865
03eebc9e
SH
866 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
867 page = start >> TARGET_PAGE_BITS;
868 dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
869 page, end - page);
870
871 if (dirty && tcg_enabled()) {
a2f4d5be 872 tlb_reset_dirty_range_all(start, length);
5579c7f3 873 }
03eebc9e
SH
874
875 return dirty;
1ccde1cb
FB
876}
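/* Editor's illustrative sketch (not part of the original source): a typical
 * consumer such as VGA refresh or RAM migration polls and clears dirty state
 * for the range it is about to copy:
 *
 *     if (cpu_physical_memory_test_and_clear_dirty(start, length,
 *                                                  DIRTY_MEMORY_VGA)) {
 *         // some page in [start, start + length) was written since the
 *         // last call for this client: redraw or resend it
 *     }
 *
 * DIRTY_MEMORY_VGA is just one of the per-client bitmaps held in
 * ram_list.dirty_memory[]; the client chosen here is only an example.
 */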
877
79e2b9ae 878/* Called from RCU critical section */
bb0e627a 879hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
880 MemoryRegionSection *section,
881 target_ulong vaddr,
882 hwaddr paddr, hwaddr xlat,
883 int prot,
884 target_ulong *address)
e5548617 885{
a8170e5e 886 hwaddr iotlb;
e5548617
BS
887 CPUWatchpoint *wp;
888
cc5bea60 889 if (memory_region_is_ram(section->mr)) {
e5548617
BS
890 /* Normal RAM. */
891 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 892 + xlat;
e5548617 893 if (!section->readonly) {
b41aac4f 894 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 895 } else {
b41aac4f 896 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
897 }
898 } else {
1b3fb98f 899 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 900 iotlb += xlat;
e5548617
BS
901 }
902
903 /* Make accesses to pages with watchpoints go via the
904 watchpoint trap routines. */
ff4700b0 905 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 906 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
907 /* Avoid trapping reads of pages with a write breakpoint. */
908 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 909 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
910 *address |= TLB_MMIO;
911 break;
912 }
913 }
914 }
915
916 return iotlb;
917}
9fa3e853
FB
918#endif /* defined(CONFIG_USER_ONLY) */
919
e2eef170 920#if !defined(CONFIG_USER_ONLY)
8da3ff18 921
c227f099 922static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 923 uint16_t section);
acc9d80b 924static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 925
a2b257d6
IM
926static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
927 qemu_anon_ram_alloc;
91138037
MA
928
929/*
 930 * Set a custom physical guest memory allocator.
931 * Accelerators with unusual needs may need this. Hopefully, we can
932 * get rid of it eventually.
933 */
a2b257d6 934void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
91138037
MA
935{
936 phys_mem_alloc = alloc;
937}
938
53cb28cb
MA
939static uint16_t phys_section_add(PhysPageMap *map,
940 MemoryRegionSection *section)
5312bd8b 941{
68f3f65b
PB
942 /* The physical section number is ORed with a page-aligned
943 * pointer to produce the iotlb entries. Thus it should
944 * never overflow into the page-aligned value.
945 */
53cb28cb 946 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 947
53cb28cb
MA
948 if (map->sections_nb == map->sections_nb_alloc) {
949 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
950 map->sections = g_renew(MemoryRegionSection, map->sections,
951 map->sections_nb_alloc);
5312bd8b 952 }
53cb28cb 953 map->sections[map->sections_nb] = *section;
dfde4e6e 954 memory_region_ref(section->mr);
53cb28cb 955 return map->sections_nb++;
5312bd8b
AK
956}
957
058bc4b5
PB
958static void phys_section_destroy(MemoryRegion *mr)
959{
dfde4e6e
PB
960 memory_region_unref(mr);
961
058bc4b5
PB
962 if (mr->subpage) {
963 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 964 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
965 g_free(subpage);
966 }
967}
968
6092666e 969static void phys_sections_free(PhysPageMap *map)
5312bd8b 970{
9affd6fc
PB
971 while (map->sections_nb > 0) {
972 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
973 phys_section_destroy(section->mr);
974 }
9affd6fc
PB
975 g_free(map->sections);
976 g_free(map->nodes);
5312bd8b
AK
977}
978
ac1970fb 979static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
980{
981 subpage_t *subpage;
a8170e5e 982 hwaddr base = section->offset_within_address_space
0f0cb164 983 & TARGET_PAGE_MASK;
97115a8d 984 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 985 d->map.nodes, d->map.sections);
0f0cb164
AK
986 MemoryRegionSection subsection = {
987 .offset_within_address_space = base,
052e87b0 988 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 989 };
a8170e5e 990 hwaddr start, end;
0f0cb164 991
f3705d53 992 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 993
f3705d53 994 if (!(existing->mr->subpage)) {
acc9d80b 995 subpage = subpage_init(d->as, base);
3be91e86 996 subsection.address_space = d->as;
0f0cb164 997 subsection.mr = &subpage->iomem;
ac1970fb 998 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 999 phys_section_add(&d->map, &subsection));
0f0cb164 1000 } else {
f3705d53 1001 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
1002 }
1003 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 1004 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
1005 subpage_register(subpage, start, end,
1006 phys_section_add(&d->map, section));
0f0cb164
AK
1007}
1008
1009
052e87b0
PB
1010static void register_multipage(AddressSpaceDispatch *d,
1011 MemoryRegionSection *section)
33417e70 1012{
a8170e5e 1013 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 1014 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
1015 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1016 TARGET_PAGE_BITS));
dd81124b 1017
733d5ef5
PB
1018 assert(num_pages);
1019 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
1020}
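/* Editor's worked example (illustrative only): with 4 KiB target pages, a
 * section starting at guest-physical 0x40000000 with a size of 0x200000
 * bytes is registered as
 *     start page = 0x40000000 >> 12 = 0x40000
 *     num_pages  = 0x200000   >> 12 = 0x200 (512 pages)
 * so phys_page_set() points 512 consecutive leaf entries at the same
 * section index.
 */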
1021
ac1970fb 1022static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 1023{
89ae337a 1024 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1025 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1026 MemoryRegionSection now = *section, remain = *section;
052e87b0 1027 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1028
733d5ef5
PB
1029 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1030 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1031 - now.offset_within_address_space;
1032
052e87b0 1033 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1034 register_subpage(d, &now);
733d5ef5 1035 } else {
052e87b0 1036 now.size = int128_zero();
733d5ef5 1037 }
052e87b0
PB
1038 while (int128_ne(remain.size, now.size)) {
1039 remain.size = int128_sub(remain.size, now.size);
1040 remain.offset_within_address_space += int128_get64(now.size);
1041 remain.offset_within_region += int128_get64(now.size);
69b67646 1042 now = remain;
052e87b0 1043 if (int128_lt(remain.size, page_size)) {
733d5ef5 1044 register_subpage(d, &now);
88266249 1045 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1046 now.size = page_size;
ac1970fb 1047 register_subpage(d, &now);
69b67646 1048 } else {
052e87b0 1049 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1050 register_multipage(d, &now);
69b67646 1051 }
0f0cb164
AK
1052 }
1053}
1054
62a2744c
SY
1055void qemu_flush_coalesced_mmio_buffer(void)
1056{
1057 if (kvm_enabled())
1058 kvm_flush_coalesced_mmio_buffer();
1059}
1060
b2a8658e
UD
1061void qemu_mutex_lock_ramlist(void)
1062{
1063 qemu_mutex_lock(&ram_list.mutex);
1064}
1065
1066void qemu_mutex_unlock_ramlist(void)
1067{
1068 qemu_mutex_unlock(&ram_list.mutex);
1069}
1070
e1e84ba0 1071#ifdef __linux__
c902760f
MT
1072
1073#include <sys/vfs.h>
1074
1075#define HUGETLBFS_MAGIC 0x958458f6
1076
fc7a5800 1077static long gethugepagesize(const char *path, Error **errp)
c902760f
MT
1078{
1079 struct statfs fs;
1080 int ret;
1081
1082 do {
9742bf26 1083 ret = statfs(path, &fs);
c902760f
MT
1084 } while (ret != 0 && errno == EINTR);
1085
1086 if (ret != 0) {
fc7a5800
HT
1087 error_setg_errno(errp, errno, "failed to get page size of file %s",
1088 path);
9742bf26 1089 return 0;
c902760f
MT
1090 }
1091
1092 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1093 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1094
1095 return fs.f_bsize;
1096}
1097
04b16653
AW
1098static void *file_ram_alloc(RAMBlock *block,
1099 ram_addr_t memory,
7f56e740
PB
1100 const char *path,
1101 Error **errp)
c902760f
MT
1102{
1103 char *filename;
8ca761f6
PF
1104 char *sanitized_name;
1105 char *c;
557529dd 1106 void *area = NULL;
c902760f 1107 int fd;
557529dd 1108 uint64_t hpagesize;
fc7a5800 1109 Error *local_err = NULL;
c902760f 1110
fc7a5800
HT
1111 hpagesize = gethugepagesize(path, &local_err);
1112 if (local_err) {
1113 error_propagate(errp, local_err);
f9a49dfa 1114 goto error;
c902760f 1115 }
a2b257d6 1116 block->mr->align = hpagesize;
c902760f
MT
1117
1118 if (memory < hpagesize) {
557529dd
HT
1119 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1120 "or larger than huge page size 0x%" PRIx64,
1121 memory, hpagesize);
1122 goto error;
c902760f
MT
1123 }
1124
1125 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1126 error_setg(errp,
1127 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1128 goto error;
c902760f
MT
1129 }
1130
8ca761f6 1131 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1132 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1133 for (c = sanitized_name; *c != '\0'; c++) {
1134 if (*c == '/')
1135 *c = '_';
1136 }
1137
1138 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1139 sanitized_name);
1140 g_free(sanitized_name);
c902760f
MT
1141
1142 fd = mkstemp(filename);
1143 if (fd < 0) {
7f56e740
PB
1144 error_setg_errno(errp, errno,
1145 "unable to create backing store for hugepages");
e4ada482 1146 g_free(filename);
f9a49dfa 1147 goto error;
c902760f
MT
1148 }
1149 unlink(filename);
e4ada482 1150 g_free(filename);
c902760f
MT
1151
1152 memory = (memory+hpagesize-1) & ~(hpagesize-1);
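    /* Editor's note (illustrative only): this rounds the size up to a whole
     * number of huge pages, e.g. with hpagesize = 2 MiB (0x200000) a request
     * of 0x310000 bytes becomes (0x310000 + 0x1fffff) & ~0x1fffff = 0x400000. */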
1153
1154 /*
1155 * ftruncate is not supported by hugetlbfs in older
1156 * hosts, so don't bother bailing out on errors.
1157 * If anything goes wrong with it under other filesystems,
1158 * mmap will fail.
1159 */
7f56e740 1160 if (ftruncate(fd, memory)) {
9742bf26 1161 perror("ftruncate");
7f56e740 1162 }
c902760f 1163
dbcb8981
PB
1164 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1165 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1166 fd, 0);
c902760f 1167 if (area == MAP_FAILED) {
7f56e740
PB
1168 error_setg_errno(errp, errno,
1169 "unable to map backing store for hugepages");
9742bf26 1170 close(fd);
f9a49dfa 1171 goto error;
c902760f 1172 }
ef36fa14
MT
1173
1174 if (mem_prealloc) {
38183310 1175 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1176 }
1177
04b16653 1178 block->fd = fd;
c902760f 1179 return area;
f9a49dfa
MT
1180
1181error:
1182 if (mem_prealloc) {
81b07353 1183 error_report("%s", error_get_pretty(*errp));
f9a49dfa
MT
1184 exit(1);
1185 }
1186 return NULL;
c902760f
MT
1187}
1188#endif
1189
0dc3f44a 1190/* Called with the ramlist lock held. */
d17b5288 1191static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1192{
1193 RAMBlock *block, *next_block;
3e837b2c 1194 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1195
49cd9ac6
SH
1196 assert(size != 0); /* it would hand out same offset multiple times */
1197
0dc3f44a 1198 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
04b16653 1199 return 0;
0d53d9fe 1200 }
04b16653 1201
0dc3f44a 1202 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
f15fbc4b 1203 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653 1204
62be4e3a 1205 end = block->offset + block->max_length;
04b16653 1206
0dc3f44a 1207 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
04b16653
AW
1208 if (next_block->offset >= end) {
1209 next = MIN(next, next_block->offset);
1210 }
1211 }
1212 if (next - end >= size && next - end < mingap) {
3e837b2c 1213 offset = end;
04b16653
AW
1214 mingap = next - end;
1215 }
1216 }
3e837b2c
AW
1217
1218 if (offset == RAM_ADDR_MAX) {
1219 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1220 (uint64_t)size);
1221 abort();
1222 }
1223
04b16653
AW
1224 return offset;
1225}
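/* Editor's worked example (illustrative only): with existing blocks covering
 * [0x0, 0x4000000) and [0x8000000, 0xc000000), a request for
 * size = 0x2000000 sees a 0x4000000-byte gap after the first block and an
 * effectively unbounded gap after the second.  Both fit, but the smaller gap
 * wins, so the new block is placed at offset 0x4000000.
 */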
1226
652d7ec2 1227ram_addr_t last_ram_offset(void)
d17b5288
AW
1228{
1229 RAMBlock *block;
1230 ram_addr_t last = 0;
1231
0dc3f44a
MD
1232 rcu_read_lock();
1233 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
62be4e3a 1234 last = MAX(last, block->offset + block->max_length);
0d53d9fe 1235 }
0dc3f44a 1236 rcu_read_unlock();
d17b5288
AW
1237 return last;
1238}
1239
ddb97f1d
JB
1240static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1241{
1242 int ret;
ddb97f1d
JB
1243
1244 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
47c8ca53 1245 if (!machine_dump_guest_core(current_machine)) {
ddb97f1d
JB
1246 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1247 if (ret) {
1248 perror("qemu_madvise");
1249 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1250 "but dump_guest_core=off specified\n");
1251 }
1252 }
1253}
1254
0dc3f44a
MD
1255/* Called within an RCU critical section, or while the ramlist lock
1256 * is held.
1257 */
20cfe881 1258static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1259{
20cfe881 1260 RAMBlock *block;
84b89d78 1261
0dc3f44a 1262 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1263 if (block->offset == addr) {
20cfe881 1264 return block;
c5705a77
AK
1265 }
1266 }
20cfe881
HT
1267
1268 return NULL;
1269}
1270
ae3a7047 1271/* Called with iothread lock held. */
20cfe881
HT
1272void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1273{
ae3a7047 1274 RAMBlock *new_block, *block;
20cfe881 1275
0dc3f44a 1276 rcu_read_lock();
ae3a7047 1277 new_block = find_ram_block(addr);
c5705a77
AK
1278 assert(new_block);
1279 assert(!new_block->idstr[0]);
84b89d78 1280
09e5ab63
AL
1281 if (dev) {
1282 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1283 if (id) {
1284 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1285 g_free(id);
84b89d78
CM
1286 }
1287 }
1288 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1289
0dc3f44a 1290 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1291 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1292 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1293 new_block->idstr);
1294 abort();
1295 }
1296 }
0dc3f44a 1297 rcu_read_unlock();
c5705a77
AK
1298}
1299
ae3a7047 1300/* Called with iothread lock held. */
20cfe881
HT
1301void qemu_ram_unset_idstr(ram_addr_t addr)
1302{
ae3a7047 1303 RAMBlock *block;
20cfe881 1304
ae3a7047
MD
1305 /* FIXME: arch_init.c assumes that this is not called throughout
1306 * migration. Ignore the problem since hot-unplug during migration
1307 * does not work anyway.
1308 */
1309
0dc3f44a 1310 rcu_read_lock();
ae3a7047 1311 block = find_ram_block(addr);
20cfe881
HT
1312 if (block) {
1313 memset(block->idstr, 0, sizeof(block->idstr));
1314 }
0dc3f44a 1315 rcu_read_unlock();
20cfe881
HT
1316}
1317
8490fc78
LC
1318static int memory_try_enable_merging(void *addr, size_t len)
1319{
75cc7f01 1320 if (!machine_mem_merge(current_machine)) {
8490fc78
LC
1321 /* disabled by the user */
1322 return 0;
1323 }
1324
1325 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1326}
1327
62be4e3a
MT
1328/* Only legal before guest might have detected the memory size: e.g. on
1329 * incoming migration, or right after reset.
1330 *
1331 * As memory core doesn't know how is memory accessed, it is up to
1332 * resize callback to update device state and/or add assertions to detect
1333 * misuse, if necessary.
1334 */
1335int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1336{
1337 RAMBlock *block = find_ram_block(base);
1338
1339 assert(block);
1340
129ddaf3
MT
1341 newsize = TARGET_PAGE_ALIGN(newsize);
1342
62be4e3a
MT
1343 if (block->used_length == newsize) {
1344 return 0;
1345 }
1346
1347 if (!(block->flags & RAM_RESIZEABLE)) {
1348 error_setg_errno(errp, EINVAL,
1349 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1350 " in != 0x" RAM_ADDR_FMT, block->idstr,
1351 newsize, block->used_length);
1352 return -EINVAL;
1353 }
1354
1355 if (block->max_length < newsize) {
1356 error_setg_errno(errp, EINVAL,
1357 "Length too large: %s: 0x" RAM_ADDR_FMT
1358 " > 0x" RAM_ADDR_FMT, block->idstr,
1359 newsize, block->max_length);
1360 return -EINVAL;
1361 }
1362
1363 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1364 block->used_length = newsize;
58d2707e
PB
1365 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1366 DIRTY_CLIENTS_ALL);
62be4e3a
MT
1367 memory_region_set_size(block->mr, newsize);
1368 if (block->resized) {
1369 block->resized(block->idstr, newsize, block->host);
1370 }
1371 return 0;
1372}
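/* Editor's illustrative sketch (not part of the original source): an incoming
 * migration stream carrying a different used_length for a resizeable block
 * would apply it roughly as
 *
 *     Error *local_err = NULL;
 *     if (qemu_ram_resize(block->offset, remote_used_length, &local_err) < 0) {
 *         error_report("%s", error_get_pretty(local_err));
 *         error_free(local_err);
 *         return -EINVAL;
 *     }
 *
 * remote_used_length is a placeholder for the value read from the stream;
 * the real caller sits in the migration code, not in this file.
 */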
1373
ef701d7b 1374static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1375{
e1c57ab8 1376 RAMBlock *block;
0d53d9fe 1377 RAMBlock *last_block = NULL;
2152f5ca
JQ
1378 ram_addr_t old_ram_size, new_ram_size;
1379
1380 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1381
b2a8658e 1382 qemu_mutex_lock_ramlist();
9b8424d5 1383 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1384
1385 if (!new_block->host) {
1386 if (xen_enabled()) {
9b8424d5
MT
1387 xen_ram_alloc(new_block->offset, new_block->max_length,
1388 new_block->mr);
e1c57ab8 1389 } else {
9b8424d5 1390 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1391 &new_block->mr->align);
39228250 1392 if (!new_block->host) {
ef701d7b
HT
1393 error_setg_errno(errp, errno,
1394 "cannot set up guest memory '%s'",
1395 memory_region_name(new_block->mr));
1396 qemu_mutex_unlock_ramlist();
1397 return -1;
39228250 1398 }
9b8424d5 1399 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1400 }
c902760f 1401 }
94a6b54f 1402
0d53d9fe
MD
1403 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1404 * QLIST (which has an RCU-friendly variant) does not have insertion at
1405 * tail, so save the last element in last_block.
1406 */
0dc3f44a 1407 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
0d53d9fe 1408 last_block = block;
9b8424d5 1409 if (block->max_length < new_block->max_length) {
abb26d63
PB
1410 break;
1411 }
1412 }
1413 if (block) {
0dc3f44a 1414 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
0d53d9fe 1415 } else if (last_block) {
0dc3f44a 1416 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
0d53d9fe 1417 } else { /* list is empty */
0dc3f44a 1418 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
abb26d63 1419 }
0d6d3c87 1420 ram_list.mru_block = NULL;
94a6b54f 1421
0dc3f44a
MD
1422 /* Write list before version */
1423 smp_wmb();
f798b07f 1424 ram_list.version++;
b2a8658e 1425 qemu_mutex_unlock_ramlist();
f798b07f 1426
2152f5ca
JQ
1427 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1428
1429 if (new_ram_size > old_ram_size) {
1ab4c8ce 1430 int i;
ae3a7047
MD
1431
1432 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1ab4c8ce
JQ
1433 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1434 ram_list.dirty_memory[i] =
1435 bitmap_zero_extend(ram_list.dirty_memory[i],
1436 old_ram_size, new_ram_size);
1437 }
2152f5ca 1438 }
9b8424d5 1439 cpu_physical_memory_set_dirty_range(new_block->offset,
58d2707e
PB
1440 new_block->used_length,
1441 DIRTY_CLIENTS_ALL);
94a6b54f 1442
a904c911
PB
1443 if (new_block->host) {
1444 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1445 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1446 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1447 if (kvm_enabled()) {
1448 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1449 }
e1c57ab8 1450 }
6f0437e8 1451
94a6b54f
PB
1452 return new_block->offset;
1453}
e9a1ab19 1454
0b183fc8 1455#ifdef __linux__
e1c57ab8 1456ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1457 bool share, const char *mem_path,
7f56e740 1458 Error **errp)
e1c57ab8
PB
1459{
1460 RAMBlock *new_block;
ef701d7b
HT
1461 ram_addr_t addr;
1462 Error *local_err = NULL;
e1c57ab8
PB
1463
1464 if (xen_enabled()) {
7f56e740
PB
1465 error_setg(errp, "-mem-path not supported with Xen");
1466 return -1;
e1c57ab8
PB
1467 }
1468
1469 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1470 /*
1471 * file_ram_alloc() needs to allocate just like
1472 * phys_mem_alloc, but we haven't bothered to provide
1473 * a hook there.
1474 */
7f56e740
PB
1475 error_setg(errp,
1476 "-mem-path not supported with this accelerator");
1477 return -1;
e1c57ab8
PB
1478 }
1479
1480 size = TARGET_PAGE_ALIGN(size);
1481 new_block = g_malloc0(sizeof(*new_block));
1482 new_block->mr = mr;
9b8424d5
MT
1483 new_block->used_length = size;
1484 new_block->max_length = size;
dbcb8981 1485 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1486 new_block->host = file_ram_alloc(new_block, size,
1487 mem_path, errp);
1488 if (!new_block->host) {
1489 g_free(new_block);
1490 return -1;
1491 }
1492
ef701d7b
HT
1493 addr = ram_block_add(new_block, &local_err);
1494 if (local_err) {
1495 g_free(new_block);
1496 error_propagate(errp, local_err);
1497 return -1;
1498 }
1499 return addr;
e1c57ab8 1500}
0b183fc8 1501#endif
e1c57ab8 1502
62be4e3a
MT
1503static
1504ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1505 void (*resized)(const char*,
1506 uint64_t length,
1507 void *host),
1508 void *host, bool resizeable,
ef701d7b 1509 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1510{
1511 RAMBlock *new_block;
ef701d7b
HT
1512 ram_addr_t addr;
1513 Error *local_err = NULL;
e1c57ab8
PB
1514
1515 size = TARGET_PAGE_ALIGN(size);
62be4e3a 1516 max_size = TARGET_PAGE_ALIGN(max_size);
e1c57ab8
PB
1517 new_block = g_malloc0(sizeof(*new_block));
1518 new_block->mr = mr;
62be4e3a 1519 new_block->resized = resized;
9b8424d5
MT
1520 new_block->used_length = size;
1521 new_block->max_length = max_size;
62be4e3a 1522 assert(max_size >= size);
e1c57ab8
PB
1523 new_block->fd = -1;
1524 new_block->host = host;
1525 if (host) {
7bd4f430 1526 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1527 }
62be4e3a
MT
1528 if (resizeable) {
1529 new_block->flags |= RAM_RESIZEABLE;
1530 }
ef701d7b
HT
1531 addr = ram_block_add(new_block, &local_err);
1532 if (local_err) {
1533 g_free(new_block);
1534 error_propagate(errp, local_err);
1535 return -1;
1536 }
1537 return addr;
e1c57ab8
PB
1538}
1539
62be4e3a
MT
1540ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1541 MemoryRegion *mr, Error **errp)
1542{
1543 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1544}
1545
ef701d7b 1546ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1547{
62be4e3a
MT
1548 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1549}
1550
1551ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1552 void (*resized)(const char*,
1553 uint64_t length,
1554 void *host),
1555 MemoryRegion *mr, Error **errp)
1556{
1557 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
1558}
1559
1f2e98b6
AW
1560void qemu_ram_free_from_ptr(ram_addr_t addr)
1561{
1562 RAMBlock *block;
1563
b2a8658e 1564 qemu_mutex_lock_ramlist();
0dc3f44a 1565 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1f2e98b6 1566 if (addr == block->offset) {
0dc3f44a 1567 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1568 ram_list.mru_block = NULL;
0dc3f44a
MD
1569 /* Write list before version */
1570 smp_wmb();
f798b07f 1571 ram_list.version++;
43771539 1572 g_free_rcu(block, rcu);
b2a8658e 1573 break;
1f2e98b6
AW
1574 }
1575 }
b2a8658e 1576 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1577}
1578
43771539
PB
1579static void reclaim_ramblock(RAMBlock *block)
1580{
1581 if (block->flags & RAM_PREALLOC) {
1582 ;
1583 } else if (xen_enabled()) {
1584 xen_invalidate_map_cache_entry(block->host);
1585#ifndef _WIN32
1586 } else if (block->fd >= 0) {
1587 munmap(block->host, block->max_length);
1588 close(block->fd);
1589#endif
1590 } else {
1591 qemu_anon_ram_free(block->host, block->max_length);
1592 }
1593 g_free(block);
1594}
1595
c227f099 1596void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1597{
04b16653
AW
1598 RAMBlock *block;
1599
b2a8658e 1600 qemu_mutex_lock_ramlist();
0dc3f44a 1601 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
04b16653 1602 if (addr == block->offset) {
0dc3f44a 1603 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1604 ram_list.mru_block = NULL;
0dc3f44a
MD
1605 /* Write list before version */
1606 smp_wmb();
f798b07f 1607 ram_list.version++;
43771539 1608 call_rcu(block, reclaim_ramblock, rcu);
b2a8658e 1609 break;
04b16653
AW
1610 }
1611 }
b2a8658e 1612 qemu_mutex_unlock_ramlist();
e9a1ab19
FB
1613}
1614
cd19cfa2
HY
1615#ifndef _WIN32
1616void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1617{
1618 RAMBlock *block;
1619 ram_addr_t offset;
1620 int flags;
1621 void *area, *vaddr;
1622
0dc3f44a 1623 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
cd19cfa2 1624 offset = addr - block->offset;
9b8424d5 1625 if (offset < block->max_length) {
1240be24 1626 vaddr = ramblock_ptr(block, offset);
7bd4f430 1627 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1628 ;
dfeaf2ab
MA
1629 } else if (xen_enabled()) {
1630 abort();
cd19cfa2
HY
1631 } else {
1632 flags = MAP_FIXED;
3435f395 1633 if (block->fd >= 0) {
dbcb8981
PB
1634 flags |= (block->flags & RAM_SHARED ?
1635 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1636 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1637 flags, block->fd, offset);
cd19cfa2 1638 } else {
2eb9fbaa
MA
1639 /*
1640 * Remap needs to match alloc. Accelerators that
1641 * set phys_mem_alloc never remap. If they did,
1642 * we'd need a remap hook here.
1643 */
1644 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1645
cd19cfa2
HY
1646 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1647 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1648 flags, -1, 0);
cd19cfa2
HY
1649 }
1650 if (area != vaddr) {
f15fbc4b
AP
1651 fprintf(stderr, "Could not remap addr: "
1652 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1653 length, addr);
1654 exit(1);
1655 }
8490fc78 1656 memory_try_enable_merging(vaddr, length);
ddb97f1d 1657 qemu_ram_setup_dump(vaddr, length);
cd19cfa2 1658 }
cd19cfa2
HY
1659 }
1660 }
1661}
1662#endif /* !_WIN32 */
1663
a35ba7be
PB
1664int qemu_get_ram_fd(ram_addr_t addr)
1665{
ae3a7047
MD
1666 RAMBlock *block;
1667 int fd;
a35ba7be 1668
0dc3f44a 1669 rcu_read_lock();
ae3a7047
MD
1670 block = qemu_get_ram_block(addr);
1671 fd = block->fd;
0dc3f44a 1672 rcu_read_unlock();
ae3a7047 1673 return fd;
a35ba7be
PB
1674}
1675
3fd74b84
DM
1676void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1677{
ae3a7047
MD
1678 RAMBlock *block;
1679 void *ptr;
3fd74b84 1680
0dc3f44a 1681 rcu_read_lock();
ae3a7047
MD
1682 block = qemu_get_ram_block(addr);
1683 ptr = ramblock_ptr(block, 0);
0dc3f44a 1684 rcu_read_unlock();
ae3a7047 1685 return ptr;
3fd74b84
DM
1686}
1687
1b5ec234 1688/* Return a host pointer to ram allocated with qemu_ram_alloc.
ae3a7047
MD
1689 * This should not be used for general purpose DMA. Use address_space_map
1690 * or address_space_rw instead. For local memory (e.g. video ram) that the
1691 * device owns, use memory_region_get_ram_ptr.
0dc3f44a
MD
1692 *
1693 * By the time this function returns, the returned pointer is not protected
1694 * by RCU anymore. If the caller is not within an RCU critical section and
1695 * does not hold the iothread lock, it must have other means of protecting the
1696 * pointer, such as a reference to the region that includes the incoming
1697 * ram_addr_t.
1b5ec234
PB
1698 */
1699void *qemu_get_ram_ptr(ram_addr_t addr)
1700{
ae3a7047
MD
1701 RAMBlock *block;
1702 void *ptr;
1b5ec234 1703
0dc3f44a 1704 rcu_read_lock();
ae3a7047
MD
1705 block = qemu_get_ram_block(addr);
1706
1707 if (xen_enabled() && block->host == NULL) {
0d6d3c87
PB
1708 /* We need to check if the requested address is in the RAM
1709 * because we don't want to map the entire memory in QEMU.
1710 * In that case just map until the end of the page.
1711 */
1712 if (block->offset == 0) {
ae3a7047 1713 ptr = xen_map_cache(addr, 0, 0);
0dc3f44a 1714 goto unlock;
0d6d3c87 1715 }
ae3a7047
MD
1716
1717 block->host = xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87 1718 }
ae3a7047
MD
1719 ptr = ramblock_ptr(block, addr - block->offset);
1720
0dc3f44a
MD
1721unlock:
1722 rcu_read_unlock();
ae3a7047 1723 return ptr;
dc828ca1
PB
1724}
1725
38bee5dc 1726/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
ae3a7047 1727 * but takes a size argument.
0dc3f44a
MD
1728 *
1729 * By the time this function returns, the returned pointer is not protected
1730 * by RCU anymore. If the caller is not within an RCU critical section and
1731 * does not hold the iothread lock, it must have other means of protecting the
1732 * pointer, such as a reference to the region that includes the incoming
1733 * ram_addr_t.
ae3a7047 1734 */
cb85f7ab 1735static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1736{
ae3a7047 1737 void *ptr;
8ab934f9
SS
1738 if (*size == 0) {
1739 return NULL;
1740 }
868bb33f 1741 if (xen_enabled()) {
e41d7c69 1742 return xen_map_cache(addr, *size, 1);
868bb33f 1743 } else {
38bee5dc 1744 RAMBlock *block;
0dc3f44a
MD
1745 rcu_read_lock();
1746 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5
MT
1747 if (addr - block->offset < block->max_length) {
1748 if (addr - block->offset + *size > block->max_length)
1749 *size = block->max_length - addr + block->offset;
ae3a7047 1750 ptr = ramblock_ptr(block, addr - block->offset);
0dc3f44a 1751 rcu_read_unlock();
ae3a7047 1752 return ptr;
38bee5dc
SS
1753 }
1754 }
1755
1756 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1757 abort();
38bee5dc
SS
1758 }
1759}
1760
7443b437 1761/* Some of the softmmu routines need to translate from a host pointer
ae3a7047
MD
1762 * (typically a TLB entry) back to a ram offset.
1763 *
1764 * By the time this function returns, the returned pointer is not protected
1765 * by RCU anymore. If the caller is not within an RCU critical section and
1766 * does not hold the iothread lock, it must have other means of protecting the
1767 * pointer, such as a reference to the region that includes the incoming
1768 * ram_addr_t.
1769 */
1b5ec234 1770MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1771{
94a6b54f
PB
1772 RAMBlock *block;
1773 uint8_t *host = ptr;
ae3a7047 1774 MemoryRegion *mr;
94a6b54f 1775
868bb33f 1776 if (xen_enabled()) {
0dc3f44a 1777 rcu_read_lock();
e41d7c69 1778 *ram_addr = xen_ram_addr_from_mapcache(ptr);
ae3a7047 1779 mr = qemu_get_ram_block(*ram_addr)->mr;
0dc3f44a 1780 rcu_read_unlock();
ae3a7047 1781 return mr;
712c2b41
SS
1782 }
1783
0dc3f44a
MD
1784 rcu_read_lock();
1785 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 1786 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1787 goto found;
1788 }
1789
0dc3f44a 1790 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
432d268c
JN
1791        /* This case appears when the block is not mapped. */
1792 if (block->host == NULL) {
1793 continue;
1794 }
9b8424d5 1795 if (host - block->host < block->max_length) {
23887b79 1796 goto found;
f471a17e 1797 }
94a6b54f 1798 }
432d268c 1799
0dc3f44a 1800 rcu_read_unlock();
1b5ec234 1801 return NULL;
23887b79
PB
1802
1803found:
1804 *ram_addr = block->offset + (host - block->host);
ae3a7047 1805 mr = block->mr;
0dc3f44a 1806 rcu_read_unlock();
ae3a7047 1807 return mr;
e890261f 1808}
f471a17e 1809
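/* Write handler for pages that still contain translated code: invalidate the
 * affected TBs, perform the store, then mark the range dirty so that this
 * notdirty slow path can be dropped once the code has been flushed.
 */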
a8170e5e 1810static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1811 uint64_t val, unsigned size)
9fa3e853 1812{
52159192 1813 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1814 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1815 }
0e0df1e2
AK
1816 switch (size) {
1817 case 1:
1818 stb_p(qemu_get_ram_ptr(ram_addr), val);
1819 break;
1820 case 2:
1821 stw_p(qemu_get_ram_ptr(ram_addr), val);
1822 break;
1823 case 4:
1824 stl_p(qemu_get_ram_ptr(ram_addr), val);
1825 break;
1826 default:
1827 abort();
3a7d929e 1828 }
58d2707e
PB
1829 /* Set both VGA and migration bits for simplicity and to remove
1830 * the notdirty callback faster.
1831 */
1832 cpu_physical_memory_set_dirty_range(ram_addr, size,
1833 DIRTY_CLIENTS_NOCODE);
f23db169
FB
1834 /* we remove the notdirty callback only if the code has been
1835 flushed */
a2cd8c85 1836 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1837 CPUArchState *env = current_cpu->env_ptr;
93afeade 1838 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1839 }
9fa3e853
FB
1840}
1841
b018ddf6
PB
1842static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1843 unsigned size, bool is_write)
1844{
1845 return is_write;
1846}
1847
0e0df1e2 1848static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1849 .write = notdirty_mem_write,
b018ddf6 1850 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1851 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1852};
1853
0f459d16 1854/* Generate a debug exception if a watchpoint has been hit. */
66b9b43c 1855static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
0f459d16 1856{
93afeade
AF
1857 CPUState *cpu = current_cpu;
1858 CPUArchState *env = cpu->env_ptr;
06d55cc1 1859 target_ulong pc, cs_base;
0f459d16 1860 target_ulong vaddr;
a1d1bb31 1861 CPUWatchpoint *wp;
06d55cc1 1862 int cpu_flags;
0f459d16 1863
ff4700b0 1864 if (cpu->watchpoint_hit) {
06d55cc1
AL
1865 /* We re-entered the check after replacing the TB. Now raise
1866         * the debug interrupt so that it will trigger after the
1867 * current instruction. */
93afeade 1868 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1869 return;
1870 }
93afeade 1871 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1872 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1873 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1874 && (wp->flags & flags)) {
08225676
PM
1875 if (flags == BP_MEM_READ) {
1876 wp->flags |= BP_WATCHPOINT_HIT_READ;
1877 } else {
1878 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1879 }
1880 wp->hitaddr = vaddr;
66b9b43c 1881 wp->hitattrs = attrs;
ff4700b0
AF
1882 if (!cpu->watchpoint_hit) {
1883 cpu->watchpoint_hit = wp;
239c51a5 1884 tb_check_watchpoint(cpu);
6e140f28 1885 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1886 cpu->exception_index = EXCP_DEBUG;
5638d180 1887 cpu_loop_exit(cpu);
6e140f28
AL
1888 } else {
1889 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1890 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1891 cpu_resume_from_signal(cpu, NULL);
6e140f28 1892 }
06d55cc1 1893 }
6e140f28
AL
1894 } else {
1895 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1896 }
1897 }
1898}
1899
6658ffb8
PB
1900/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1901 so these check for a hit then pass through to the normal out-of-line
1902 phys routines. */
66b9b43c
PM
1903static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1904 unsigned size, MemTxAttrs attrs)
6658ffb8 1905{
66b9b43c
PM
1906 MemTxResult res;
1907 uint64_t data;
1908
1909 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
1ec9b909 1910 switch (size) {
66b9b43c
PM
1911 case 1:
1912 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
1913 break;
1914 case 2:
1915 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
1916 break;
1917 case 4:
1918 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
1919 break;
1ec9b909
AK
1920 default: abort();
1921 }
66b9b43c
PM
1922 *pdata = data;
1923 return res;
6658ffb8
PB
1924}
1925
66b9b43c
PM
1926static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1927 uint64_t val, unsigned size,
1928 MemTxAttrs attrs)
6658ffb8 1929{
66b9b43c
PM
1930 MemTxResult res;
1931
1932 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1ec9b909 1933 switch (size) {
67364150 1934 case 1:
66b9b43c 1935 address_space_stb(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1936 break;
1937 case 2:
66b9b43c 1938 address_space_stw(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1939 break;
1940 case 4:
66b9b43c 1941 address_space_stl(&address_space_memory, addr, val, attrs, &res);
67364150 1942 break;
1ec9b909
AK
1943 default: abort();
1944 }
66b9b43c 1945 return res;
6658ffb8
PB
1946}
1947
1ec9b909 1948static const MemoryRegionOps watch_mem_ops = {
66b9b43c
PM
1949 .read_with_attrs = watch_mem_read,
1950 .write_with_attrs = watch_mem_write,
1ec9b909 1951 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1952};
6658ffb8 1953
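/* Subpage access handlers: when a target page is split between several
 * memory regions, accesses are bounced through these helpers, which forward
 * them to the owning address space at subpage->base + addr.
 */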
f25a49e0
PM
1954static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1955 unsigned len, MemTxAttrs attrs)
db7b5426 1956{
acc9d80b 1957 subpage_t *subpage = opaque;
ff6cff75 1958 uint8_t buf[8];
5c9eb028 1959 MemTxResult res;
791af8c8 1960
db7b5426 1961#if defined(DEBUG_SUBPAGE)
016e9d62 1962 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1963 subpage, len, addr);
db7b5426 1964#endif
5c9eb028
PM
1965 res = address_space_read(subpage->as, addr + subpage->base,
1966 attrs, buf, len);
1967 if (res) {
1968 return res;
f25a49e0 1969 }
acc9d80b
JK
1970 switch (len) {
1971 case 1:
f25a49e0
PM
1972 *data = ldub_p(buf);
1973 return MEMTX_OK;
acc9d80b 1974 case 2:
f25a49e0
PM
1975 *data = lduw_p(buf);
1976 return MEMTX_OK;
acc9d80b 1977 case 4:
f25a49e0
PM
1978 *data = ldl_p(buf);
1979 return MEMTX_OK;
ff6cff75 1980 case 8:
f25a49e0
PM
1981 *data = ldq_p(buf);
1982 return MEMTX_OK;
acc9d80b
JK
1983 default:
1984 abort();
1985 }
db7b5426
BS
1986}
1987
f25a49e0
PM
1988static MemTxResult subpage_write(void *opaque, hwaddr addr,
1989 uint64_t value, unsigned len, MemTxAttrs attrs)
db7b5426 1990{
acc9d80b 1991 subpage_t *subpage = opaque;
ff6cff75 1992 uint8_t buf[8];
acc9d80b 1993
db7b5426 1994#if defined(DEBUG_SUBPAGE)
016e9d62 1995 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1996 " value %"PRIx64"\n",
1997 __func__, subpage, len, addr, value);
db7b5426 1998#endif
acc9d80b
JK
1999 switch (len) {
2000 case 1:
2001 stb_p(buf, value);
2002 break;
2003 case 2:
2004 stw_p(buf, value);
2005 break;
2006 case 4:
2007 stl_p(buf, value);
2008 break;
ff6cff75
PB
2009 case 8:
2010 stq_p(buf, value);
2011 break;
acc9d80b
JK
2012 default:
2013 abort();
2014 }
5c9eb028
PM
2015 return address_space_write(subpage->as, addr + subpage->base,
2016 attrs, buf, len);
db7b5426
BS
2017}
2018
c353e4cc 2019static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 2020 unsigned len, bool is_write)
c353e4cc 2021{
acc9d80b 2022 subpage_t *subpage = opaque;
c353e4cc 2023#if defined(DEBUG_SUBPAGE)
016e9d62 2024 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 2025 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
2026#endif
2027
acc9d80b 2028 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 2029 len, is_write);
c353e4cc
PB
2030}
2031
70c68e44 2032static const MemoryRegionOps subpage_ops = {
f25a49e0
PM
2033 .read_with_attrs = subpage_read,
2034 .write_with_attrs = subpage_write,
ff6cff75
PB
2035 .impl.min_access_size = 1,
2036 .impl.max_access_size = 8,
2037 .valid.min_access_size = 1,
2038 .valid.max_access_size = 8,
c353e4cc 2039 .valid.accepts = subpage_accepts,
70c68e44 2040 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
2041};
2042
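/* Map the byte range [start, end] within one target page onto the given
 * section index in the subpage dispatch table.
 */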
c227f099 2043static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2044 uint16_t section)
db7b5426
BS
2045{
2046 int idx, eidx;
2047
2048 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2049 return -1;
2050 idx = SUBPAGE_IDX(start);
2051 eidx = SUBPAGE_IDX(end);
2052#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2053 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2054 __func__, mmio, start, end, idx, eidx, section);
db7b5426 2055#endif
db7b5426 2056 for (; idx <= eidx; idx++) {
5312bd8b 2057 mmio->sub_section[idx] = section;
db7b5426
BS
2058 }
2059
2060 return 0;
2061}
2062
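/* Allocate a subpage covering one target page; every sub-section starts out
 * unassigned until subpage_register() fills it in.
 */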
acc9d80b 2063static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 2064{
c227f099 2065 subpage_t *mmio;
db7b5426 2066
7267c094 2067 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 2068
acc9d80b 2069 mmio->as = as;
1eec614b 2070 mmio->base = base;
2c9b15ca 2071 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 2072 NULL, TARGET_PAGE_SIZE);
b3b00c78 2073 mmio->iomem.subpage = true;
db7b5426 2074#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2075 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2076 mmio, base, TARGET_PAGE_SIZE);
db7b5426 2077#endif
b41aac4f 2078 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
2079
2080 return mmio;
2081}
2082
a656e22f
PC
2083static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2084 MemoryRegion *mr)
5312bd8b 2085{
a656e22f 2086 assert(as);
5312bd8b 2087 MemoryRegionSection section = {
a656e22f 2088 .address_space = as,
5312bd8b
AK
2089 .mr = mr,
2090 .offset_within_address_space = 0,
2091 .offset_within_region = 0,
052e87b0 2092 .size = int128_2_64(),
5312bd8b
AK
2093 };
2094
53cb28cb 2095 return phys_section_add(map, &section);
5312bd8b
AK
2096}
2097
9d82b5a7 2098MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
aa102231 2099{
79e2b9ae
PB
2100 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2101 MemoryRegionSection *sections = d->map.sections;
9d82b5a7
PB
2102
2103 return sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
2104}
2105
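/* Create the special memory regions used by the dispatch code (ROM,
 * unassigned, notdirty and watchpoint handling).
 */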
e9179ce1
AK
2106static void io_mem_init(void)
2107{
1f6245e5 2108 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 2109 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 2110 NULL, UINT64_MAX);
2c9b15ca 2111 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 2112 NULL, UINT64_MAX);
2c9b15ca 2113 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 2114 NULL, UINT64_MAX);
e9179ce1
AK
2115}
2116
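/* Start building a new dispatch table for the address space. The four dummy
 * sections must end up at their well-known PHYS_SECTION_* indexes, hence the
 * asserts below.
 */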
ac1970fb 2117static void mem_begin(MemoryListener *listener)
00752703
PB
2118{
2119 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
2120 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2121 uint16_t n;
2122
a656e22f 2123 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 2124 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 2125 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 2126 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 2127 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 2128 assert(n == PHYS_SECTION_ROM);
a656e22f 2129 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 2130 assert(n == PHYS_SECTION_WATCH);
00752703 2131
9736e55b 2132 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
2133 d->as = as;
2134 as->next_dispatch = d;
2135}
2136
79e2b9ae
PB
2137static void address_space_dispatch_free(AddressSpaceDispatch *d)
2138{
2139 phys_sections_free(&d->map);
2140 g_free(d);
2141}
2142
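/* Publish the dispatch table built since mem_begin() and free the previous
 * one after an RCU grace period, so concurrent readers never see a partially
 * updated map.
 */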
00752703 2143static void mem_commit(MemoryListener *listener)
ac1970fb 2144{
89ae337a 2145 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2146 AddressSpaceDispatch *cur = as->dispatch;
2147 AddressSpaceDispatch *next = as->next_dispatch;
2148
53cb28cb 2149 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2150
79e2b9ae 2151 atomic_rcu_set(&as->dispatch, next);
53cb28cb 2152 if (cur) {
79e2b9ae 2153 call_rcu(cur, address_space_dispatch_free, rcu);
53cb28cb 2154 }
9affd6fc
PB
2155}
2156
1d71148e 2157static void tcg_commit(MemoryListener *listener)
50c1e149 2158{
182735ef 2159 CPUState *cpu;
117712c3
AK
2160
2161 /* since each CPU stores ram addresses in its TLB cache, we must
2162 reset the modified entries */
2163 /* XXX: slow ! */
bdc44640 2164 CPU_FOREACH(cpu) {
33bde2e1
EI
2165        /* FIXME: Disentangle the cpu.h circular file deps so we can
2166 directly get the right CPU from listener. */
2167 if (cpu->tcg_as_listener != listener) {
2168 continue;
2169 }
76e5c76f 2170 cpu_reload_memory_map(cpu);
117712c3 2171 }
50c1e149
AK
2172}
2173
ac1970fb
AK
2174void address_space_init_dispatch(AddressSpace *as)
2175{
00752703 2176 as->dispatch = NULL;
89ae337a 2177 as->dispatch_listener = (MemoryListener) {
ac1970fb 2178 .begin = mem_begin,
00752703 2179 .commit = mem_commit,
ac1970fb
AK
2180 .region_add = mem_add,
2181 .region_nop = mem_add,
2182 .priority = 0,
2183 };
89ae337a 2184 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2185}
2186
6e48e8f9
PB
2187void address_space_unregister(AddressSpace *as)
2188{
2189 memory_listener_unregister(&as->dispatch_listener);
2190}
2191
83f3c251
AK
2192void address_space_destroy_dispatch(AddressSpace *as)
2193{
2194 AddressSpaceDispatch *d = as->dispatch;
2195
79e2b9ae
PB
2196 atomic_rcu_set(&as->dispatch, NULL);
2197 if (d) {
2198 call_rcu(d, address_space_dispatch_free, rcu);
2199 }
83f3c251
AK
2200}
2201
62152b8a
AK
2202static void memory_map_init(void)
2203{
7267c094 2204 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2205
57271d63 2206 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2207 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2208
7267c094 2209 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2210 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2211 65536);
7dca8043 2212 address_space_init(&address_space_io, system_io, "I/O");
62152b8a
AK
2213}
2214
2215MemoryRegion *get_system_memory(void)
2216{
2217 return system_memory;
2218}
2219
309cb471
AK
2220MemoryRegion *get_system_io(void)
2221{
2222 return system_io;
2223}
2224
e2eef170
PB
2225#endif /* !defined(CONFIG_USER_ONLY) */
2226
13eb76e0
FB
2227/* physical memory access (slow version, mainly for debug) */
2228#if defined(CONFIG_USER_ONLY)
f17ec444 2229int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2230 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2231{
2232 int l, flags;
2233 target_ulong page;
53a5960a 2234 void * p;
13eb76e0
FB
2235
2236 while (len > 0) {
2237 page = addr & TARGET_PAGE_MASK;
2238 l = (page + TARGET_PAGE_SIZE) - addr;
2239 if (l > len)
2240 l = len;
2241 flags = page_get_flags(page);
2242 if (!(flags & PAGE_VALID))
a68fe89c 2243 return -1;
13eb76e0
FB
2244 if (is_write) {
2245 if (!(flags & PAGE_WRITE))
a68fe89c 2246 return -1;
579a97f7 2247 /* XXX: this code should not depend on lock_user */
72fb7daa 2248 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2249 return -1;
72fb7daa
AJ
2250 memcpy(p, buf, l);
2251 unlock_user(p, addr, l);
13eb76e0
FB
2252 } else {
2253 if (!(flags & PAGE_READ))
a68fe89c 2254 return -1;
579a97f7 2255 /* XXX: this code should not depend on lock_user */
72fb7daa 2256 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2257 return -1;
72fb7daa 2258 memcpy(buf, p, l);
5b257578 2259 unlock_user(p, addr, 0);
13eb76e0
FB
2260 }
2261 len -= l;
2262 buf += l;
2263 addr += l;
2264 }
a68fe89c 2265 return 0;
13eb76e0 2266}
8df1cd07 2267
13eb76e0 2268#else
51d7a9eb 2269
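/* After a direct write to guest RAM: invalidate any translated code in the
 * range and mark it dirty for the clients that still consider it clean.
 */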
845b6214 2270static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
a8170e5e 2271 hwaddr length)
51d7a9eb 2272{
e87f7778
PB
2273 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2274 /* No early return if dirty_log_mask is or becomes 0, because
2275 * cpu_physical_memory_set_dirty_range will still call
2276 * xen_modified_memory.
2277 */
2278 if (dirty_log_mask) {
2279 dirty_log_mask =
2280 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2281 }
2282 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2283 tb_invalidate_phys_range(addr, addr + length);
2284 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
51d7a9eb 2285 }
e87f7778 2286 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
51d7a9eb
AP
2287}
2288
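/* Clamp an access of l bytes at addr to what the region supports: bounded by
 * valid.max_access_size (default 4), by the alignment of addr for regions
 * without unaligned support, and rounded down to a power of two.
 */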
23326164 2289static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2290{
e1622f4b 2291 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2292
2293 /* Regions are assumed to support 1-4 byte accesses unless
2294 otherwise specified. */
23326164
RH
2295 if (access_size_max == 0) {
2296 access_size_max = 4;
2297 }
2298
2299 /* Bound the maximum access by the alignment of the address. */
2300 if (!mr->ops->impl.unaligned) {
2301 unsigned align_size_max = addr & -addr;
2302 if (align_size_max != 0 && align_size_max < access_size_max) {
2303 access_size_max = align_size_max;
2304 }
82f2563f 2305 }
23326164
RH
2306
2307 /* Don't attempt accesses larger than the maximum. */
2308 if (l > access_size_max) {
2309 l = access_size_max;
82f2563f 2310 }
098178f2
PB
2311 if (l & (l - 1)) {
2312 l = 1 << (qemu_fls(l) - 1);
2313 }
23326164
RH
2314
2315 return l;
82f2563f
PB
2316}
2317
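/* Slow-path read/write: translate the address one section at a time, using
 * memory_region_dispatch_read/write for MMIO and memcpy for RAM, and OR
 * together the MemTxResult of every individual access.
 */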
5c9eb028
PM
2318MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2319 uint8_t *buf, int len, bool is_write)
13eb76e0 2320{
149f54b5 2321 hwaddr l;
13eb76e0 2322 uint8_t *ptr;
791af8c8 2323 uint64_t val;
149f54b5 2324 hwaddr addr1;
5c8a00ce 2325 MemoryRegion *mr;
3b643495 2326 MemTxResult result = MEMTX_OK;
3b46e624 2327
41063e1e 2328 rcu_read_lock();
13eb76e0 2329 while (len > 0) {
149f54b5 2330 l = len;
5c8a00ce 2331 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2332
13eb76e0 2333 if (is_write) {
5c8a00ce
PB
2334 if (!memory_access_is_direct(mr, is_write)) {
2335 l = memory_access_size(mr, l, addr1);
4917cf44 2336 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2337 potential bugs */
23326164
RH
2338 switch (l) {
2339 case 8:
2340 /* 64 bit write access */
2341 val = ldq_p(buf);
3b643495
PM
2342 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2343 attrs);
23326164
RH
2344 break;
2345 case 4:
1c213d19 2346 /* 32 bit write access */
c27004ec 2347 val = ldl_p(buf);
3b643495
PM
2348 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2349 attrs);
23326164
RH
2350 break;
2351 case 2:
1c213d19 2352 /* 16 bit write access */
c27004ec 2353 val = lduw_p(buf);
3b643495
PM
2354 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2355 attrs);
23326164
RH
2356 break;
2357 case 1:
1c213d19 2358 /* 8 bit write access */
c27004ec 2359 val = ldub_p(buf);
3b643495
PM
2360 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2361 attrs);
23326164
RH
2362 break;
2363 default:
2364 abort();
13eb76e0 2365 }
2bbfa05d 2366 } else {
5c8a00ce 2367 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2368 /* RAM case */
5579c7f3 2369 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2370 memcpy(ptr, buf, l);
845b6214 2371 invalidate_and_set_dirty(mr, addr1, l);
13eb76e0
FB
2372 }
2373 } else {
5c8a00ce 2374 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2375 /* I/O case */
5c8a00ce 2376 l = memory_access_size(mr, l, addr1);
23326164
RH
2377 switch (l) {
2378 case 8:
2379 /* 64 bit read access */
3b643495
PM
2380 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2381 attrs);
23326164
RH
2382 stq_p(buf, val);
2383 break;
2384 case 4:
13eb76e0 2385 /* 32 bit read access */
3b643495
PM
2386 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2387 attrs);
c27004ec 2388 stl_p(buf, val);
23326164
RH
2389 break;
2390 case 2:
13eb76e0 2391 /* 16 bit read access */
3b643495
PM
2392 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2393 attrs);
c27004ec 2394 stw_p(buf, val);
23326164
RH
2395 break;
2396 case 1:
1c213d19 2397 /* 8 bit read access */
3b643495
PM
2398 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2399 attrs);
c27004ec 2400 stb_p(buf, val);
23326164
RH
2401 break;
2402 default:
2403 abort();
13eb76e0
FB
2404 }
2405 } else {
2406 /* RAM case */
5c8a00ce 2407 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2408 memcpy(buf, ptr, l);
13eb76e0
FB
2409 }
2410 }
2411 len -= l;
2412 buf += l;
2413 addr += l;
2414 }
41063e1e 2415 rcu_read_unlock();
fd8aaa76 2416
3b643495 2417 return result;
13eb76e0 2418}
8df1cd07 2419
5c9eb028
PM
2420MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2421 const uint8_t *buf, int len)
ac1970fb 2422{
5c9eb028 2423 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
ac1970fb
AK
2424}
2425
5c9eb028
PM
2426MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2427 uint8_t *buf, int len)
ac1970fb 2428{
5c9eb028 2429 return address_space_rw(as, addr, attrs, buf, len, false);
ac1970fb
AK
2430}
2431
2432
a8170e5e 2433void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2434 int len, int is_write)
2435{
5c9eb028
PM
2436 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2437 buf, len, is_write);
ac1970fb
AK
2438}
2439
582b55a9
AG
2440enum write_rom_type {
2441 WRITE_DATA,
2442 FLUSH_CACHE,
2443};
2444
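/* Copy into RAM/ROM bypassing the usual read-only handling (WRITE_DATA), or
 * only flush the host instruction cache over the range (FLUSH_CACHE);
 * destinations that are neither RAM nor ROMD are silently skipped.
 */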
2a221651 2445static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2446 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2447{
149f54b5 2448 hwaddr l;
d0ecd2aa 2449 uint8_t *ptr;
149f54b5 2450 hwaddr addr1;
5c8a00ce 2451 MemoryRegion *mr;
3b46e624 2452
41063e1e 2453 rcu_read_lock();
d0ecd2aa 2454 while (len > 0) {
149f54b5 2455 l = len;
2a221651 2456 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2457
5c8a00ce
PB
2458 if (!(memory_region_is_ram(mr) ||
2459 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2460 /* do nothing */
2461 } else {
5c8a00ce 2462 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2463 /* ROM/RAM case */
5579c7f3 2464 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2465 switch (type) {
2466 case WRITE_DATA:
2467 memcpy(ptr, buf, l);
845b6214 2468 invalidate_and_set_dirty(mr, addr1, l);
582b55a9
AG
2469 break;
2470 case FLUSH_CACHE:
2471 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2472 break;
2473 }
d0ecd2aa
FB
2474 }
2475 len -= l;
2476 buf += l;
2477 addr += l;
2478 }
41063e1e 2479 rcu_read_unlock();
d0ecd2aa
FB
2480}
2481
582b55a9 2482/* used for ROM loading : can write in RAM and ROM */
2a221651 2483void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2484 const uint8_t *buf, int len)
2485{
2a221651 2486 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2487}
2488
2489void cpu_flush_icache_range(hwaddr start, int len)
2490{
2491 /*
2492 * This function should do the same thing as an icache flush that was
2493 * triggered from within the guest. For TCG we are always cache coherent,
2494 * so there is no need to flush anything. For KVM / Xen we need to flush
2495 * the host's instruction cache at least.
2496 */
2497 if (tcg_enabled()) {
2498 return;
2499 }
2500
2a221651
EI
2501 cpu_physical_memory_write_rom_internal(&address_space_memory,
2502 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2503}
2504
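/* Bounce buffer used by address_space_map() when the target is not directly
 * accessible RAM; only one mapping may use it at a time (in_use).
 */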
6d16c2f8 2505typedef struct {
d3e71559 2506 MemoryRegion *mr;
6d16c2f8 2507 void *buffer;
a8170e5e
AK
2508 hwaddr addr;
2509 hwaddr len;
c2cba0ff 2510 bool in_use;
6d16c2f8
AL
2511} BounceBuffer;
2512
2513static BounceBuffer bounce;
2514
ba223c29 2515typedef struct MapClient {
e95205e1 2516 QEMUBH *bh;
72cf2d4f 2517 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2518} MapClient;
2519
38e047b5 2520QemuMutex map_client_list_lock;
72cf2d4f
BS
2521static QLIST_HEAD(map_client_list, MapClient) map_client_list
2522 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29 2523
e95205e1
FZ
2524static void cpu_unregister_map_client_do(MapClient *client)
2525{
2526 QLIST_REMOVE(client, link);
2527 g_free(client);
2528}
2529
33b6c2ed
FZ
2530static void cpu_notify_map_clients_locked(void)
2531{
2532 MapClient *client;
2533
2534 while (!QLIST_EMPTY(&map_client_list)) {
2535 client = QLIST_FIRST(&map_client_list);
e95205e1
FZ
2536 qemu_bh_schedule(client->bh);
2537 cpu_unregister_map_client_do(client);
33b6c2ed
FZ
2538 }
2539}
2540
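/* Register @bh to be scheduled once the bounce buffer becomes free, so that a
 * failed address_space_map() can be retried. If the buffer is already free,
 * the notification is delivered immediately.
 */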
e95205e1 2541void cpu_register_map_client(QEMUBH *bh)
ba223c29 2542{
7267c094 2543 MapClient *client = g_malloc(sizeof(*client));
ba223c29 2544
38e047b5 2545 qemu_mutex_lock(&map_client_list_lock);
e95205e1 2546 client->bh = bh;
72cf2d4f 2547 QLIST_INSERT_HEAD(&map_client_list, client, link);
33b6c2ed
FZ
2548 if (!atomic_read(&bounce.in_use)) {
2549 cpu_notify_map_clients_locked();
2550 }
38e047b5 2551 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2552}
2553
38e047b5 2554void cpu_exec_init_all(void)
ba223c29 2555{
38e047b5
FZ
2556 qemu_mutex_init(&ram_list.mutex);
2557 memory_map_init();
2558 io_mem_init();
2559 qemu_mutex_init(&map_client_list_lock);
ba223c29
AL
2560}
2561
e95205e1 2562void cpu_unregister_map_client(QEMUBH *bh)
ba223c29
AL
2563{
2564 MapClient *client;
2565
e95205e1
FZ
2566 qemu_mutex_lock(&map_client_list_lock);
2567 QLIST_FOREACH(client, &map_client_list, link) {
2568 if (client->bh == bh) {
2569 cpu_unregister_map_client_do(client);
2570 break;
2571 }
ba223c29 2572 }
e95205e1 2573 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2574}
2575
2576static void cpu_notify_map_clients(void)
2577{
38e047b5 2578 qemu_mutex_lock(&map_client_list_lock);
33b6c2ed 2579 cpu_notify_map_clients_locked();
38e047b5 2580 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2581}
2582
51644ab7
PB
2583bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2584{
5c8a00ce 2585 MemoryRegion *mr;
51644ab7
PB
2586 hwaddr l, xlat;
2587
41063e1e 2588 rcu_read_lock();
51644ab7
PB
2589 while (len > 0) {
2590 l = len;
5c8a00ce
PB
2591 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2592 if (!memory_access_is_direct(mr, is_write)) {
2593 l = memory_access_size(mr, l, addr);
2594 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2595 return false;
2596 }
2597 }
2598
2599 len -= l;
2600 addr += l;
2601 }
41063e1e 2602 rcu_read_unlock();
51644ab7
PB
2603 return true;
2604}
2605
6d16c2f8
AL
2606/* Map a physical memory region into a host virtual address.
2607 * May map a subset of the requested range, given by and returned in *plen.
2608 * May return NULL if resources needed to perform the mapping are exhausted.
2609 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2610 * Use cpu_register_map_client() to know when retrying the map operation is
2611 * likely to succeed.
6d16c2f8 2612 */
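/* Typical use, as a sketch: map, access up to *plen bytes, then unmap with
 * the length that was actually touched:
 *
 *     hwaddr len = size;
 *     void *p = address_space_map(as, addr, &len, is_write);
 *     if (p) {
 *         ... access len bytes at p ...
 *         address_space_unmap(as, p, len, is_write, len);
 *     }
 */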
ac1970fb 2613void *address_space_map(AddressSpace *as,
a8170e5e
AK
2614 hwaddr addr,
2615 hwaddr *plen,
ac1970fb 2616 bool is_write)
6d16c2f8 2617{
a8170e5e 2618 hwaddr len = *plen;
e3127ae0
PB
2619 hwaddr done = 0;
2620 hwaddr l, xlat, base;
2621 MemoryRegion *mr, *this_mr;
2622 ram_addr_t raddr;
6d16c2f8 2623
e3127ae0
PB
2624 if (len == 0) {
2625 return NULL;
2626 }
38bee5dc 2627
e3127ae0 2628 l = len;
41063e1e 2629 rcu_read_lock();
e3127ae0 2630 mr = address_space_translate(as, addr, &xlat, &l, is_write);
41063e1e 2631
e3127ae0 2632 if (!memory_access_is_direct(mr, is_write)) {
c2cba0ff 2633 if (atomic_xchg(&bounce.in_use, true)) {
41063e1e 2634 rcu_read_unlock();
e3127ae0 2635 return NULL;
6d16c2f8 2636 }
e85d9db5
KW
2637 /* Avoid unbounded allocations */
2638 l = MIN(l, TARGET_PAGE_SIZE);
2639 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2640 bounce.addr = addr;
2641 bounce.len = l;
d3e71559
PB
2642
2643 memory_region_ref(mr);
2644 bounce.mr = mr;
e3127ae0 2645 if (!is_write) {
5c9eb028
PM
2646 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2647 bounce.buffer, l);
8ab934f9 2648 }
6d16c2f8 2649
41063e1e 2650 rcu_read_unlock();
e3127ae0
PB
2651 *plen = l;
2652 return bounce.buffer;
2653 }
2654
2655 base = xlat;
2656 raddr = memory_region_get_ram_addr(mr);
2657
2658 for (;;) {
6d16c2f8
AL
2659 len -= l;
2660 addr += l;
e3127ae0
PB
2661 done += l;
2662 if (len == 0) {
2663 break;
2664 }
2665
2666 l = len;
2667 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2668 if (this_mr != mr || xlat != base + done) {
2669 break;
2670 }
6d16c2f8 2671 }
e3127ae0 2672
d3e71559 2673 memory_region_ref(mr);
41063e1e 2674 rcu_read_unlock();
e3127ae0
PB
2675 *plen = done;
2676 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2677}
2678
ac1970fb 2679/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2680 * Will also mark the memory as dirty if is_write == 1. access_len gives
2681 * the amount of memory that was actually read or written by the caller.
2682 */
a8170e5e
AK
2683void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2684 int is_write, hwaddr access_len)
6d16c2f8
AL
2685{
2686 if (buffer != bounce.buffer) {
d3e71559
PB
2687 MemoryRegion *mr;
2688 ram_addr_t addr1;
2689
2690 mr = qemu_ram_addr_from_host(buffer, &addr1);
2691 assert(mr != NULL);
6d16c2f8 2692 if (is_write) {
845b6214 2693 invalidate_and_set_dirty(mr, addr1, access_len);
6d16c2f8 2694 }
868bb33f 2695 if (xen_enabled()) {
e41d7c69 2696 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2697 }
d3e71559 2698 memory_region_unref(mr);
6d16c2f8
AL
2699 return;
2700 }
2701 if (is_write) {
5c9eb028
PM
2702 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2703 bounce.buffer, access_len);
6d16c2f8 2704 }
f8a83245 2705 qemu_vfree(bounce.buffer);
6d16c2f8 2706 bounce.buffer = NULL;
d3e71559 2707 memory_region_unref(bounce.mr);
c2cba0ff 2708 atomic_mb_set(&bounce.in_use, false);
ba223c29 2709 cpu_notify_map_clients();
6d16c2f8 2710}
d0ecd2aa 2711
a8170e5e
AK
2712void *cpu_physical_memory_map(hwaddr addr,
2713 hwaddr *plen,
ac1970fb
AK
2714 int is_write)
2715{
2716 return address_space_map(&address_space_memory, addr, plen, is_write);
2717}
2718
a8170e5e
AK
2719void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2720 int is_write, hwaddr access_len)
ac1970fb
AK
2721{
2722 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2723}
2724
8df1cd07 2725/* warning: addr must be aligned */
50013115
PM
2726static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2727 MemTxAttrs attrs,
2728 MemTxResult *result,
2729 enum device_endian endian)
8df1cd07 2730{
8df1cd07 2731 uint8_t *ptr;
791af8c8 2732 uint64_t val;
5c8a00ce 2733 MemoryRegion *mr;
149f54b5
PB
2734 hwaddr l = 4;
2735 hwaddr addr1;
50013115 2736 MemTxResult r;
8df1cd07 2737
41063e1e 2738 rcu_read_lock();
fdfba1a2 2739 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2740 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2741 /* I/O case */
50013115 2742 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
1e78bcc1
AG
2743#if defined(TARGET_WORDS_BIGENDIAN)
2744 if (endian == DEVICE_LITTLE_ENDIAN) {
2745 val = bswap32(val);
2746 }
2747#else
2748 if (endian == DEVICE_BIG_ENDIAN) {
2749 val = bswap32(val);
2750 }
2751#endif
8df1cd07
FB
2752 } else {
2753 /* RAM case */
5c8a00ce 2754 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2755 & TARGET_PAGE_MASK)
149f54b5 2756 + addr1);
1e78bcc1
AG
2757 switch (endian) {
2758 case DEVICE_LITTLE_ENDIAN:
2759 val = ldl_le_p(ptr);
2760 break;
2761 case DEVICE_BIG_ENDIAN:
2762 val = ldl_be_p(ptr);
2763 break;
2764 default:
2765 val = ldl_p(ptr);
2766 break;
2767 }
50013115
PM
2768 r = MEMTX_OK;
2769 }
2770 if (result) {
2771 *result = r;
8df1cd07 2772 }
41063e1e 2773 rcu_read_unlock();
8df1cd07
FB
2774 return val;
2775}
2776
50013115
PM
2777uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2778 MemTxAttrs attrs, MemTxResult *result)
2779{
2780 return address_space_ldl_internal(as, addr, attrs, result,
2781 DEVICE_NATIVE_ENDIAN);
2782}
2783
2784uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2785 MemTxAttrs attrs, MemTxResult *result)
2786{
2787 return address_space_ldl_internal(as, addr, attrs, result,
2788 DEVICE_LITTLE_ENDIAN);
2789}
2790
2791uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2792 MemTxAttrs attrs, MemTxResult *result)
2793{
2794 return address_space_ldl_internal(as, addr, attrs, result,
2795 DEVICE_BIG_ENDIAN);
2796}
2797
fdfba1a2 2798uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2799{
50013115 2800 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2801}
2802
fdfba1a2 2803uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2804{
50013115 2805 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2806}
2807
fdfba1a2 2808uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2809{
50013115 2810 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2811}
2812
84b7b8e7 2813/* warning: addr must be aligned */
50013115
PM
2814static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2815 MemTxAttrs attrs,
2816 MemTxResult *result,
2817 enum device_endian endian)
84b7b8e7 2818{
84b7b8e7
FB
2819 uint8_t *ptr;
2820 uint64_t val;
5c8a00ce 2821 MemoryRegion *mr;
149f54b5
PB
2822 hwaddr l = 8;
2823 hwaddr addr1;
50013115 2824 MemTxResult r;
84b7b8e7 2825
41063e1e 2826 rcu_read_lock();
2c17449b 2827 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2828 false);
2829 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2830 /* I/O case */
50013115 2831 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
968a5627
PB
2832#if defined(TARGET_WORDS_BIGENDIAN)
2833 if (endian == DEVICE_LITTLE_ENDIAN) {
2834 val = bswap64(val);
2835 }
2836#else
2837 if (endian == DEVICE_BIG_ENDIAN) {
2838 val = bswap64(val);
2839 }
84b7b8e7
FB
2840#endif
2841 } else {
2842 /* RAM case */
5c8a00ce 2843 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2844 & TARGET_PAGE_MASK)
149f54b5 2845 + addr1);
1e78bcc1
AG
2846 switch (endian) {
2847 case DEVICE_LITTLE_ENDIAN:
2848 val = ldq_le_p(ptr);
2849 break;
2850 case DEVICE_BIG_ENDIAN:
2851 val = ldq_be_p(ptr);
2852 break;
2853 default:
2854 val = ldq_p(ptr);
2855 break;
2856 }
50013115
PM
2857 r = MEMTX_OK;
2858 }
2859 if (result) {
2860 *result = r;
84b7b8e7 2861 }
41063e1e 2862 rcu_read_unlock();
84b7b8e7
FB
2863 return val;
2864}
2865
50013115
PM
2866uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2867 MemTxAttrs attrs, MemTxResult *result)
2868{
2869 return address_space_ldq_internal(as, addr, attrs, result,
2870 DEVICE_NATIVE_ENDIAN);
2871}
2872
2873uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2874 MemTxAttrs attrs, MemTxResult *result)
2875{
2876 return address_space_ldq_internal(as, addr, attrs, result,
2877 DEVICE_LITTLE_ENDIAN);
2878}
2879
2880uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2881 MemTxAttrs attrs, MemTxResult *result)
2882{
2883 return address_space_ldq_internal(as, addr, attrs, result,
2884 DEVICE_BIG_ENDIAN);
2885}
2886
2c17449b 2887uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2888{
50013115 2889 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2890}
2891
2c17449b 2892uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2893{
50013115 2894 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2895}
2896
2c17449b 2897uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2898{
50013115 2899 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2900}
2901
aab33094 2902/* XXX: optimize */
50013115
PM
2903uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
2904 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
2905{
2906 uint8_t val;
50013115
PM
2907 MemTxResult r;
2908
2909 r = address_space_rw(as, addr, attrs, &val, 1, 0);
2910 if (result) {
2911 *result = r;
2912 }
aab33094
FB
2913 return val;
2914}
2915
50013115
PM
2916uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2917{
2918 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2919}
2920
733f0b02 2921/* warning: addr must be aligned */
50013115
PM
2922static inline uint32_t address_space_lduw_internal(AddressSpace *as,
2923 hwaddr addr,
2924 MemTxAttrs attrs,
2925 MemTxResult *result,
2926 enum device_endian endian)
aab33094 2927{
733f0b02
MT
2928 uint8_t *ptr;
2929 uint64_t val;
5c8a00ce 2930 MemoryRegion *mr;
149f54b5
PB
2931 hwaddr l = 2;
2932 hwaddr addr1;
50013115 2933 MemTxResult r;
733f0b02 2934
41063e1e 2935 rcu_read_lock();
41701aa4 2936 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2937 false);
2938 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2939 /* I/O case */
50013115 2940 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
1e78bcc1
AG
2941#if defined(TARGET_WORDS_BIGENDIAN)
2942 if (endian == DEVICE_LITTLE_ENDIAN) {
2943 val = bswap16(val);
2944 }
2945#else
2946 if (endian == DEVICE_BIG_ENDIAN) {
2947 val = bswap16(val);
2948 }
2949#endif
733f0b02
MT
2950 } else {
2951 /* RAM case */
5c8a00ce 2952 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2953 & TARGET_PAGE_MASK)
149f54b5 2954 + addr1);
1e78bcc1
AG
2955 switch (endian) {
2956 case DEVICE_LITTLE_ENDIAN:
2957 val = lduw_le_p(ptr);
2958 break;
2959 case DEVICE_BIG_ENDIAN:
2960 val = lduw_be_p(ptr);
2961 break;
2962 default:
2963 val = lduw_p(ptr);
2964 break;
2965 }
50013115
PM
2966 r = MEMTX_OK;
2967 }
2968 if (result) {
2969 *result = r;
733f0b02 2970 }
41063e1e 2971 rcu_read_unlock();
733f0b02 2972 return val;
aab33094
FB
2973}
2974
50013115
PM
2975uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
2976 MemTxAttrs attrs, MemTxResult *result)
2977{
2978 return address_space_lduw_internal(as, addr, attrs, result,
2979 DEVICE_NATIVE_ENDIAN);
2980}
2981
2982uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
2983 MemTxAttrs attrs, MemTxResult *result)
2984{
2985 return address_space_lduw_internal(as, addr, attrs, result,
2986 DEVICE_LITTLE_ENDIAN);
2987}
2988
2989uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
2990 MemTxAttrs attrs, MemTxResult *result)
2991{
2992 return address_space_lduw_internal(as, addr, attrs, result,
2993 DEVICE_BIG_ENDIAN);
2994}
2995
41701aa4 2996uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2997{
50013115 2998 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2999}
3000
41701aa4 3001uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3002{
50013115 3003 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3004}
3005
41701aa4 3006uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3007{
50013115 3008 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3009}
3010
8df1cd07
FB
3011/* warning: addr must be aligned. The ram page is not marked as dirty
3012 and the code inside is not invalidated. It is useful if the dirty
3013 bits are used to track modified PTEs */
50013115
PM
3014void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3015 MemTxAttrs attrs, MemTxResult *result)
8df1cd07 3016{
8df1cd07 3017 uint8_t *ptr;
5c8a00ce 3018 MemoryRegion *mr;
149f54b5
PB
3019 hwaddr l = 4;
3020 hwaddr addr1;
50013115 3021 MemTxResult r;
845b6214 3022 uint8_t dirty_log_mask;
8df1cd07 3023
41063e1e 3024 rcu_read_lock();
2198a121 3025 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3026 true);
3027 if (l < 4 || !memory_access_is_direct(mr, true)) {
50013115 3028 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3029 } else {
5c8a00ce 3030 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3031 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3032 stl_p(ptr, val);
74576198 3033
845b6214
PB
3034 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3035 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
58d2707e 3036 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
50013115
PM
3037 r = MEMTX_OK;
3038 }
3039 if (result) {
3040 *result = r;
8df1cd07 3041 }
41063e1e 3042 rcu_read_unlock();
8df1cd07
FB
3043}
3044
50013115
PM
3045void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3046{
3047 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3048}
3049
8df1cd07 3050/* warning: addr must be aligned */
50013115
PM
3051static inline void address_space_stl_internal(AddressSpace *as,
3052 hwaddr addr, uint32_t val,
3053 MemTxAttrs attrs,
3054 MemTxResult *result,
3055 enum device_endian endian)
8df1cd07 3056{
8df1cd07 3057 uint8_t *ptr;
5c8a00ce 3058 MemoryRegion *mr;
149f54b5
PB
3059 hwaddr l = 4;
3060 hwaddr addr1;
50013115 3061 MemTxResult r;
8df1cd07 3062
41063e1e 3063 rcu_read_lock();
ab1da857 3064 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3065 true);
3066 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3067#if defined(TARGET_WORDS_BIGENDIAN)
3068 if (endian == DEVICE_LITTLE_ENDIAN) {
3069 val = bswap32(val);
3070 }
3071#else
3072 if (endian == DEVICE_BIG_ENDIAN) {
3073 val = bswap32(val);
3074 }
3075#endif
50013115 3076 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3077 } else {
8df1cd07 3078 /* RAM case */
5c8a00ce 3079 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3080 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3081 switch (endian) {
3082 case DEVICE_LITTLE_ENDIAN:
3083 stl_le_p(ptr, val);
3084 break;
3085 case DEVICE_BIG_ENDIAN:
3086 stl_be_p(ptr, val);
3087 break;
3088 default:
3089 stl_p(ptr, val);
3090 break;
3091 }
845b6214 3092 invalidate_and_set_dirty(mr, addr1, 4);
50013115
PM
3093 r = MEMTX_OK;
3094 }
3095 if (result) {
3096 *result = r;
8df1cd07 3097 }
41063e1e 3098 rcu_read_unlock();
8df1cd07
FB
3099}
3100
50013115
PM
3101void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3102 MemTxAttrs attrs, MemTxResult *result)
3103{
3104 address_space_stl_internal(as, addr, val, attrs, result,
3105 DEVICE_NATIVE_ENDIAN);
3106}
3107
3108void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3109 MemTxAttrs attrs, MemTxResult *result)
3110{
3111 address_space_stl_internal(as, addr, val, attrs, result,
3112 DEVICE_LITTLE_ENDIAN);
3113}
3114
3115void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3116 MemTxAttrs attrs, MemTxResult *result)
3117{
3118 address_space_stl_internal(as, addr, val, attrs, result,
3119 DEVICE_BIG_ENDIAN);
3120}
3121
ab1da857 3122void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3123{
50013115 3124 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3125}
3126
ab1da857 3127void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3128{
50013115 3129 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3130}
3131
ab1da857 3132void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3133{
50013115 3134 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3135}
3136
aab33094 3137/* XXX: optimize */
50013115
PM
3138void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3139 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
3140{
3141 uint8_t v = val;
50013115
PM
3142 MemTxResult r;
3143
3144 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3145 if (result) {
3146 *result = r;
3147 }
3148}
3149
3150void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3151{
3152 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
aab33094
FB
3153}
3154
733f0b02 3155/* warning: addr must be aligned */
50013115
PM
3156static inline void address_space_stw_internal(AddressSpace *as,
3157 hwaddr addr, uint32_t val,
3158 MemTxAttrs attrs,
3159 MemTxResult *result,
3160 enum device_endian endian)
aab33094 3161{
733f0b02 3162 uint8_t *ptr;
5c8a00ce 3163 MemoryRegion *mr;
149f54b5
PB
3164 hwaddr l = 2;
3165 hwaddr addr1;
50013115 3166 MemTxResult r;
733f0b02 3167
41063e1e 3168 rcu_read_lock();
5ce5944d 3169 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 3170 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3171#if defined(TARGET_WORDS_BIGENDIAN)
3172 if (endian == DEVICE_LITTLE_ENDIAN) {
3173 val = bswap16(val);
3174 }
3175#else
3176 if (endian == DEVICE_BIG_ENDIAN) {
3177 val = bswap16(val);
3178 }
3179#endif
50013115 3180 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
733f0b02 3181 } else {
733f0b02 3182 /* RAM case */
5c8a00ce 3183 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 3184 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3185 switch (endian) {
3186 case DEVICE_LITTLE_ENDIAN:
3187 stw_le_p(ptr, val);
3188 break;
3189 case DEVICE_BIG_ENDIAN:
3190 stw_be_p(ptr, val);
3191 break;
3192 default:
3193 stw_p(ptr, val);
3194 break;
3195 }
845b6214 3196 invalidate_and_set_dirty(mr, addr1, 2);
50013115
PM
3197 r = MEMTX_OK;
3198 }
3199 if (result) {
3200 *result = r;
733f0b02 3201 }
41063e1e 3202 rcu_read_unlock();
aab33094
FB
3203}
3204
50013115
PM
3205void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3206 MemTxAttrs attrs, MemTxResult *result)
3207{
3208 address_space_stw_internal(as, addr, val, attrs, result,
3209 DEVICE_NATIVE_ENDIAN);
3210}
3211
3212void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3213 MemTxAttrs attrs, MemTxResult *result)
3214{
3215 address_space_stw_internal(as, addr, val, attrs, result,
3216 DEVICE_LITTLE_ENDIAN);
3217}
3218
3219void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3220 MemTxAttrs attrs, MemTxResult *result)
3221{
3222 address_space_stw_internal(as, addr, val, attrs, result,
3223 DEVICE_BIG_ENDIAN);
3224}
3225
5ce5944d 3226void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3227{
50013115 3228 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3229}
3230
5ce5944d 3231void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3232{
50013115 3233 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3234}
3235
5ce5944d 3236void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3237{
50013115 3238 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3239}
3240
aab33094 3241/* XXX: optimize */
50013115
PM
3242void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3243 MemTxAttrs attrs, MemTxResult *result)
aab33094 3244{
50013115 3245 MemTxResult r;
aab33094 3246 val = tswap64(val);
50013115
PM
3247 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3248 if (result) {
3249 *result = r;
3250 }
aab33094
FB
3251}
3252
50013115
PM
3253void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3254 MemTxAttrs attrs, MemTxResult *result)
1e78bcc1 3255{
50013115 3256 MemTxResult r;
1e78bcc1 3257 val = cpu_to_le64(val);
50013115
PM
3258 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3259 if (result) {
3260 *result = r;
3261 }
3262}
3263void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3264 MemTxAttrs attrs, MemTxResult *result)
3265{
3266 MemTxResult r;
3267 val = cpu_to_be64(val);
3268 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3269 if (result) {
3270 *result = r;
3271 }
3272}
3273
3274void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3275{
3276 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3277}
3278
3279void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3280{
3281 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3282}
3283
f606604f 3284void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1 3285{
50013115 3286 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3287}
3288
5e2972fd 3289/* virtual memory access for debug (includes writing to ROM) */
f17ec444 3290int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 3291 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3292{
3293 int l;
a8170e5e 3294 hwaddr phys_addr;
9b3c35e0 3295 target_ulong page;
13eb76e0
FB
3296
3297 while (len > 0) {
3298 page = addr & TARGET_PAGE_MASK;
f17ec444 3299 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
3300 /* if no physical page mapped, return an error */
3301 if (phys_addr == -1)
3302 return -1;
3303 l = (page + TARGET_PAGE_SIZE) - addr;
3304 if (l > len)
3305 l = len;
5e2972fd 3306 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
3307 if (is_write) {
3308 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3309 } else {
5c9eb028
PM
3310 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3311 buf, l, 0);
2e38847b 3312 }
13eb76e0
FB
3313 len -= l;
3314 buf += l;
3315 addr += l;
3316 }
3317 return 0;
3318}
a68fe89c 3319#endif
13eb76e0 3320
8e4a424b
BS
3321/*
3322 * A helper function for the _utterly broken_ virtio device model to find out if
3323 * it's running on a big endian machine. Don't do this at home kids!
3324 */
98ed8ecf
GK
3325bool target_words_bigendian(void);
3326bool target_words_bigendian(void)
8e4a424b
BS
3327{
3328#if defined(TARGET_WORDS_BIGENDIAN)
3329 return true;
3330#else
3331 return false;
3332#endif
3333}
3334
76f35538 3335#ifndef CONFIG_USER_ONLY
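/* Return true if the physical address is backed by MMIO rather than RAM or a
 * ROM device in RAM mode.
 */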
a8170e5e 3336bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 3337{
5c8a00ce 3338 MemoryRegion*mr;
149f54b5 3339 hwaddr l = 1;
41063e1e 3340 bool res;
76f35538 3341
41063e1e 3342 rcu_read_lock();
5c8a00ce
PB
3343 mr = address_space_translate(&address_space_memory,
3344 phys_addr, &phys_addr, &l, false);
76f35538 3345
41063e1e
PB
3346 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3347 rcu_read_unlock();
3348 return res;
76f35538 3349}
bd2fa51f
MH
3350
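/* Call @func for every RAM block, under the RCU read lock, passing the host
 * pointer, guest offset and used length of each block.
 */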
3351void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3352{
3353 RAMBlock *block;
3354
0dc3f44a
MD
3355 rcu_read_lock();
3356 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5 3357 func(block->host, block->offset, block->used_length, opaque);
bd2fa51f 3358 }
0dc3f44a 3359 rcu_read_unlock();
bd2fa51f 3360}
ec3f8c99 3361#endif