[mirror_qemu.git] / exec.c
/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

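/* Ensure the dispatch radix tree has room for at least @nodes more
 * P_L2_SIZE-entry nodes, growing the node array if necessary.
 */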
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

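/* Map the @nb pages starting at page index @index to section number @leaf
 * in the dispatch map, allocating intermediate nodes as needed.
 */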
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

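/* Walk the dispatch radix tree for @addr, following skip counts, and return
 * the MemoryRegionSection that covers it (or the unassigned section).
 */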
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

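/* Resolve a guest physical address in @as to a target MemoryRegion and
 * offset, walking through any IOMMUs on the way; *plen is clamped to the
 * length that can be accessed contiguously.
 */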
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

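/* Register a freshly created vCPU: assign it a cpu_index, hook it into the
 * global cpus list, and register its migration/savevm state.
 */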
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}

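/* Print a fatal error message to stderr (and the log, if enabled), dump the
 * CPU state, and abort the process.
 */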
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
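/* Find the RAMBlock that contains ram_addr_t @addr, checking the
 * most-recently-used block first before scanning the full list.
 */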
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     * mru_block = xxx
     * rcu_read_unlock()
     * xxx removed from list
     * rcu_read_lock()
     * read mru_block
     * mru_block = NULL;
     * call_rcu(reclaim_ramblock, xxx);
     * rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

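/* Atomically test and clear the dirty bitmap bits for [start, start + length)
 * on behalf of @client, flushing affected TLB entries when TCG is in use.
 */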
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

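/* Build the iotlb value for a section: a ram address (possibly tagged with
 * the NOTDIRTY or ROM special sections) for RAM, or a section index for
 * MMIO; pages covered by a watchpoint are redirected to the WATCH section.
 */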
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

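/* Append @section to the map's section table, taking a reference on its
 * MemoryRegion, and return its index.
 */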
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

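/* Add @section to the address space's next dispatch map, splitting it into
 * subpage pieces for partially covered pages and whole-page runs for the
 * rest.
 */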
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

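/* Back a RAMBlock with a file created in the hugetlbfs mount at @path:
 * create and unlink a temporary file, size it, and mmap it shared or
 * private according to the block's RAM_SHARED flag.
 */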
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = NULL;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        error_report("%s", error_get_pretty(*errp));
        exit(1);
    }
    return NULL;
}
#endif

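/* Find a gap in the guest ram_addr_t space large enough for a new block of
 * @size bytes, preferring the smallest gap that fits.
 */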
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = TARGET_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}

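/* Insert a filled-in RAMBlock into the size-sorted block list: assign it an
 * offset, allocate host memory if none was provided, grow the dirty bitmaps,
 * and apply madvise/dump/KVM setup to the new range.
 */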
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}

#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif

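/* Common back end for the qemu_ram_alloc*() variants: fill in a RAMBlock
 * (optionally with preallocated host memory and/or the RAM_RESIZEABLE flag)
 * and hand it to ram_block_add().
 */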
static
ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                                   uint64_t length,
                                                   void *host),
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = TARGET_PAGE_ALIGN(size);
    max_size = TARGET_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            g_free_rcu(block, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

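/* Recreate the host mapping for the RAM range [addr, addr + length), reusing
 * the block's original fd and mapping flags (or an anonymous mapping) at the
 * same virtual address.
 */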
cd19cfa2
HY
1616#ifndef _WIN32
1617void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1618{
1619 RAMBlock *block;
1620 ram_addr_t offset;
1621 int flags;
1622 void *area, *vaddr;
1623
0dc3f44a 1624 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
cd19cfa2 1625 offset = addr - block->offset;
9b8424d5 1626 if (offset < block->max_length) {
1240be24 1627 vaddr = ramblock_ptr(block, offset);
7bd4f430 1628 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1629 ;
dfeaf2ab
MA
1630 } else if (xen_enabled()) {
1631 abort();
cd19cfa2
HY
1632 } else {
1633 flags = MAP_FIXED;
3435f395 1634 if (block->fd >= 0) {
dbcb8981
PB
1635 flags |= (block->flags & RAM_SHARED ?
1636 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1637 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1638 flags, block->fd, offset);
cd19cfa2 1639 } else {
2eb9fbaa
MA
1640 /*
1641 * Remap needs to match alloc. Accelerators that
1642 * set phys_mem_alloc never remap. If they did,
1643 * we'd need a remap hook here.
1644 */
1645 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1646
cd19cfa2
HY
1647 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1648 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1649 flags, -1, 0);
cd19cfa2
HY
1650 }
1651 if (area != vaddr) {
f15fbc4b
AP
1652 fprintf(stderr, "Could not remap addr: "
1653 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1654 length, addr);
1655 exit(1);
1656 }
8490fc78 1657 memory_try_enable_merging(vaddr, length);
ddb97f1d 1658 qemu_ram_setup_dump(vaddr, length);
cd19cfa2 1659 }
cd19cfa2
HY
1660 }
1661 }
1662}
1663#endif /* !_WIN32 */
1664
a35ba7be
PB
1665int qemu_get_ram_fd(ram_addr_t addr)
1666{
ae3a7047
MD
1667 RAMBlock *block;
1668 int fd;
a35ba7be 1669
0dc3f44a 1670 rcu_read_lock();
ae3a7047
MD
1671 block = qemu_get_ram_block(addr);
1672 fd = block->fd;
0dc3f44a 1673 rcu_read_unlock();
ae3a7047 1674 return fd;
a35ba7be
PB
1675}
1676
3fd74b84
DM
1677void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1678{
ae3a7047
MD
1679 RAMBlock *block;
1680 void *ptr;
3fd74b84 1681
0dc3f44a 1682 rcu_read_lock();
ae3a7047
MD
1683 block = qemu_get_ram_block(addr);
1684 ptr = ramblock_ptr(block, 0);
0dc3f44a 1685 rcu_read_unlock();
ae3a7047 1686 return ptr;
3fd74b84
DM
1687}
1688
1b5ec234 1689/* Return a host pointer to ram allocated with qemu_ram_alloc.
ae3a7047
MD
1690 * This should not be used for general purpose DMA. Use address_space_map
1691 * or address_space_rw instead. For local memory (e.g. video ram) that the
1692 * device owns, use memory_region_get_ram_ptr.
0dc3f44a
MD
1693 *
1694 * By the time this function returns, the returned pointer is not protected
1695 * by RCU anymore. If the caller is not within an RCU critical section and
1696 * does not hold the iothread lock, it must have other means of protecting the
1697 * pointer, such as a reference to the region that includes the incoming
1698 * ram_addr_t.
1b5ec234
PB
1699 */
1700void *qemu_get_ram_ptr(ram_addr_t addr)
1701{
ae3a7047
MD
1702 RAMBlock *block;
1703 void *ptr;
1b5ec234 1704
0dc3f44a 1705 rcu_read_lock();
ae3a7047
MD
1706 block = qemu_get_ram_block(addr);
1707
1708 if (xen_enabled() && block->host == NULL) {
0d6d3c87
PB
1709 /* We need to check if the requested address is in the RAM
1710 * because we don't want to map the entire memory in QEMU.
1711 * In that case just map until the end of the page.
1712 */
1713 if (block->offset == 0) {
ae3a7047 1714 ptr = xen_map_cache(addr, 0, 0);
0dc3f44a 1715 goto unlock;
0d6d3c87 1716 }
ae3a7047
MD
1717
1718 block->host = xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87 1719 }
ae3a7047
MD
1720 ptr = ramblock_ptr(block, addr - block->offset);
1721
0dc3f44a
MD
1722unlock:
1723 rcu_read_unlock();
ae3a7047 1724 return ptr;
dc828ca1
PB
1725}
1726
38bee5dc 1727/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
ae3a7047 1728 * but takes a size argument.
0dc3f44a
MD
1729 *
1730 * By the time this function returns, the returned pointer is not protected
1731 * by RCU anymore. If the caller is not within an RCU critical section and
1732 * does not hold the iothread lock, it must have other means of protecting the
1733 * pointer, such as a reference to the region that includes the incoming
1734 * ram_addr_t.
ae3a7047 1735 */
cb85f7ab 1736static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1737{
ae3a7047 1738 void *ptr;
8ab934f9
SS
1739 if (*size == 0) {
1740 return NULL;
1741 }
868bb33f 1742 if (xen_enabled()) {
e41d7c69 1743 return xen_map_cache(addr, *size, 1);
868bb33f 1744 } else {
38bee5dc 1745 RAMBlock *block;
0dc3f44a
MD
1746 rcu_read_lock();
1747 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5
MT
1748 if (addr - block->offset < block->max_length) {
1749 if (addr - block->offset + *size > block->max_length)
1750 *size = block->max_length - addr + block->offset;
ae3a7047 1751 ptr = ramblock_ptr(block, addr - block->offset);
0dc3f44a 1752 rcu_read_unlock();
ae3a7047 1753 return ptr;
38bee5dc
SS
1754 }
1755 }
1756
1757 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1758 abort();
38bee5dc
SS
1759 }
1760}
1761
7443b437 1762/* Some of the softmmu routines need to translate from a host pointer
ae3a7047
MD
1763 * (typically a TLB entry) back to a ram offset.
1764 *
1765 * By the time this function returns, the returned pointer is not protected
1766 * by RCU anymore. If the caller is not within an RCU critical section and
1767 * does not hold the iothread lock, it must have other means of protecting the
1768 * pointer, such as a reference to the region that includes the incoming
1769 * ram_addr_t.
1770 */
1b5ec234 1771MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1772{
94a6b54f
PB
1773 RAMBlock *block;
1774 uint8_t *host = ptr;
ae3a7047 1775 MemoryRegion *mr;
94a6b54f 1776
868bb33f 1777 if (xen_enabled()) {
0dc3f44a 1778 rcu_read_lock();
e41d7c69 1779 *ram_addr = xen_ram_addr_from_mapcache(ptr);
ae3a7047 1780 mr = qemu_get_ram_block(*ram_addr)->mr;
0dc3f44a 1781 rcu_read_unlock();
ae3a7047 1782 return mr;
712c2b41
SS
1783 }
1784
0dc3f44a
MD
1785 rcu_read_lock();
1786 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 1787 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1788 goto found;
1789 }
1790
0dc3f44a 1791 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
432d268c
JN
1792 /* This case happens when the block is not mapped. */
1793 if (block->host == NULL) {
1794 continue;
1795 }
9b8424d5 1796 if (host - block->host < block->max_length) {
23887b79 1797 goto found;
f471a17e 1798 }
94a6b54f 1799 }
432d268c 1800
0dc3f44a 1801 rcu_read_unlock();
1b5ec234 1802 return NULL;
23887b79
PB
1803
1804found:
1805 *ram_addr = block->offset + (host - block->host);
ae3a7047 1806 mr = block->mr;
0dc3f44a 1807 rcu_read_unlock();
ae3a7047 1808 return mr;
e890261f 1809}
f471a17e 1810
a8170e5e 1811static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1812 uint64_t val, unsigned size)
9fa3e853 1813{
52159192 1814 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1815 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1816 }
0e0df1e2
AK
1817 switch (size) {
1818 case 1:
1819 stb_p(qemu_get_ram_ptr(ram_addr), val);
1820 break;
1821 case 2:
1822 stw_p(qemu_get_ram_ptr(ram_addr), val);
1823 break;
1824 case 4:
1825 stl_p(qemu_get_ram_ptr(ram_addr), val);
1826 break;
1827 default:
1828 abort();
3a7d929e 1829 }
58d2707e
PB
1830 /* Set both VGA and migration bits for simplicity and to remove
1831 * the notdirty callback faster.
1832 */
1833 cpu_physical_memory_set_dirty_range(ram_addr, size,
1834 DIRTY_CLIENTS_NOCODE);
f23db169
FB
1835 /* we remove the notdirty callback only if the code has been
1836 flushed */
a2cd8c85 1837 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1838 CPUArchState *env = current_cpu->env_ptr;
93afeade 1839 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1840 }
9fa3e853
FB
1841}
1842
b018ddf6
PB
1843static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1844 unsigned size, bool is_write)
1845{
1846 return is_write;
1847}
1848
0e0df1e2 1849static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1850 .write = notdirty_mem_write,
b018ddf6 1851 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1852 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1853};
1854
0f459d16 1855/* Generate a debug exception if a watchpoint has been hit. */
66b9b43c 1856static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
0f459d16 1857{
93afeade
AF
1858 CPUState *cpu = current_cpu;
1859 CPUArchState *env = cpu->env_ptr;
06d55cc1 1860 target_ulong pc, cs_base;
0f459d16 1861 target_ulong vaddr;
a1d1bb31 1862 CPUWatchpoint *wp;
06d55cc1 1863 int cpu_flags;
0f459d16 1864
ff4700b0 1865 if (cpu->watchpoint_hit) {
06d55cc1
AL
1866 /* We re-entered the check after replacing the TB. Now raise
1867 * the debug interrupt so that it will trigger after the
1868 * current instruction. */
93afeade 1869 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1870 return;
1871 }
93afeade 1872 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1873 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1874 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1875 && (wp->flags & flags)) {
08225676
PM
1876 if (flags == BP_MEM_READ) {
1877 wp->flags |= BP_WATCHPOINT_HIT_READ;
1878 } else {
1879 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1880 }
1881 wp->hitaddr = vaddr;
66b9b43c 1882 wp->hitattrs = attrs;
ff4700b0
AF
1883 if (!cpu->watchpoint_hit) {
1884 cpu->watchpoint_hit = wp;
239c51a5 1885 tb_check_watchpoint(cpu);
6e140f28 1886 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1887 cpu->exception_index = EXCP_DEBUG;
5638d180 1888 cpu_loop_exit(cpu);
6e140f28
AL
1889 } else {
1890 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1891 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1892 cpu_resume_from_signal(cpu, NULL);
6e140f28 1893 }
06d55cc1 1894 }
6e140f28
AL
1895 } else {
1896 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1897 }
1898 }
1899}
1900
6658ffb8
PB
1901/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1902 so these check for a hit then pass through to the normal out-of-line
1903 phys routines. */
66b9b43c
PM
1904static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1905 unsigned size, MemTxAttrs attrs)
6658ffb8 1906{
66b9b43c
PM
1907 MemTxResult res;
1908 uint64_t data;
1909
1910 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
1ec9b909 1911 switch (size) {
66b9b43c
PM
1912 case 1:
1913 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
1914 break;
1915 case 2:
1916 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
1917 break;
1918 case 4:
1919 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
1920 break;
1ec9b909
AK
1921 default: abort();
1922 }
66b9b43c
PM
1923 *pdata = data;
1924 return res;
6658ffb8
PB
1925}
1926
66b9b43c
PM
1927static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1928 uint64_t val, unsigned size,
1929 MemTxAttrs attrs)
6658ffb8 1930{
66b9b43c
PM
1931 MemTxResult res;
1932
1933 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1ec9b909 1934 switch (size) {
67364150 1935 case 1:
66b9b43c 1936 address_space_stb(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1937 break;
1938 case 2:
66b9b43c 1939 address_space_stw(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1940 break;
1941 case 4:
66b9b43c 1942 address_space_stl(&address_space_memory, addr, val, attrs, &res);
67364150 1943 break;
1ec9b909
AK
1944 default: abort();
1945 }
66b9b43c 1946 return res;
6658ffb8
PB
1947}
1948
1ec9b909 1949static const MemoryRegionOps watch_mem_ops = {
66b9b43c
PM
1950 .read_with_attrs = watch_mem_read,
1951 .write_with_attrs = watch_mem_write,
1ec9b909 1952 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1953};
6658ffb8 1954
f25a49e0
PM
1955static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1956 unsigned len, MemTxAttrs attrs)
db7b5426 1957{
acc9d80b 1958 subpage_t *subpage = opaque;
ff6cff75 1959 uint8_t buf[8];
5c9eb028 1960 MemTxResult res;
791af8c8 1961
db7b5426 1962#if defined(DEBUG_SUBPAGE)
016e9d62 1963 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1964 subpage, len, addr);
db7b5426 1965#endif
5c9eb028
PM
1966 res = address_space_read(subpage->as, addr + subpage->base,
1967 attrs, buf, len);
1968 if (res) {
1969 return res;
f25a49e0 1970 }
acc9d80b
JK
1971 switch (len) {
1972 case 1:
f25a49e0
PM
1973 *data = ldub_p(buf);
1974 return MEMTX_OK;
acc9d80b 1975 case 2:
f25a49e0
PM
1976 *data = lduw_p(buf);
1977 return MEMTX_OK;
acc9d80b 1978 case 4:
f25a49e0
PM
1979 *data = ldl_p(buf);
1980 return MEMTX_OK;
ff6cff75 1981 case 8:
f25a49e0
PM
1982 *data = ldq_p(buf);
1983 return MEMTX_OK;
acc9d80b
JK
1984 default:
1985 abort();
1986 }
db7b5426
BS
1987}
1988
f25a49e0
PM
1989static MemTxResult subpage_write(void *opaque, hwaddr addr,
1990 uint64_t value, unsigned len, MemTxAttrs attrs)
db7b5426 1991{
acc9d80b 1992 subpage_t *subpage = opaque;
ff6cff75 1993 uint8_t buf[8];
acc9d80b 1994
db7b5426 1995#if defined(DEBUG_SUBPAGE)
016e9d62 1996 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1997 " value %"PRIx64"\n",
1998 __func__, subpage, len, addr, value);
db7b5426 1999#endif
acc9d80b
JK
2000 switch (len) {
2001 case 1:
2002 stb_p(buf, value);
2003 break;
2004 case 2:
2005 stw_p(buf, value);
2006 break;
2007 case 4:
2008 stl_p(buf, value);
2009 break;
ff6cff75
PB
2010 case 8:
2011 stq_p(buf, value);
2012 break;
acc9d80b
JK
2013 default:
2014 abort();
2015 }
5c9eb028
PM
2016 return address_space_write(subpage->as, addr + subpage->base,
2017 attrs, buf, len);
db7b5426
BS
2018}
2019
c353e4cc 2020static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 2021 unsigned len, bool is_write)
c353e4cc 2022{
acc9d80b 2023 subpage_t *subpage = opaque;
c353e4cc 2024#if defined(DEBUG_SUBPAGE)
016e9d62 2025 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 2026 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
2027#endif
2028
acc9d80b 2029 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 2030 len, is_write);
c353e4cc
PB
2031}
2032
70c68e44 2033static const MemoryRegionOps subpage_ops = {
f25a49e0
PM
2034 .read_with_attrs = subpage_read,
2035 .write_with_attrs = subpage_write,
ff6cff75
PB
2036 .impl.min_access_size = 1,
2037 .impl.max_access_size = 8,
2038 .valid.min_access_size = 1,
2039 .valid.max_access_size = 8,
c353e4cc 2040 .valid.accepts = subpage_accepts,
70c68e44 2041 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
2042};
2043
c227f099 2044static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2045 uint16_t section)
db7b5426
BS
2046{
2047 int idx, eidx;
2048
2049 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2050 return -1;
2051 idx = SUBPAGE_IDX(start);
2052 eidx = SUBPAGE_IDX(end);
2053#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2054 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2055 __func__, mmio, start, end, idx, eidx, section);
db7b5426 2056#endif
db7b5426 2057 for (; idx <= eidx; idx++) {
5312bd8b 2058 mmio->sub_section[idx] = section;
db7b5426
BS
2059 }
2060
2061 return 0;
2062}
2063
acc9d80b 2064static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 2065{
c227f099 2066 subpage_t *mmio;
db7b5426 2067
7267c094 2068 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 2069
acc9d80b 2070 mmio->as = as;
1eec614b 2071 mmio->base = base;
2c9b15ca 2072 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 2073 NULL, TARGET_PAGE_SIZE);
b3b00c78 2074 mmio->iomem.subpage = true;
db7b5426 2075#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2076 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2077 mmio, base, TARGET_PAGE_SIZE);
db7b5426 2078#endif
b41aac4f 2079 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
2080
2081 return mmio;
2082}
2083
a656e22f
PC
2084static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2085 MemoryRegion *mr)
5312bd8b 2086{
a656e22f 2087 assert(as);
5312bd8b 2088 MemoryRegionSection section = {
a656e22f 2089 .address_space = as,
5312bd8b
AK
2090 .mr = mr,
2091 .offset_within_address_space = 0,
2092 .offset_within_region = 0,
052e87b0 2093 .size = int128_2_64(),
5312bd8b
AK
2094 };
2095
53cb28cb 2096 return phys_section_add(map, &section);
5312bd8b
AK
2097}
2098
9d82b5a7 2099MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
aa102231 2100{
79e2b9ae
PB
2101 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2102 MemoryRegionSection *sections = d->map.sections;
9d82b5a7
PB
2103
2104 return sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
2105}
2106
e9179ce1
AK
2107static void io_mem_init(void)
2108{
1f6245e5 2109 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 2110 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 2111 NULL, UINT64_MAX);
2c9b15ca 2112 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 2113 NULL, UINT64_MAX);
2c9b15ca 2114 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 2115 NULL, UINT64_MAX);
e9179ce1
AK
2116}
2117
ac1970fb 2118static void mem_begin(MemoryListener *listener)
00752703
PB
2119{
2120 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
2121 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2122 uint16_t n;
2123
a656e22f 2124 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 2125 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 2126 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 2127 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 2128 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 2129 assert(n == PHYS_SECTION_ROM);
a656e22f 2130 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 2131 assert(n == PHYS_SECTION_WATCH);
00752703 2132
9736e55b 2133 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
2134 d->as = as;
2135 as->next_dispatch = d;
2136}
2137
79e2b9ae
PB
2138static void address_space_dispatch_free(AddressSpaceDispatch *d)
2139{
2140 phys_sections_free(&d->map);
2141 g_free(d);
2142}
2143
00752703 2144static void mem_commit(MemoryListener *listener)
ac1970fb 2145{
89ae337a 2146 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2147 AddressSpaceDispatch *cur = as->dispatch;
2148 AddressSpaceDispatch *next = as->next_dispatch;
2149
53cb28cb 2150 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2151
79e2b9ae 2152 atomic_rcu_set(&as->dispatch, next);
53cb28cb 2153 if (cur) {
79e2b9ae 2154 call_rcu(cur, address_space_dispatch_free, rcu);
53cb28cb 2155 }
9affd6fc
PB
2156}
2157
1d71148e 2158static void tcg_commit(MemoryListener *listener)
50c1e149 2159{
182735ef 2160 CPUState *cpu;
117712c3
AK
2161
2162 /* since each CPU stores ram addresses in its TLB cache, we must
2163 reset the modified entries */
2164 /* XXX: slow ! */
bdc44640 2165 CPU_FOREACH(cpu) {
33bde2e1
EI
2166 /* FIXME: Disentangle the cpu.h circular file deps so we can
2167 directly get the right CPU from listener. */
2168 if (cpu->tcg_as_listener != listener) {
2169 continue;
2170 }
76e5c76f 2171 cpu_reload_memory_map(cpu);
117712c3 2172 }
50c1e149
AK
2173}
2174
ac1970fb
AK
2175void address_space_init_dispatch(AddressSpace *as)
2176{
00752703 2177 as->dispatch = NULL;
89ae337a 2178 as->dispatch_listener = (MemoryListener) {
ac1970fb 2179 .begin = mem_begin,
00752703 2180 .commit = mem_commit,
ac1970fb
AK
2181 .region_add = mem_add,
2182 .region_nop = mem_add,
2183 .priority = 0,
2184 };
89ae337a 2185 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2186}
2187
6e48e8f9
PB
2188void address_space_unregister(AddressSpace *as)
2189{
2190 memory_listener_unregister(&as->dispatch_listener);
2191}
2192
83f3c251
AK
2193void address_space_destroy_dispatch(AddressSpace *as)
2194{
2195 AddressSpaceDispatch *d = as->dispatch;
2196
79e2b9ae
PB
2197 atomic_rcu_set(&as->dispatch, NULL);
2198 if (d) {
2199 call_rcu(d, address_space_dispatch_free, rcu);
2200 }
83f3c251
AK
2201}
2202
62152b8a
AK
2203static void memory_map_init(void)
2204{
7267c094 2205 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2206
57271d63 2207 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2208 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2209
7267c094 2210 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2211 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2212 65536);
7dca8043 2213 address_space_init(&address_space_io, system_io, "I/O");
62152b8a
AK
2214}
2215
2216MemoryRegion *get_system_memory(void)
2217{
2218 return system_memory;
2219}
2220
309cb471
AK
2221MemoryRegion *get_system_io(void)
2222{
2223 return system_io;
2224}
2225
e2eef170
PB
2226#endif /* !defined(CONFIG_USER_ONLY) */
2227
13eb76e0
FB
2228/* physical memory access (slow version, mainly for debug) */
2229#if defined(CONFIG_USER_ONLY)
f17ec444 2230int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2231 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2232{
2233 int l, flags;
2234 target_ulong page;
53a5960a 2235 void * p;
13eb76e0
FB
2236
2237 while (len > 0) {
2238 page = addr & TARGET_PAGE_MASK;
2239 l = (page + TARGET_PAGE_SIZE) - addr;
2240 if (l > len)
2241 l = len;
2242 flags = page_get_flags(page);
2243 if (!(flags & PAGE_VALID))
a68fe89c 2244 return -1;
13eb76e0
FB
2245 if (is_write) {
2246 if (!(flags & PAGE_WRITE))
a68fe89c 2247 return -1;
579a97f7 2248 /* XXX: this code should not depend on lock_user */
72fb7daa 2249 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2250 return -1;
72fb7daa
AJ
2251 memcpy(p, buf, l);
2252 unlock_user(p, addr, l);
13eb76e0
FB
2253 } else {
2254 if (!(flags & PAGE_READ))
a68fe89c 2255 return -1;
579a97f7 2256 /* XXX: this code should not depend on lock_user */
72fb7daa 2257 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2258 return -1;
72fb7daa 2259 memcpy(buf, p, l);
5b257578 2260 unlock_user(p, addr, 0);
13eb76e0
FB
2261 }
2262 len -= l;
2263 buf += l;
2264 addr += l;
2265 }
a68fe89c 2266 return 0;
13eb76e0 2267}
8df1cd07 2268
13eb76e0 2269#else
51d7a9eb 2270
845b6214 2271static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
a8170e5e 2272 hwaddr length)
51d7a9eb 2273{
e87f7778
PB
2274 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2275 /* No early return if dirty_log_mask is or becomes 0, because
2276 * cpu_physical_memory_set_dirty_range will still call
2277 * xen_modified_memory.
2278 */
2279 if (dirty_log_mask) {
2280 dirty_log_mask =
2281 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2282 }
2283 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2284 tb_invalidate_phys_range(addr, addr + length);
2285 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
51d7a9eb 2286 }
e87f7778 2287 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
51d7a9eb
AP
2288}
2289
23326164 2290static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2291{
e1622f4b 2292 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2293
2294 /* Regions are assumed to support 1-4 byte accesses unless
2295 otherwise specified. */
23326164
RH
2296 if (access_size_max == 0) {
2297 access_size_max = 4;
2298 }
2299
2300 /* Bound the maximum access by the alignment of the address. */
2301 if (!mr->ops->impl.unaligned) {
2302 unsigned align_size_max = addr & -addr;
2303 if (align_size_max != 0 && align_size_max < access_size_max) {
2304 access_size_max = align_size_max;
2305 }
82f2563f 2306 }
23326164
RH
2307
2308 /* Don't attempt accesses larger than the maximum. */
2309 if (l > access_size_max) {
2310 l = access_size_max;
82f2563f 2311 }
098178f2
PB
2312 if (l & (l - 1)) {
2313 l = 1 << (qemu_fls(l) - 1);
2314 }
23326164
RH
2315
2316 return l;
82f2563f
PB
2317}
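/*
 * Worked example (editor-added): with valid.max_access_size == 4,
 * impl.unaligned == false, a requested length l == 8 and an address whose
 * lowest set bit is 2 (aligned only to 2 bytes), the alignment test lowers
 * access_size_max to 2, so memory_access_size() returns 2 and the caller
 * loops to finish the remaining bytes.
 */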
2318
5c9eb028
PM
2319MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2320 uint8_t *buf, int len, bool is_write)
13eb76e0 2321{
149f54b5 2322 hwaddr l;
13eb76e0 2323 uint8_t *ptr;
791af8c8 2324 uint64_t val;
149f54b5 2325 hwaddr addr1;
5c8a00ce 2326 MemoryRegion *mr;
3b643495 2327 MemTxResult result = MEMTX_OK;
3b46e624 2328
41063e1e 2329 rcu_read_lock();
13eb76e0 2330 while (len > 0) {
149f54b5 2331 l = len;
5c8a00ce 2332 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2333
13eb76e0 2334 if (is_write) {
5c8a00ce
PB
2335 if (!memory_access_is_direct(mr, is_write)) {
2336 l = memory_access_size(mr, l, addr1);
4917cf44 2337 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2338 potential bugs */
23326164
RH
2339 switch (l) {
2340 case 8:
2341 /* 64 bit write access */
2342 val = ldq_p(buf);
3b643495
PM
2343 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2344 attrs);
23326164
RH
2345 break;
2346 case 4:
1c213d19 2347 /* 32 bit write access */
c27004ec 2348 val = ldl_p(buf);
3b643495
PM
2349 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2350 attrs);
23326164
RH
2351 break;
2352 case 2:
1c213d19 2353 /* 16 bit write access */
c27004ec 2354 val = lduw_p(buf);
3b643495
PM
2355 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2356 attrs);
23326164
RH
2357 break;
2358 case 1:
1c213d19 2359 /* 8 bit write access */
c27004ec 2360 val = ldub_p(buf);
3b643495
PM
2361 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2362 attrs);
23326164
RH
2363 break;
2364 default:
2365 abort();
13eb76e0 2366 }
2bbfa05d 2367 } else {
5c8a00ce 2368 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2369 /* RAM case */
5579c7f3 2370 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2371 memcpy(ptr, buf, l);
845b6214 2372 invalidate_and_set_dirty(mr, addr1, l);
13eb76e0
FB
2373 }
2374 } else {
5c8a00ce 2375 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2376 /* I/O case */
5c8a00ce 2377 l = memory_access_size(mr, l, addr1);
23326164
RH
2378 switch (l) {
2379 case 8:
2380 /* 64 bit read access */
3b643495
PM
2381 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2382 attrs);
23326164
RH
2383 stq_p(buf, val);
2384 break;
2385 case 4:
13eb76e0 2386 /* 32 bit read access */
3b643495
PM
2387 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2388 attrs);
c27004ec 2389 stl_p(buf, val);
23326164
RH
2390 break;
2391 case 2:
13eb76e0 2392 /* 16 bit read access */
3b643495
PM
2393 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2394 attrs);
c27004ec 2395 stw_p(buf, val);
23326164
RH
2396 break;
2397 case 1:
1c213d19 2398 /* 8 bit read access */
3b643495
PM
2399 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2400 attrs);
c27004ec 2401 stb_p(buf, val);
23326164
RH
2402 break;
2403 default:
2404 abort();
13eb76e0
FB
2405 }
2406 } else {
2407 /* RAM case */
5c8a00ce 2408 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2409 memcpy(buf, ptr, l);
13eb76e0
FB
2410 }
2411 }
2412 len -= l;
2413 buf += l;
2414 addr += l;
2415 }
41063e1e 2416 rcu_read_unlock();
fd8aaa76 2417
3b643495 2418 return result;
13eb76e0 2419}
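/*
 * Illustrative usage sketch (editor-added): DMA-style transfers into guest
 * physical memory normally go through address_space_rw() (or the
 * address_space_read()/address_space_write() wrappers below) so that MMIO
 * dispatch, access-size splitting and dirty tracking are handled for the
 * caller.  The function name and parameters are placeholders.
 */
static MemTxResult example_dma_to_guest(hwaddr gpa, const uint8_t *data,
                                        int size)
{
    return address_space_rw(&address_space_memory, gpa,
                            MEMTXATTRS_UNSPECIFIED,
                            (uint8_t *)data, size, true);
}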
8df1cd07 2420
5c9eb028
PM
2421MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2422 const uint8_t *buf, int len)
ac1970fb 2423{
5c9eb028 2424 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
ac1970fb
AK
2425}
2426
5c9eb028
PM
2427MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2428 uint8_t *buf, int len)
ac1970fb 2429{
5c9eb028 2430 return address_space_rw(as, addr, attrs, buf, len, false);
ac1970fb
AK
2431}
2432
2433
a8170e5e 2434void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2435 int len, int is_write)
2436{
5c9eb028
PM
2437 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2438 buf, len, is_write);
ac1970fb
AK
2439}
2440
582b55a9
AG
2441enum write_rom_type {
2442 WRITE_DATA,
2443 FLUSH_CACHE,
2444};
2445
2a221651 2446static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2447 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2448{
149f54b5 2449 hwaddr l;
d0ecd2aa 2450 uint8_t *ptr;
149f54b5 2451 hwaddr addr1;
5c8a00ce 2452 MemoryRegion *mr;
3b46e624 2453
41063e1e 2454 rcu_read_lock();
d0ecd2aa 2455 while (len > 0) {
149f54b5 2456 l = len;
2a221651 2457 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2458
5c8a00ce
PB
2459 if (!(memory_region_is_ram(mr) ||
2460 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2461 /* do nothing */
2462 } else {
5c8a00ce 2463 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2464 /* ROM/RAM case */
5579c7f3 2465 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2466 switch (type) {
2467 case WRITE_DATA:
2468 memcpy(ptr, buf, l);
845b6214 2469 invalidate_and_set_dirty(mr, addr1, l);
582b55a9
AG
2470 break;
2471 case FLUSH_CACHE:
2472 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2473 break;
2474 }
d0ecd2aa
FB
2475 }
2476 len -= l;
2477 buf += l;
2478 addr += l;
2479 }
41063e1e 2480 rcu_read_unlock();
d0ecd2aa
FB
2481}
2482
582b55a9 2483/* used for ROM loading: can write in RAM and ROM */
2a221651 2484void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2485 const uint8_t *buf, int len)
2486{
2a221651 2487 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2488}
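/*
 * Illustrative usage sketch (editor-added): firmware and boot blobs are
 * deposited with cpu_physical_memory_write_rom(), since unlike a plain
 * address_space_write() it can also populate ROM-backed regions.  The blob
 * and load address are placeholders.
 */
static void example_load_boot_blob(const uint8_t *blob, int size,
                                   hwaddr load_addr)
{
    cpu_physical_memory_write_rom(&address_space_memory, load_addr,
                                  blob, size);
}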
2489
2490void cpu_flush_icache_range(hwaddr start, int len)
2491{
2492 /*
2493 * This function should do the same thing as an icache flush that was
2494 * triggered from within the guest. For TCG we are always cache coherent,
2495 * so there is no need to flush anything. For KVM / Xen we need to flush
2496 * the host's instruction cache at least.
2497 */
2498 if (tcg_enabled()) {
2499 return;
2500 }
2501
2a221651
EI
2502 cpu_physical_memory_write_rom_internal(&address_space_memory,
2503 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2504}
2505
6d16c2f8 2506typedef struct {
d3e71559 2507 MemoryRegion *mr;
6d16c2f8 2508 void *buffer;
a8170e5e
AK
2509 hwaddr addr;
2510 hwaddr len;
c2cba0ff 2511 bool in_use;
6d16c2f8
AL
2512} BounceBuffer;
2513
2514static BounceBuffer bounce;
2515
ba223c29 2516typedef struct MapClient {
e95205e1 2517 QEMUBH *bh;
72cf2d4f 2518 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2519} MapClient;
2520
38e047b5 2521QemuMutex map_client_list_lock;
72cf2d4f
BS
2522static QLIST_HEAD(map_client_list, MapClient) map_client_list
2523 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29 2524
e95205e1
FZ
2525static void cpu_unregister_map_client_do(MapClient *client)
2526{
2527 QLIST_REMOVE(client, link);
2528 g_free(client);
2529}
2530
33b6c2ed
FZ
2531static void cpu_notify_map_clients_locked(void)
2532{
2533 MapClient *client;
2534
2535 while (!QLIST_EMPTY(&map_client_list)) {
2536 client = QLIST_FIRST(&map_client_list);
e95205e1
FZ
2537 qemu_bh_schedule(client->bh);
2538 cpu_unregister_map_client_do(client);
33b6c2ed
FZ
2539 }
2540}
2541
e95205e1 2542void cpu_register_map_client(QEMUBH *bh)
ba223c29 2543{
7267c094 2544 MapClient *client = g_malloc(sizeof(*client));
ba223c29 2545
38e047b5 2546 qemu_mutex_lock(&map_client_list_lock);
e95205e1 2547 client->bh = bh;
72cf2d4f 2548 QLIST_INSERT_HEAD(&map_client_list, client, link);
33b6c2ed
FZ
2549 if (!atomic_read(&bounce.in_use)) {
2550 cpu_notify_map_clients_locked();
2551 }
38e047b5 2552 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2553}
2554
38e047b5 2555void cpu_exec_init_all(void)
ba223c29 2556{
38e047b5
FZ
2557 qemu_mutex_init(&ram_list.mutex);
2558 memory_map_init();
2559 io_mem_init();
2560 qemu_mutex_init(&map_client_list_lock);
ba223c29
AL
2561}
2562
e95205e1 2563void cpu_unregister_map_client(QEMUBH *bh)
ba223c29
AL
2564{
2565 MapClient *client;
2566
e95205e1
FZ
2567 qemu_mutex_lock(&map_client_list_lock);
2568 QLIST_FOREACH(client, &map_client_list, link) {
2569 if (client->bh == bh) {
2570 cpu_unregister_map_client_do(client);
2571 break;
2572 }
ba223c29 2573 }
e95205e1 2574 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2575}
2576
2577static void cpu_notify_map_clients(void)
2578{
38e047b5 2579 qemu_mutex_lock(&map_client_list_lock);
33b6c2ed 2580 cpu_notify_map_clients_locked();
38e047b5 2581 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2582}
2583
51644ab7
PB
2584bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2585{
5c8a00ce 2586 MemoryRegion *mr;
51644ab7
PB
2587 hwaddr l, xlat;
2588
41063e1e 2589 rcu_read_lock();
51644ab7
PB
2590 while (len > 0) {
2591 l = len;
5c8a00ce
PB
2592 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2593 if (!memory_access_is_direct(mr, is_write)) {
2594 l = memory_access_size(mr, l, addr);
2595 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2596 return false;
2597 }
2598 }
2599
2600 len -= l;
2601 addr += l;
2602 }
41063e1e 2603 rcu_read_unlock();
51644ab7
PB
2604 return true;
2605}
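/*
 * Illustrative usage sketch (editor-added): a device model can probe a
 * guest buffer with address_space_access_valid() up front and fail a
 * request cleanly instead of part-way through a transfer.  The helper name
 * is a placeholder.
 */
static bool example_request_is_accessible(hwaddr gpa, int size, bool is_write)
{
    return address_space_access_valid(&address_space_memory, gpa,
                                      size, is_write);
}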
2606
6d16c2f8
AL
2607/* Map a physical memory region into a host virtual address.
2608 * May map a subset of the requested range, given by and returned in *plen.
2609 * May return NULL if resources needed to perform the mapping are exhausted.
2610 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2611 * Use cpu_register_map_client() to know when retrying the map operation is
2612 * likely to succeed.
6d16c2f8 2613 */
ac1970fb 2614void *address_space_map(AddressSpace *as,
a8170e5e
AK
2615 hwaddr addr,
2616 hwaddr *plen,
ac1970fb 2617 bool is_write)
6d16c2f8 2618{
a8170e5e 2619 hwaddr len = *plen;
e3127ae0
PB
2620 hwaddr done = 0;
2621 hwaddr l, xlat, base;
2622 MemoryRegion *mr, *this_mr;
2623 ram_addr_t raddr;
6d16c2f8 2624
e3127ae0
PB
2625 if (len == 0) {
2626 return NULL;
2627 }
38bee5dc 2628
e3127ae0 2629 l = len;
41063e1e 2630 rcu_read_lock();
e3127ae0 2631 mr = address_space_translate(as, addr, &xlat, &l, is_write);
41063e1e 2632
e3127ae0 2633 if (!memory_access_is_direct(mr, is_write)) {
c2cba0ff 2634 if (atomic_xchg(&bounce.in_use, true)) {
41063e1e 2635 rcu_read_unlock();
e3127ae0 2636 return NULL;
6d16c2f8 2637 }
e85d9db5
KW
2638 /* Avoid unbounded allocations */
2639 l = MIN(l, TARGET_PAGE_SIZE);
2640 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2641 bounce.addr = addr;
2642 bounce.len = l;
d3e71559
PB
2643
2644 memory_region_ref(mr);
2645 bounce.mr = mr;
e3127ae0 2646 if (!is_write) {
5c9eb028
PM
2647 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2648 bounce.buffer, l);
8ab934f9 2649 }
6d16c2f8 2650
41063e1e 2651 rcu_read_unlock();
e3127ae0
PB
2652 *plen = l;
2653 return bounce.buffer;
2654 }
2655
2656 base = xlat;
2657 raddr = memory_region_get_ram_addr(mr);
2658
2659 for (;;) {
6d16c2f8
AL
2660 len -= l;
2661 addr += l;
e3127ae0
PB
2662 done += l;
2663 if (len == 0) {
2664 break;
2665 }
2666
2667 l = len;
2668 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2669 if (this_mr != mr || xlat != base + done) {
2670 break;
2671 }
6d16c2f8 2672 }
e3127ae0 2673
d3e71559 2674 memory_region_ref(mr);
41063e1e 2675 rcu_read_unlock();
e3127ae0
PB
2676 *plen = done;
2677 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2678}
2679
ac1970fb 2680/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2681 * Will also mark the memory as dirty if is_write == 1. access_len gives
2682 * the amount of memory that was actually read or written by the caller.
2683 */
a8170e5e
AK
2684void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2685 int is_write, hwaddr access_len)
6d16c2f8
AL
2686{
2687 if (buffer != bounce.buffer) {
d3e71559
PB
2688 MemoryRegion *mr;
2689 ram_addr_t addr1;
2690
2691 mr = qemu_ram_addr_from_host(buffer, &addr1);
2692 assert(mr != NULL);
6d16c2f8 2693 if (is_write) {
845b6214 2694 invalidate_and_set_dirty(mr, addr1, access_len);
6d16c2f8 2695 }
868bb33f 2696 if (xen_enabled()) {
e41d7c69 2697 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2698 }
d3e71559 2699 memory_region_unref(mr);
6d16c2f8
AL
2700 return;
2701 }
2702 if (is_write) {
5c9eb028
PM
2703 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2704 bounce.buffer, access_len);
6d16c2f8 2705 }
f8a83245 2706 qemu_vfree(bounce.buffer);
6d16c2f8 2707 bounce.buffer = NULL;
d3e71559 2708 memory_region_unref(bounce.mr);
c2cba0ff 2709 atomic_mb_set(&bounce.in_use, false);
ba223c29 2710 cpu_notify_map_clients();
6d16c2f8 2711}
d0ecd2aa 2712
a8170e5e
AK
2713void *cpu_physical_memory_map(hwaddr addr,
2714 hwaddr *plen,
ac1970fb
AK
2715 int is_write)
2716{
2717 return address_space_map(&address_space_memory, addr, plen, is_write);
2718}
2719
a8170e5e
AK
2720void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2721 int is_write, hwaddr access_len)
ac1970fb
AK
2722{
2723 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2724}
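/*
 * Illustrative usage sketch (editor-added): the zero-copy pattern is
 * map -> access -> unmap.  If address_space_map() returns NULL (e.g. the
 * single bounce buffer above is busy), the caller can park a bottom half
 * with cpu_register_map_client() and retry from there, which loosely
 * mirrors how dma-helpers.c drives these functions.  The callback, opaque
 * pointer and transfer details are placeholders, and qemu_bh_new() is
 * assumed from the main-loop API.
 */
static void example_map_retry_bh(void *opaque)
{
    /* ... re-issue example_copy_from_guest() for the parked request ... */
}

static bool example_copy_from_guest(hwaddr gpa, void *dest, hwaddr size,
                                    void *opaque)
{
    hwaddr plen = size;
    void *host = address_space_map(&address_space_memory, gpa, &plen, false);

    if (!host) {
        cpu_register_map_client(qemu_bh_new(example_map_retry_bh, opaque));
        return false;
    }
    memcpy(dest, host, plen);                 /* may be shorter than size */
    address_space_unmap(&address_space_memory, host, plen, false, plen);
    return plen == size;
}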
2725
8df1cd07 2726/* warning: addr must be aligned */
50013115
PM
2727static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2728 MemTxAttrs attrs,
2729 MemTxResult *result,
2730 enum device_endian endian)
8df1cd07 2731{
8df1cd07 2732 uint8_t *ptr;
791af8c8 2733 uint64_t val;
5c8a00ce 2734 MemoryRegion *mr;
149f54b5
PB
2735 hwaddr l = 4;
2736 hwaddr addr1;
50013115 2737 MemTxResult r;
8df1cd07 2738
41063e1e 2739 rcu_read_lock();
fdfba1a2 2740 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2741 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2742 /* I/O case */
50013115 2743 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
1e78bcc1
AG
2744#if defined(TARGET_WORDS_BIGENDIAN)
2745 if (endian == DEVICE_LITTLE_ENDIAN) {
2746 val = bswap32(val);
2747 }
2748#else
2749 if (endian == DEVICE_BIG_ENDIAN) {
2750 val = bswap32(val);
2751 }
2752#endif
8df1cd07
FB
2753 } else {
2754 /* RAM case */
5c8a00ce 2755 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2756 & TARGET_PAGE_MASK)
149f54b5 2757 + addr1);
1e78bcc1
AG
2758 switch (endian) {
2759 case DEVICE_LITTLE_ENDIAN:
2760 val = ldl_le_p(ptr);
2761 break;
2762 case DEVICE_BIG_ENDIAN:
2763 val = ldl_be_p(ptr);
2764 break;
2765 default:
2766 val = ldl_p(ptr);
2767 break;
2768 }
50013115
PM
2769 r = MEMTX_OK;
2770 }
2771 if (result) {
2772 *result = r;
8df1cd07 2773 }
41063e1e 2774 rcu_read_unlock();
8df1cd07
FB
2775 return val;
2776}
2777
50013115
PM
2778uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2779 MemTxAttrs attrs, MemTxResult *result)
2780{
2781 return address_space_ldl_internal(as, addr, attrs, result,
2782 DEVICE_NATIVE_ENDIAN);
2783}
2784
2785uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2786 MemTxAttrs attrs, MemTxResult *result)
2787{
2788 return address_space_ldl_internal(as, addr, attrs, result,
2789 DEVICE_LITTLE_ENDIAN);
2790}
2791
2792uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2793 MemTxAttrs attrs, MemTxResult *result)
2794{
2795 return address_space_ldl_internal(as, addr, attrs, result,
2796 DEVICE_BIG_ENDIAN);
2797}
2798
fdfba1a2 2799uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2800{
50013115 2801 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2802}
2803
fdfba1a2 2804uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2805{
50013115 2806 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2807}
2808
fdfba1a2 2809uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2810{
50013115 2811 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2812}
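/*
 * Illustrative usage sketch (editor-added): ldl_phys()/ldl_le_phys()/
 * ldl_be_phys() above are shorthands for callers that do not care about
 * transaction attributes or results; the attrs/result form looks like this.
 * The helper name is a placeholder.
 */
static uint32_t example_read_guest_word(hwaddr gpa, bool *ok)
{
    MemTxResult res;
    uint32_t val = address_space_ldl(&address_space_memory, gpa,
                                     MEMTXATTRS_UNSPECIFIED, &res);

    *ok = (res == MEMTX_OK);
    return val;
}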
2813
84b7b8e7 2814/* warning: addr must be aligned */
50013115
PM
2815static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2816 MemTxAttrs attrs,
2817 MemTxResult *result,
2818 enum device_endian endian)
84b7b8e7 2819{
84b7b8e7
FB
2820 uint8_t *ptr;
2821 uint64_t val;
5c8a00ce 2822 MemoryRegion *mr;
149f54b5
PB
2823 hwaddr l = 8;
2824 hwaddr addr1;
50013115 2825 MemTxResult r;
84b7b8e7 2826
41063e1e 2827 rcu_read_lock();
2c17449b 2828 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2829 false);
2830 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2831 /* I/O case */
50013115 2832 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
968a5627
PB
2833#if defined(TARGET_WORDS_BIGENDIAN)
2834 if (endian == DEVICE_LITTLE_ENDIAN) {
2835 val = bswap64(val);
2836 }
2837#else
2838 if (endian == DEVICE_BIG_ENDIAN) {
2839 val = bswap64(val);
2840 }
84b7b8e7
FB
2841#endif
2842 } else {
2843 /* RAM case */
5c8a00ce 2844 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2845 & TARGET_PAGE_MASK)
149f54b5 2846 + addr1);
1e78bcc1
AG
2847 switch (endian) {
2848 case DEVICE_LITTLE_ENDIAN:
2849 val = ldq_le_p(ptr);
2850 break;
2851 case DEVICE_BIG_ENDIAN:
2852 val = ldq_be_p(ptr);
2853 break;
2854 default:
2855 val = ldq_p(ptr);
2856 break;
2857 }
50013115
PM
2858 r = MEMTX_OK;
2859 }
2860 if (result) {
2861 *result = r;
84b7b8e7 2862 }
41063e1e 2863 rcu_read_unlock();
84b7b8e7
FB
2864 return val;
2865}
2866
50013115
PM
2867uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2868 MemTxAttrs attrs, MemTxResult *result)
2869{
2870 return address_space_ldq_internal(as, addr, attrs, result,
2871 DEVICE_NATIVE_ENDIAN);
2872}
2873
2874uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2875 MemTxAttrs attrs, MemTxResult *result)
2876{
2877 return address_space_ldq_internal(as, addr, attrs, result,
2878 DEVICE_LITTLE_ENDIAN);
2879}
2880
2881uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2882 MemTxAttrs attrs, MemTxResult *result)
2883{
2884 return address_space_ldq_internal(as, addr, attrs, result,
2885 DEVICE_BIG_ENDIAN);
2886}
2887
2c17449b 2888uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2889{
50013115 2890 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2891}
2892
2c17449b 2893uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2894{
50013115 2895 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2896}
2897
2c17449b 2898uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2899{
50013115 2900 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2901}
2902
aab33094 2903/* XXX: optimize */
50013115
PM
2904uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
2905 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
2906{
2907 uint8_t val;
50013115
PM
2908 MemTxResult r;
2909
2910 r = address_space_rw(as, addr, attrs, &val, 1, 0);
2911 if (result) {
2912 *result = r;
2913 }
aab33094
FB
2914 return val;
2915}
2916
50013115
PM
2917uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2918{
2919 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2920}
2921
733f0b02 2922/* warning: addr must be aligned */
50013115
PM
2923static inline uint32_t address_space_lduw_internal(AddressSpace *as,
2924 hwaddr addr,
2925 MemTxAttrs attrs,
2926 MemTxResult *result,
2927 enum device_endian endian)
aab33094 2928{
733f0b02
MT
2929 uint8_t *ptr;
2930 uint64_t val;
5c8a00ce 2931 MemoryRegion *mr;
149f54b5
PB
2932 hwaddr l = 2;
2933 hwaddr addr1;
50013115 2934 MemTxResult r;
733f0b02 2935
41063e1e 2936 rcu_read_lock();
41701aa4 2937 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2938 false);
2939 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2940 /* I/O case */
50013115 2941 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
1e78bcc1
AG
2942#if defined(TARGET_WORDS_BIGENDIAN)
2943 if (endian == DEVICE_LITTLE_ENDIAN) {
2944 val = bswap16(val);
2945 }
2946#else
2947 if (endian == DEVICE_BIG_ENDIAN) {
2948 val = bswap16(val);
2949 }
2950#endif
733f0b02
MT
2951 } else {
2952 /* RAM case */
5c8a00ce 2953 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2954 & TARGET_PAGE_MASK)
149f54b5 2955 + addr1);
1e78bcc1
AG
2956 switch (endian) {
2957 case DEVICE_LITTLE_ENDIAN:
2958 val = lduw_le_p(ptr);
2959 break;
2960 case DEVICE_BIG_ENDIAN:
2961 val = lduw_be_p(ptr);
2962 break;
2963 default:
2964 val = lduw_p(ptr);
2965 break;
2966 }
50013115
PM
2967 r = MEMTX_OK;
2968 }
2969 if (result) {
2970 *result = r;
733f0b02 2971 }
41063e1e 2972 rcu_read_unlock();
733f0b02 2973 return val;
aab33094
FB
2974}
2975
50013115
PM
2976uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
2977 MemTxAttrs attrs, MemTxResult *result)
2978{
2979 return address_space_lduw_internal(as, addr, attrs, result,
2980 DEVICE_NATIVE_ENDIAN);
2981}
2982
2983uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
2984 MemTxAttrs attrs, MemTxResult *result)
2985{
2986 return address_space_lduw_internal(as, addr, attrs, result,
2987 DEVICE_LITTLE_ENDIAN);
2988}
2989
2990uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
2991 MemTxAttrs attrs, MemTxResult *result)
2992{
2993 return address_space_lduw_internal(as, addr, attrs, result,
2994 DEVICE_BIG_ENDIAN);
2995}
2996
41701aa4 2997uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2998{
50013115 2999 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3000}
3001
41701aa4 3002uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3003{
50013115 3004 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3005}
3006
41701aa4 3007uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3008{
50013115 3009 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3010}
3011
8df1cd07
FB
3012/* warning: addr must be aligned. The ram page is not marked as dirty
3013 and the code inside is not invalidated. It is useful if the dirty
3014 bits are used to track modified PTEs */
50013115
PM
3015void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3016 MemTxAttrs attrs, MemTxResult *result)
8df1cd07 3017{
8df1cd07 3018 uint8_t *ptr;
5c8a00ce 3019 MemoryRegion *mr;
149f54b5
PB
3020 hwaddr l = 4;
3021 hwaddr addr1;
50013115 3022 MemTxResult r;
845b6214 3023 uint8_t dirty_log_mask;
8df1cd07 3024
41063e1e 3025 rcu_read_lock();
2198a121 3026 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3027 true);
3028 if (l < 4 || !memory_access_is_direct(mr, true)) {
50013115 3029 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3030 } else {
5c8a00ce 3031 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3032 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3033 stl_p(ptr, val);
74576198 3034
845b6214
PB
3035 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3036 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
58d2707e 3037 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
50013115
PM
3038 r = MEMTX_OK;
3039 }
3040 if (result) {
3041 *result = r;
8df1cd07 3042 }
41063e1e 3043 rcu_read_unlock();
8df1cd07
FB
3044}
3045
50013115
PM
3046void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3047{
3048 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3049}
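/*
 * Illustrative usage sketch (editor-added): the _notdirty store suits a
 * target MMU helper that updates accessed/dirty bits inside a guest
 * page-table entry, where invalidating translated code for the touched
 * page would be wasted work.  The PTE address and bit mask are
 * placeholders.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr,
                                     uint32_t pte, uint32_t accessed_bit)
{
    stl_phys_notdirty(as, pte_addr, pte | accessed_bit);
}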
3050
8df1cd07 3051/* warning: addr must be aligned */
50013115
PM
3052static inline void address_space_stl_internal(AddressSpace *as,
3053 hwaddr addr, uint32_t val,
3054 MemTxAttrs attrs,
3055 MemTxResult *result,
3056 enum device_endian endian)
8df1cd07 3057{
8df1cd07 3058 uint8_t *ptr;
5c8a00ce 3059 MemoryRegion *mr;
149f54b5
PB
3060 hwaddr l = 4;
3061 hwaddr addr1;
50013115 3062 MemTxResult r;
8df1cd07 3063
41063e1e 3064 rcu_read_lock();
ab1da857 3065 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3066 true);
3067 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3068#if defined(TARGET_WORDS_BIGENDIAN)
3069 if (endian == DEVICE_LITTLE_ENDIAN) {
3070 val = bswap32(val);
3071 }
3072#else
3073 if (endian == DEVICE_BIG_ENDIAN) {
3074 val = bswap32(val);
3075 }
3076#endif
50013115 3077 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3078 } else {
8df1cd07 3079 /* RAM case */
5c8a00ce 3080 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3081 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3082 switch (endian) {
3083 case DEVICE_LITTLE_ENDIAN:
3084 stl_le_p(ptr, val);
3085 break;
3086 case DEVICE_BIG_ENDIAN:
3087 stl_be_p(ptr, val);
3088 break;
3089 default:
3090 stl_p(ptr, val);
3091 break;
3092 }
845b6214 3093 invalidate_and_set_dirty(mr, addr1, 4);
50013115
PM
3094 r = MEMTX_OK;
3095 }
3096 if (result) {
3097 *result = r;
8df1cd07 3098 }
41063e1e 3099 rcu_read_unlock();
8df1cd07
FB
3100}
3101
50013115
PM
3102void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3103 MemTxAttrs attrs, MemTxResult *result)
3104{
3105 address_space_stl_internal(as, addr, val, attrs, result,
3106 DEVICE_NATIVE_ENDIAN);
3107}
3108
3109void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3110 MemTxAttrs attrs, MemTxResult *result)
3111{
3112 address_space_stl_internal(as, addr, val, attrs, result,
3113 DEVICE_LITTLE_ENDIAN);
3114}
3115
3116void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3117 MemTxAttrs attrs, MemTxResult *result)
3118{
3119 address_space_stl_internal(as, addr, val, attrs, result,
3120 DEVICE_BIG_ENDIAN);
3121}
3122
ab1da857 3123void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3124{
50013115 3125 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3126}
3127
ab1da857 3128void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3129{
50013115 3130 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3131}
3132
ab1da857 3133void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3134{
50013115 3135 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3136}
3137
aab33094 3138/* XXX: optimize */
50013115
PM
3139void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3140 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
3141{
3142 uint8_t v = val;
50013115
PM
3143 MemTxResult r;
3144
3145 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3146 if (result) {
3147 *result = r;
3148 }
3149}
3150
3151void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3152{
3153 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
aab33094
FB
3154}
3155
733f0b02 3156/* warning: addr must be aligned */
50013115
PM
3157static inline void address_space_stw_internal(AddressSpace *as,
3158 hwaddr addr, uint32_t val,
3159 MemTxAttrs attrs,
3160 MemTxResult *result,
3161 enum device_endian endian)
aab33094 3162{
733f0b02 3163 uint8_t *ptr;
5c8a00ce 3164 MemoryRegion *mr;
149f54b5
PB
3165 hwaddr l = 2;
3166 hwaddr addr1;
50013115 3167 MemTxResult r;
733f0b02 3168
41063e1e 3169 rcu_read_lock();
5ce5944d 3170 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 3171 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3172#if defined(TARGET_WORDS_BIGENDIAN)
3173 if (endian == DEVICE_LITTLE_ENDIAN) {
3174 val = bswap16(val);
3175 }
3176#else
3177 if (endian == DEVICE_BIG_ENDIAN) {
3178 val = bswap16(val);
3179 }
3180#endif
50013115 3181 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
733f0b02 3182 } else {
733f0b02 3183 /* RAM case */
5c8a00ce 3184 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 3185 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3186 switch (endian) {
3187 case DEVICE_LITTLE_ENDIAN:
3188 stw_le_p(ptr, val);
3189 break;
3190 case DEVICE_BIG_ENDIAN:
3191 stw_be_p(ptr, val);
3192 break;
3193 default:
3194 stw_p(ptr, val);
3195 break;
3196 }
845b6214 3197 invalidate_and_set_dirty(mr, addr1, 2);
50013115
PM
3198 r = MEMTX_OK;
3199 }
3200 if (result) {
3201 *result = r;
733f0b02 3202 }
41063e1e 3203 rcu_read_unlock();
aab33094
FB
3204}
3205
50013115
PM
3206void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3207 MemTxAttrs attrs, MemTxResult *result)
3208{
3209 address_space_stw_internal(as, addr, val, attrs, result,
3210 DEVICE_NATIVE_ENDIAN);
3211}
3212
3213void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3214 MemTxAttrs attrs, MemTxResult *result)
3215{
3216 address_space_stw_internal(as, addr, val, attrs, result,
3217 DEVICE_LITTLE_ENDIAN);
3218}
3219
3220void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3221 MemTxAttrs attrs, MemTxResult *result)
3222{
3223 address_space_stw_internal(as, addr, val, attrs, result,
3224 DEVICE_BIG_ENDIAN);
3225}
3226
5ce5944d 3227void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3228{
50013115 3229 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3230}
3231
5ce5944d 3232void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3233{
50013115 3234 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3235}
3236
5ce5944d 3237void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3238{
50013115 3239 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3240}
3241
aab33094 3242/* XXX: optimize */
50013115
PM
3243void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3244 MemTxAttrs attrs, MemTxResult *result)
aab33094 3245{
50013115 3246 MemTxResult r;
aab33094 3247 val = tswap64(val);
50013115
PM
3248 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3249 if (result) {
3250 *result = r;
3251 }
aab33094
FB
3252}
3253
50013115
PM
3254void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3255 MemTxAttrs attrs, MemTxResult *result)
1e78bcc1 3256{
50013115 3257 MemTxResult r;
1e78bcc1 3258 val = cpu_to_le64(val);
50013115
PM
3259 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3260 if (result) {
3261 *result = r;
3262 }
3263}
3264void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3265 MemTxAttrs attrs, MemTxResult *result)
3266{
3267 MemTxResult r;
3268 val = cpu_to_be64(val);
3269 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3270 if (result) {
3271 *result = r;
3272 }
3273}
3274
3275void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3276{
3277 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3278}
3279
3280void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3281{
3282 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3283}
3284
f606604f 3285void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1 3286{
50013115 3287 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3288}
3289
5e2972fd 3290/* virtual memory access for debug (includes writing to ROM) */
f17ec444 3291int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 3292 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3293{
3294 int l;
a8170e5e 3295 hwaddr phys_addr;
9b3c35e0 3296 target_ulong page;
13eb76e0
FB
3297
3298 while (len > 0) {
3299 page = addr & TARGET_PAGE_MASK;
f17ec444 3300 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
3301 /* if no physical page mapped, return an error */
3302 if (phys_addr == -1)
3303 return -1;
3304 l = (page + TARGET_PAGE_SIZE) - addr;
3305 if (l > len)
3306 l = len;
5e2972fd 3307 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
3308 if (is_write) {
3309 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3310 } else {
5c9eb028
PM
3311 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3312 buf, l, 0);
2e38847b 3313 }
13eb76e0
FB
3314 len -= l;
3315 buf += l;
3316 addr += l;
3317 }
3318 return 0;
3319}
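/*
 * Illustrative usage sketch (editor-added): a debugger front end (the
 * gdbstub, for instance) reads guest virtual memory of a stopped vCPU via
 * cpu_memory_rw_debug(), which performs the page-table walk through
 * cpu_get_phys_page_debug() above.  The helper name is a placeholder.
 */
static int example_debug_peek(CPUState *cpu, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(cpu, vaddr, buf, len, 0);
}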
a68fe89c 3320#endif
13eb76e0 3321
8e4a424b
BS
3322/*
3323 * A helper function for the _utterly broken_ virtio device model to find out if
3324 * it's running on a big endian machine. Don't do this at home kids!
3325 */
98ed8ecf
GK
3326bool target_words_bigendian(void);
3327bool target_words_bigendian(void)
8e4a424b
BS
3328{
3329#if defined(TARGET_WORDS_BIGENDIAN)
3330 return true;
3331#else
3332 return false;
3333#endif
3334}
3335
76f35538 3336#ifndef CONFIG_USER_ONLY
a8170e5e 3337bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 3338{
5c8a00ce 3339 MemoryRegion*mr;
149f54b5 3340 hwaddr l = 1;
41063e1e 3341 bool res;
76f35538 3342
41063e1e 3343 rcu_read_lock();
5c8a00ce
PB
3344 mr = address_space_translate(&address_space_memory,
3345 phys_addr, &phys_addr, &l, false);
76f35538 3346
41063e1e
PB
3347 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3348 rcu_read_unlock();
3349 return res;
76f35538 3350}
bd2fa51f 3351
e3807054 3352int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
bd2fa51f
MH
3353{
3354 RAMBlock *block;
e3807054 3355 int ret = 0;
bd2fa51f 3356
0dc3f44a
MD
3357 rcu_read_lock();
3358 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
e3807054
DDAG
3359 ret = func(block->idstr, block->host, block->offset,
3360 block->used_length, opaque);
3361 if (ret) {
3362 break;
3363 }
bd2fa51f 3364 }
0dc3f44a 3365 rcu_read_unlock();
e3807054 3366 return ret;
bd2fa51f 3367}
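/*
 * Illustrative usage sketch (editor-added): callers walk every RAM block by
 * passing qemu_ram_foreach_block() a callback of this shape; the parameter
 * list is inferred from the call above and the printout is a placeholder.
 * Returning non-zero stops the walk early.
 */
static int example_dump_ram_block(const char *idstr, void *host_addr,
                                  ram_addr_t offset, ram_addr_t length,
                                  void *opaque)
{
    fprintf(stderr, "ram block %s: host %p offset " RAM_ADDR_FMT
            " length " RAM_ADDR_FMT "\n", idstr, host_addr, offset, length);
    return 0;
}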
ec3f8c99 3368#endif