[mirror_qemu.git] / exec.c
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c
FB
22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
4485bd26 29#if !defined(CONFIG_USER_ONLY)
47c8ca53 30#include "hw/boards.h"
4485bd26 31#endif
cc9e98cb 32#include "hw/qdev.h"
1de7afc9 33#include "qemu/osdep.h"
9c17d615 34#include "sysemu/kvm.h"
2ff3de68 35#include "sysemu/sysemu.h"
0d09e41a 36#include "hw/xen/xen.h"
1de7afc9
PB
37#include "qemu/timer.h"
38#include "qemu/config-file.h"
75a34036 39#include "qemu/error-report.h"
022c62cb 40#include "exec/memory.h"
9c17d615 41#include "sysemu/dma.h"
022c62cb 42#include "exec/address-spaces.h"
53a5960a
PB
43#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
432d268c 45#else /* !CONFIG_USER_ONLY */
9c17d615 46#include "sysemu/xen-mapcache.h"
6506e4f9 47#include "trace.h"
53a5960a 48#endif
0d6d3c87 49#include "exec/cpu-all.h"
0dc3f44a 50#include "qemu/rcu_queue.h"
4840f10e 51#include "qemu/main-loop.h"
022c62cb 52#include "exec/cputlb.h"
5b6dd868 53#include "translate-all.h"
0cac1b66 54
022c62cb 55#include "exec/memory-internal.h"
220c3ebd 56#include "exec/ram_addr.h"
67d95c15 57
b35ba30f
MT
58#include "qemu/range.h"
59
db7b5426 60//#define DEBUG_SUBPAGE
1196be37 61
e2eef170 62#if !defined(CONFIG_USER_ONLY)
0dc3f44a
MD
63/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
64 * are protected by the ramlist lock.
65 */
0d53d9fe 66RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
67
68static MemoryRegion *system_memory;
309cb471 69static MemoryRegion *system_io;
62152b8a 70
f6790af6
AK
71AddressSpace address_space_io;
72AddressSpace address_space_memory;
2673a5da 73
0844e007 74MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 75static MemoryRegion io_mem_unassigned;
0e0df1e2 76
7bd4f430
PB
77/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
78#define RAM_PREALLOC (1 << 0)
79
dbcb8981
PB
80/* RAM is mmap-ed with MAP_SHARED */
81#define RAM_SHARED (1 << 1)
82
62be4e3a
MT
83/* Only a portion of RAM (used_length) is actually used, and migrated.
84 * This used_length size can change across reboots.
85 */
86#define RAM_RESIZEABLE (1 << 2)
87
e2eef170 88#endif
9fa3e853 89
bdc44640 90struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601
FB
91/* current CPU in the current thread. It is only valid inside
92 cpu_exec() */
4917cf44 93DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 94/* 0 = Do not count executed instructions.
bf20dc07 95 1 = Precise instruction counting.
2e70f6ef 96 2 = Adaptive rate instruction counting. */
5708fc66 97int use_icount;
6a00d601 98
e2eef170 99#if !defined(CONFIG_USER_ONLY)
4346ae3e 100
1db8abb1
PB
101typedef struct PhysPageEntry PhysPageEntry;
102
103struct PhysPageEntry {
9736e55b 104 /* How many levels to skip to the next entry (each level covers P_L2_BITS address bits); 0 for a leaf. */
8b795765 105 uint32_t skip : 6;
9736e55b 106 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 107 uint32_t ptr : 26;
1db8abb1
PB
108};
109
8b795765
MT
110#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
111
03f49957 112/* Size of the L2 (and L3, etc) page tables. */
57271d63 113#define ADDR_SPACE_BITS 64
03f49957 114
026736ce 115#define P_L2_BITS 9
03f49957
PB
116#define P_L2_SIZE (1 << P_L2_BITS)
117
118#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
119
120typedef PhysPageEntry Node[P_L2_SIZE];
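/* The dispatch table below is a radix tree over physical addresses: each
 * level consumes P_L2_BITS address bits above TARGET_PAGE_BITS. As a worked
 * example, a target with 4 KiB pages (TARGET_PAGE_BITS == 12) gets
 * P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6 levels of 512-entry nodes.
 */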
0475d94f 121
53cb28cb 122typedef struct PhysPageMap {
79e2b9ae
PB
123 struct rcu_head rcu;
124
53cb28cb
MA
125 unsigned sections_nb;
126 unsigned sections_nb_alloc;
127 unsigned nodes_nb;
128 unsigned nodes_nb_alloc;
129 Node *nodes;
130 MemoryRegionSection *sections;
131} PhysPageMap;
132
1db8abb1 133struct AddressSpaceDispatch {
79e2b9ae
PB
134 struct rcu_head rcu;
135
1db8abb1
PB
136 /* This is a multi-level map on the physical address space.
137 * The bottom level has pointers to MemoryRegionSections.
138 */
139 PhysPageEntry phys_map;
53cb28cb 140 PhysPageMap map;
acc9d80b 141 AddressSpace *as;
1db8abb1
PB
142};
143
90260c6c
JK
144#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
145typedef struct subpage_t {
146 MemoryRegion iomem;
acc9d80b 147 AddressSpace *as;
90260c6c
JK
148 hwaddr base;
149 uint16_t sub_section[TARGET_PAGE_SIZE];
150} subpage_t;
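/* A subpage stands in for a target page that is shared by more than one
 * MemoryRegionSection: iomem dispatches each access through sub_section[],
 * which records one section index per byte offset within the page (see
 * SUBPAGE_IDX and register_subpage() below).
 */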
151
b41aac4f
LPF
152#define PHYS_SECTION_UNASSIGNED 0
153#define PHYS_SECTION_NOTDIRTY 1
154#define PHYS_SECTION_ROM 2
155#define PHYS_SECTION_WATCH 3
5312bd8b 156
e2eef170 157static void io_mem_init(void);
62152b8a 158static void memory_map_init(void);
09daed84 159static void tcg_commit(MemoryListener *listener);
e2eef170 160
1ec9b909 161static MemoryRegion io_mem_watch;
6658ffb8 162#endif
fd6ce8f6 163
6d9a1304 164#if !defined(CONFIG_USER_ONLY)
d6f2ea22 165
53cb28cb 166static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 167{
53cb28cb
MA
168 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
169 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
170 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
171 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 172 }
f7bf5461
AK
173}
174
db94604b 175static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
f7bf5461
AK
176{
177 unsigned i;
8b795765 178 uint32_t ret;
db94604b
PB
179 PhysPageEntry e;
180 PhysPageEntry *p;
f7bf5461 181
53cb28cb 182 ret = map->nodes_nb++;
db94604b 183 p = map->nodes[ret];
f7bf5461 184 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 185 assert(ret != map->nodes_nb_alloc);
db94604b
PB
186
187 e.skip = leaf ? 0 : 1;
188 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
03f49957 189 for (i = 0; i < P_L2_SIZE; ++i) {
db94604b 190 memcpy(&p[i], &e, sizeof(e));
d6f2ea22 191 }
f7bf5461 192 return ret;
d6f2ea22
AK
193}
194
53cb28cb
MA
195static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
196 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 197 int level)
f7bf5461
AK
198{
199 PhysPageEntry *p;
03f49957 200 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 201
9736e55b 202 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
db94604b 203 lp->ptr = phys_map_node_alloc(map, level == 0);
92e873b9 204 }
db94604b 205 p = map->nodes[lp->ptr];
03f49957 206 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 207
03f49957 208 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 209 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 210 lp->skip = 0;
c19e8800 211 lp->ptr = leaf;
07f07b31
AK
212 *index += step;
213 *nb -= step;
2999097b 214 } else {
53cb28cb 215 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b
AK
216 }
217 ++lp;
f7bf5461
AK
218 }
219}
220
ac1970fb 221static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 222 hwaddr index, hwaddr nb,
2999097b 223 uint16_t leaf)
f7bf5461 224{
2999097b 225 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 226 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 227
53cb28cb 228 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
229}
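/* Illustrative example: registering a 2 MiB, page-aligned section with 4 KiB
 * target pages boils down to
 *
 *     phys_page_set(d, base >> TARGET_PAGE_BITS, 512, section_index);
 *
 * which walks down the tree, allocating nodes on demand, and stores
 * section_index either in individual leaves or directly in an intermediate
 * entry when an aligned run covers that entry's whole span.
 */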
230
b35ba30f
MT
231/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
232 * and update our entry so we can skip it and go directly to the destination.
233 */
234static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
235{
236 unsigned valid_ptr = P_L2_SIZE;
237 int valid = 0;
238 PhysPageEntry *p;
239 int i;
240
241 if (lp->ptr == PHYS_MAP_NODE_NIL) {
242 return;
243 }
244
245 p = nodes[lp->ptr];
246 for (i = 0; i < P_L2_SIZE; i++) {
247 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
248 continue;
249 }
250
251 valid_ptr = i;
252 valid++;
253 if (p[i].skip) {
254 phys_page_compact(&p[i], nodes, compacted);
255 }
256 }
257
258 /* We can only compress if there's only one child. */
259 if (valid != 1) {
260 return;
261 }
262
263 assert(valid_ptr < P_L2_SIZE);
264
265 /* Don't compress if it won't fit in the # of bits we have. */
266 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
267 return;
268 }
269
270 lp->ptr = p[valid_ptr].ptr;
271 if (!p[valid_ptr].skip) {
272 /* If our only child is a leaf, make this a leaf. */
273 /* By design, we should have made this node a leaf to begin with so we
274 * should never reach here.
275 * But since it's so simple to handle this, let's do it just in case we
276 * change this rule.
277 */
278 lp->skip = 0;
279 } else {
280 lp->skip += p[valid_ptr].skip;
281 }
282}
283
284static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
285{
286 DECLARE_BITMAP(compacted, nodes_nb);
287
288 if (d->phys_map.skip) {
53cb28cb 289 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
290 }
291}
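/* Compaction folds chains of single-child nodes into their parent by adding
 * up the skip counts, so that phys_page_find() below can descend several
 * levels of the tree in one step instead of touching a node per level.
 */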
292
97115a8d 293static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 294 Node *nodes, MemoryRegionSection *sections)
92e873b9 295{
31ab2b4a 296 PhysPageEntry *p;
97115a8d 297 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 298 int i;
f1f6e3b8 299
9736e55b 300 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 301 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 302 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 303 }
9affd6fc 304 p = nodes[lp.ptr];
03f49957 305 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 306 }
b35ba30f
MT
307
308 if (sections[lp.ptr].size.hi ||
309 range_covers_byte(sections[lp.ptr].offset_within_address_space,
310 sections[lp.ptr].size.lo, addr)) {
311 return &sections[lp.ptr];
312 } else {
313 return &sections[PHYS_SECTION_UNASSIGNED];
314 }
f3705d53
AK
315}
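/* The final range check above ensures that a leaf reached by skipping levels
 * really covers addr; otherwise the lookup falls back to
 * PHYS_SECTION_UNASSIGNED.
 */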
316
e5548617
BS
317bool memory_region_is_unassigned(MemoryRegion *mr)
318{
2a8e7499 319 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 320 && mr != &io_mem_watch;
fd6ce8f6 321}
149f54b5 322
79e2b9ae 323/* Called from RCU critical section */
c7086b4a 324static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
325 hwaddr addr,
326 bool resolve_subpage)
9f029603 327{
90260c6c
JK
328 MemoryRegionSection *section;
329 subpage_t *subpage;
330
53cb28cb 331 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
332 if (resolve_subpage && section->mr->subpage) {
333 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 334 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
335 }
336 return section;
9f029603
JK
337}
338
79e2b9ae 339/* Called from RCU critical section */
90260c6c 340static MemoryRegionSection *
c7086b4a 341address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 342 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
343{
344 MemoryRegionSection *section;
965eb2fc 345 MemoryRegion *mr;
a87f3954 346 Int128 diff;
149f54b5 347
c7086b4a 348 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
349 /* Compute offset within MemoryRegionSection */
350 addr -= section->offset_within_address_space;
351
352 /* Compute offset within MemoryRegion */
353 *xlat = addr + section->offset_within_region;
354
965eb2fc 355 mr = section->mr;
b242e0e0
PB
356
357 /* MMIO registers can be expected to perform full-width accesses based only
358 * on their address, without considering adjacent registers that could
359 * decode to completely different MemoryRegions. When such registers
360 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
361 * regions overlap wildly. For this reason we cannot clamp the accesses
362 * here.
363 *
364 * If the length is small (as is the case for address_space_ldl/stl),
365 * everything works fine. If the incoming length is large, however,
366 * the caller really has to do the clamping through memory_access_size.
367 */
965eb2fc 368 if (memory_region_is_ram(mr)) {
e4a511f8 369 diff = int128_sub(section->size, int128_make64(addr));
965eb2fc
PB
370 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
371 }
149f54b5
PB
372 return section;
373}
90260c6c 374
a87f3954
PB
375static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
376{
377 if (memory_region_is_ram(mr)) {
378 return !(is_write && mr->readonly);
379 }
380 if (memory_region_is_romd(mr)) {
381 return !is_write;
382 }
383
384 return false;
385}
386
41063e1e 387/* Called from RCU critical section */
5c8a00ce
PB
388MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
389 hwaddr *xlat, hwaddr *plen,
390 bool is_write)
90260c6c 391{
30951157
AK
392 IOMMUTLBEntry iotlb;
393 MemoryRegionSection *section;
394 MemoryRegion *mr;
30951157
AK
395
396 for (;;) {
79e2b9ae
PB
397 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
398 section = address_space_translate_internal(d, addr, &addr, plen, true);
30951157
AK
399 mr = section->mr;
400
401 if (!mr->iommu_ops) {
402 break;
403 }
404
8d7b8cb9 405 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157
AK
406 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
407 | (addr & iotlb.addr_mask));
23820dbf 408 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
30951157
AK
409 if (!(iotlb.perm & (1 << is_write))) {
410 mr = &io_mem_unassigned;
411 break;
412 }
413
414 as = iotlb.target_as;
415 }
416
fe680d0d 417 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954 418 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
23820dbf 419 *plen = MIN(page, *plen);
a87f3954
PB
420 }
421
30951157
AK
422 *xlat = addr;
423 return mr;
90260c6c
JK
424}
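/* Typical caller pattern (illustrative), inside an RCU critical section:
 *
 *     hwaddr xlat, len = size;
 *     MemoryRegion *mr = address_space_translate(as, addr, &xlat, &len, is_write);
 *
 * On return, xlat is the offset into mr and, for RAM, len may have been
 * clamped to the end of the section. The loop above also walks any IOMMUs
 * (mr->iommu_ops), switching to the target address space of each IOTLB entry.
 */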
425
79e2b9ae 426/* Called from RCU critical section */
90260c6c 427MemoryRegionSection *
9d82b5a7
PB
428address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
429 hwaddr *xlat, hwaddr *plen)
90260c6c 430{
30951157 431 MemoryRegionSection *section;
9d82b5a7
PB
432 section = address_space_translate_internal(cpu->memory_dispatch,
433 addr, xlat, plen, false);
30951157
AK
434
435 assert(!section->mr->iommu_ops);
436 return section;
90260c6c 437}
5b6dd868 438#endif
fd6ce8f6 439
b170fce3 440#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
441
442static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 443{
259186a7 444 CPUState *cpu = opaque;
a513fe19 445
5b6dd868
BS
446 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
447 version_id is increased. */
259186a7 448 cpu->interrupt_request &= ~0x01;
c01a71c1 449 tlb_flush(cpu, 1);
5b6dd868
BS
450
451 return 0;
a513fe19 452}
7501267e 453
6c3bff0e
PD
454static int cpu_common_pre_load(void *opaque)
455{
456 CPUState *cpu = opaque;
457
adee6424 458 cpu->exception_index = -1;
6c3bff0e
PD
459
460 return 0;
461}
462
463static bool cpu_common_exception_index_needed(void *opaque)
464{
465 CPUState *cpu = opaque;
466
adee6424 467 return tcg_enabled() && cpu->exception_index != -1;
6c3bff0e
PD
468}
469
470static const VMStateDescription vmstate_cpu_common_exception_index = {
471 .name = "cpu_common/exception_index",
472 .version_id = 1,
473 .minimum_version_id = 1,
5cd8cada 474 .needed = cpu_common_exception_index_needed,
6c3bff0e
PD
475 .fields = (VMStateField[]) {
476 VMSTATE_INT32(exception_index, CPUState),
477 VMSTATE_END_OF_LIST()
478 }
479};
480
1a1562f5 481const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
482 .name = "cpu_common",
483 .version_id = 1,
484 .minimum_version_id = 1,
6c3bff0e 485 .pre_load = cpu_common_pre_load,
5b6dd868 486 .post_load = cpu_common_post_load,
35d08458 487 .fields = (VMStateField[]) {
259186a7
AF
488 VMSTATE_UINT32(halted, CPUState),
489 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 490 VMSTATE_END_OF_LIST()
6c3bff0e 491 },
5cd8cada
JQ
492 .subsections = (const VMStateDescription*[]) {
493 &vmstate_cpu_common_exception_index,
494 NULL
5b6dd868
BS
495 }
496};
1a1562f5 497
5b6dd868 498#endif
ea041c0e 499
38d8f5c8 500CPUState *qemu_get_cpu(int index)
ea041c0e 501{
bdc44640 502 CPUState *cpu;
ea041c0e 503
bdc44640 504 CPU_FOREACH(cpu) {
55e5c285 505 if (cpu->cpu_index == index) {
bdc44640 506 return cpu;
55e5c285 507 }
ea041c0e 508 }
5b6dd868 509
bdc44640 510 return NULL;
ea041c0e
FB
511}
512
09daed84
EI
513#if !defined(CONFIG_USER_ONLY)
514void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
515{
516 /* We only support one address space per cpu at the moment. */
517 assert(cpu->as == as);
518
519 if (cpu->tcg_as_listener) {
520 memory_listener_unregister(cpu->tcg_as_listener);
521 } else {
522 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
523 }
524 cpu->tcg_as_listener->commit = tcg_commit;
525 memory_listener_register(cpu->tcg_as_listener, as);
526}
527#endif
528
5b6dd868 529void cpu_exec_init(CPUArchState *env)
ea041c0e 530{
5b6dd868 531 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 532 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 533 CPUState *some_cpu;
5b6dd868
BS
534 int cpu_index;
535
536#if defined(CONFIG_USER_ONLY)
537 cpu_list_lock();
538#endif
5b6dd868 539 cpu_index = 0;
bdc44640 540 CPU_FOREACH(some_cpu) {
5b6dd868
BS
541 cpu_index++;
542 }
55e5c285 543 cpu->cpu_index = cpu_index;
1b1ed8dc 544 cpu->numa_node = 0;
f0c3c505 545 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 546 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 547#ifndef CONFIG_USER_ONLY
09daed84 548 cpu->as = &address_space_memory;
5b6dd868 549 cpu->thread_id = qemu_get_thread_id();
cba70549 550 cpu_reload_memory_map(cpu);
5b6dd868 551#endif
bdc44640 552 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
553#if defined(CONFIG_USER_ONLY)
554 cpu_list_unlock();
555#endif
e0d47944
AF
556 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
557 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
558 }
5b6dd868 559#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
560 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
561 cpu_save, cpu_load, env);
b170fce3 562 assert(cc->vmsd == NULL);
e0d47944 563 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 564#endif
b170fce3
AF
565 if (cc->vmsd != NULL) {
566 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
567 }
ea041c0e
FB
568}
569
94df27fd 570#if defined(CONFIG_USER_ONLY)
00b941e5 571static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
572{
573 tb_invalidate_phys_page_range(pc, pc + 1, 0);
574}
575#else
00b941e5 576static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 577{
e8262a1b
MF
578 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
579 if (phys != -1) {
09daed84 580 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 581 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 582 }
1e7855a5 583}
c27004ec 584#endif
d720b93d 585
c527ee8f 586#if defined(CONFIG_USER_ONLY)
75a34036 587void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
588
589{
590}
591
3ee887e8
PM
592int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
593 int flags)
594{
595 return -ENOSYS;
596}
597
598void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
599{
600}
601
75a34036 602int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
603 int flags, CPUWatchpoint **watchpoint)
604{
605 return -ENOSYS;
606}
607#else
6658ffb8 608/* Add a watchpoint. */
75a34036 609int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 610 int flags, CPUWatchpoint **watchpoint)
6658ffb8 611{
c0ce998e 612 CPUWatchpoint *wp;
6658ffb8 613
05068c0d 614 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 615 if (len == 0 || (addr + len - 1) < addr) {
75a34036
AF
616 error_report("tried to set invalid watchpoint at %"
617 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
618 return -EINVAL;
619 }
7267c094 620 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
621
622 wp->vaddr = addr;
05068c0d 623 wp->len = len;
a1d1bb31
AL
624 wp->flags = flags;
625
2dc9f411 626 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
627 if (flags & BP_GDB) {
628 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
629 } else {
630 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
631 }
6658ffb8 632
31b030d4 633 tlb_flush_page(cpu, addr);
a1d1bb31
AL
634
635 if (watchpoint)
636 *watchpoint = wp;
637 return 0;
6658ffb8
PB
638}
639
a1d1bb31 640/* Remove a specific watchpoint. */
75a34036 641int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 642 int flags)
6658ffb8 643{
a1d1bb31 644 CPUWatchpoint *wp;
6658ffb8 645
ff4700b0 646 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 647 if (addr == wp->vaddr && len == wp->len
6e140f28 648 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 649 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
650 return 0;
651 }
652 }
a1d1bb31 653 return -ENOENT;
6658ffb8
PB
654}
655
a1d1bb31 656/* Remove a specific watchpoint by reference. */
75a34036 657void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 658{
ff4700b0 659 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 660
31b030d4 661 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 662
7267c094 663 g_free(watchpoint);
a1d1bb31
AL
664}
665
666/* Remove all matching watchpoints. */
75a34036 667void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 668{
c0ce998e 669 CPUWatchpoint *wp, *next;
a1d1bb31 670
ff4700b0 671 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
672 if (wp->flags & mask) {
673 cpu_watchpoint_remove_by_ref(cpu, wp);
674 }
c0ce998e 675 }
7d03f82f 676}
05068c0d
PM
677
678/* Return true if this watchpoint address matches the specified
679 * access (ie the address range covered by the watchpoint overlaps
680 * partially or completely with the address range covered by the
681 * access).
682 */
683static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
684 vaddr addr,
685 vaddr len)
686{
687 /* We know the lengths are non-zero, but a little caution is
688 * required to avoid errors in the case where the range ends
689 * exactly at the top of the address space and so addr + len
690 * wraps round to zero.
691 */
692 vaddr wpend = wp->vaddr + wp->len - 1;
693 vaddr addrend = addr + len - 1;
694
695 return !(addr > wpend || wp->vaddr > addrend);
696}
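/* Worked example of the inclusive-end comparison: a 4-byte watchpoint at
 * 0xfffffffffffffffc (wpend == ~0) and an 8-byte access at 0xfffffffffffffff8
 * (addrend == ~0) are correctly reported as overlapping, whereas comparing
 * exclusive ends would have wrapped addr + len around to zero.
 */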
697
c527ee8f 698#endif
7d03f82f 699
a1d1bb31 700/* Add a breakpoint. */
b3310ab3 701int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 702 CPUBreakpoint **breakpoint)
4c3a88a2 703{
c0ce998e 704 CPUBreakpoint *bp;
3b46e624 705
7267c094 706 bp = g_malloc(sizeof(*bp));
4c3a88a2 707
a1d1bb31
AL
708 bp->pc = pc;
709 bp->flags = flags;
710
2dc9f411 711 /* keep all GDB-injected breakpoints in front */
00b941e5 712 if (flags & BP_GDB) {
f0c3c505 713 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 714 } else {
f0c3c505 715 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 716 }
3b46e624 717
f0c3c505 718 breakpoint_invalidate(cpu, pc);
a1d1bb31 719
00b941e5 720 if (breakpoint) {
a1d1bb31 721 *breakpoint = bp;
00b941e5 722 }
4c3a88a2 723 return 0;
4c3a88a2
FB
724}
725
a1d1bb31 726/* Remove a specific breakpoint. */
b3310ab3 727int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 728{
a1d1bb31
AL
729 CPUBreakpoint *bp;
730
f0c3c505 731 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 732 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 733 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
734 return 0;
735 }
7d03f82f 736 }
a1d1bb31 737 return -ENOENT;
7d03f82f
EI
738}
739
a1d1bb31 740/* Remove a specific breakpoint by reference. */
b3310ab3 741void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 742{
f0c3c505
AF
743 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
744
745 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 746
7267c094 747 g_free(breakpoint);
a1d1bb31
AL
748}
749
750/* Remove all matching breakpoints. */
b3310ab3 751void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 752{
c0ce998e 753 CPUBreakpoint *bp, *next;
a1d1bb31 754
f0c3c505 755 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
756 if (bp->flags & mask) {
757 cpu_breakpoint_remove_by_ref(cpu, bp);
758 }
c0ce998e 759 }
4c3a88a2
FB
760}
761
c33a346e
FB
762/* enable or disable single step mode. EXCP_DEBUG is returned by the
763 CPU loop after each instruction */
3825b28f 764void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 765{
ed2803da
AF
766 if (cpu->singlestep_enabled != enabled) {
767 cpu->singlestep_enabled = enabled;
768 if (kvm_enabled()) {
38e478ec 769 kvm_update_guest_debug(cpu, 0);
ed2803da 770 } else {
ccbb4d44 771 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 772 /* XXX: only flush what is necessary */
38e478ec 773 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
774 tb_flush(env);
775 }
c33a346e 776 }
c33a346e
FB
777}
778
a47dddd7 779void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
780{
781 va_list ap;
493ae1f0 782 va_list ap2;
7501267e
FB
783
784 va_start(ap, fmt);
493ae1f0 785 va_copy(ap2, ap);
7501267e
FB
786 fprintf(stderr, "qemu: fatal: ");
787 vfprintf(stderr, fmt, ap);
788 fprintf(stderr, "\n");
878096ee 789 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
790 if (qemu_log_enabled()) {
791 qemu_log("qemu: fatal: ");
792 qemu_log_vprintf(fmt, ap2);
793 qemu_log("\n");
a0762859 794 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 795 qemu_log_flush();
93fcfe39 796 qemu_log_close();
924edcae 797 }
493ae1f0 798 va_end(ap2);
f9373291 799 va_end(ap);
fd052bf6
RV
800#if defined(CONFIG_USER_ONLY)
801 {
802 struct sigaction act;
803 sigfillset(&act.sa_mask);
804 act.sa_handler = SIG_DFL;
805 sigaction(SIGABRT, &act, NULL);
806 }
807#endif
7501267e
FB
808 abort();
809}
810
0124311e 811#if !defined(CONFIG_USER_ONLY)
0dc3f44a 812/* Called from RCU critical section */
041603fe
PB
813static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
814{
815 RAMBlock *block;
816
43771539 817 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 818 if (block && addr - block->offset < block->max_length) {
041603fe
PB
819 goto found;
820 }
0dc3f44a 821 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5 822 if (addr - block->offset < block->max_length) {
041603fe
PB
823 goto found;
824 }
825 }
826
827 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
828 abort();
829
830found:
43771539
PB
831 /* It is safe to write mru_block outside the iothread lock. This
832 * is what happens:
833 *
834 * mru_block = xxx
835 * rcu_read_unlock()
836 * xxx removed from list
837 * rcu_read_lock()
838 * read mru_block
839 * mru_block = NULL;
840 * call_rcu(reclaim_ramblock, xxx);
841 * rcu_read_unlock()
842 *
843 * atomic_rcu_set is not needed here. The block was already published
844 * when it was placed into the list. Here we're just making an extra
845 * copy of the pointer.
846 */
041603fe
PB
847 ram_list.mru_block = block;
848 return block;
849}
850
a2f4d5be 851static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 852{
041603fe 853 ram_addr_t start1;
a2f4d5be
JQ
854 RAMBlock *block;
855 ram_addr_t end;
856
857 end = TARGET_PAGE_ALIGN(start + length);
858 start &= TARGET_PAGE_MASK;
d24981d3 859
0dc3f44a 860 rcu_read_lock();
041603fe
PB
861 block = qemu_get_ram_block(start);
862 assert(block == qemu_get_ram_block(end - 1));
1240be24 863 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
041603fe 864 cpu_tlb_reset_dirty_all(start1, length);
0dc3f44a 865 rcu_read_unlock();
d24981d3
JQ
866}
867
5579c7f3 868/* Note: start and end must be within the same ram block. */
03eebc9e
SH
869bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
870 ram_addr_t length,
871 unsigned client)
1ccde1cb 872{
03eebc9e
SH
873 unsigned long end, page;
874 bool dirty;
875
876 if (length == 0) {
877 return false;
878 }
f23db169 879
03eebc9e
SH
880 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
881 page = start >> TARGET_PAGE_BITS;
882 dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
883 page, end - page);
884
885 if (dirty && tcg_enabled()) {
a2f4d5be 886 tlb_reset_dirty_range_all(start, length);
5579c7f3 887 }
03eebc9e
SH
888
889 return dirty;
1ccde1cb
FB
890}
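/* Dirty state is tracked per page and per client in ram_list.dirty_memory[];
 * when bits are cleared here under TCG, the TLB is reset for the range so
 * that the next write to those pages takes the dirty-tracking slow path
 * again.
 */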
891
79e2b9ae 892/* Called from RCU critical section */
bb0e627a 893hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
894 MemoryRegionSection *section,
895 target_ulong vaddr,
896 hwaddr paddr, hwaddr xlat,
897 int prot,
898 target_ulong *address)
e5548617 899{
a8170e5e 900 hwaddr iotlb;
e5548617
BS
901 CPUWatchpoint *wp;
902
cc5bea60 903 if (memory_region_is_ram(section->mr)) {
e5548617
BS
904 /* Normal RAM. */
905 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 906 + xlat;
e5548617 907 if (!section->readonly) {
b41aac4f 908 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 909 } else {
b41aac4f 910 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
911 }
912 } else {
1b3fb98f 913 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 914 iotlb += xlat;
e5548617
BS
915 }
916
917 /* Make accesses to pages with watchpoints go via the
918 watchpoint trap routines. */
ff4700b0 919 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 920 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
921 /* Avoid trapping reads of pages with a write breakpoint. */
922 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 923 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
924 *address |= TLB_MMIO;
925 break;
926 }
927 }
928 }
929
930 return iotlb;
931}
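/* Roughly, the returned iotlb value encodes either a page's ram_addr_t with
 * one of the PHYS_SECTION_* indices ORed into the low bits (NOTDIRTY/ROM
 * steer writes through their I/O handlers), or, for MMIO, the section index
 * in the dispatch map plus the offset; PHYS_SECTION_WATCH forces the
 * watchpoint path.
 */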
9fa3e853
FB
932#endif /* defined(CONFIG_USER_ONLY) */
933
e2eef170 934#if !defined(CONFIG_USER_ONLY)
8da3ff18 935
c227f099 936static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 937 uint16_t section);
acc9d80b 938static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 939
a2b257d6
IM
940static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
941 qemu_anon_ram_alloc;
91138037
MA
942
943/*
944 * Set a custom physical guest memory allocator.
945 * Accelerators with unusual needs may need this. Hopefully, we can
946 * get rid of it eventually.
947 */
a2b257d6 948void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
91138037
MA
949{
950 phys_mem_alloc = alloc;
951}
952
53cb28cb
MA
953static uint16_t phys_section_add(PhysPageMap *map,
954 MemoryRegionSection *section)
5312bd8b 955{
68f3f65b
PB
956 /* The physical section number is ORed with a page-aligned
957 * pointer to produce the iotlb entries. Thus it should
958 * never overflow into the page-aligned value.
959 */
53cb28cb 960 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 961
53cb28cb
MA
962 if (map->sections_nb == map->sections_nb_alloc) {
963 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
964 map->sections = g_renew(MemoryRegionSection, map->sections,
965 map->sections_nb_alloc);
5312bd8b 966 }
53cb28cb 967 map->sections[map->sections_nb] = *section;
dfde4e6e 968 memory_region_ref(section->mr);
53cb28cb 969 return map->sections_nb++;
5312bd8b
AK
970}
971
058bc4b5
PB
972static void phys_section_destroy(MemoryRegion *mr)
973{
dfde4e6e
PB
974 memory_region_unref(mr);
975
058bc4b5
PB
976 if (mr->subpage) {
977 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 978 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
979 g_free(subpage);
980 }
981}
982
6092666e 983static void phys_sections_free(PhysPageMap *map)
5312bd8b 984{
9affd6fc
PB
985 while (map->sections_nb > 0) {
986 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
987 phys_section_destroy(section->mr);
988 }
9affd6fc
PB
989 g_free(map->sections);
990 g_free(map->nodes);
5312bd8b
AK
991}
992
ac1970fb 993static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
994{
995 subpage_t *subpage;
a8170e5e 996 hwaddr base = section->offset_within_address_space
0f0cb164 997 & TARGET_PAGE_MASK;
97115a8d 998 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 999 d->map.nodes, d->map.sections);
0f0cb164
AK
1000 MemoryRegionSection subsection = {
1001 .offset_within_address_space = base,
052e87b0 1002 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 1003 };
a8170e5e 1004 hwaddr start, end;
0f0cb164 1005
f3705d53 1006 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 1007
f3705d53 1008 if (!(existing->mr->subpage)) {
acc9d80b 1009 subpage = subpage_init(d->as, base);
3be91e86 1010 subsection.address_space = d->as;
0f0cb164 1011 subsection.mr = &subpage->iomem;
ac1970fb 1012 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 1013 phys_section_add(&d->map, &subsection));
0f0cb164 1014 } else {
f3705d53 1015 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
1016 }
1017 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 1018 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
1019 subpage_register(subpage, start, end,
1020 phys_section_add(&d->map, section));
0f0cb164
AK
1021}
1022
1023
052e87b0
PB
1024static void register_multipage(AddressSpaceDispatch *d,
1025 MemoryRegionSection *section)
33417e70 1026{
a8170e5e 1027 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 1028 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
1029 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1030 TARGET_PAGE_BITS));
dd81124b 1031
733d5ef5
PB
1032 assert(num_pages);
1033 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
1034}
1035
ac1970fb 1036static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 1037{
89ae337a 1038 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1039 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1040 MemoryRegionSection now = *section, remain = *section;
052e87b0 1041 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1042
733d5ef5
PB
1043 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1044 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1045 - now.offset_within_address_space;
1046
052e87b0 1047 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1048 register_subpage(d, &now);
733d5ef5 1049 } else {
052e87b0 1050 now.size = int128_zero();
733d5ef5 1051 }
052e87b0
PB
1052 while (int128_ne(remain.size, now.size)) {
1053 remain.size = int128_sub(remain.size, now.size);
1054 remain.offset_within_address_space += int128_get64(now.size);
1055 remain.offset_within_region += int128_get64(now.size);
69b67646 1056 now = remain;
052e87b0 1057 if (int128_lt(remain.size, page_size)) {
733d5ef5 1058 register_subpage(d, &now);
88266249 1059 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1060 now.size = page_size;
ac1970fb 1061 register_subpage(d, &now);
69b67646 1062 } else {
052e87b0 1063 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1064 register_multipage(d, &now);
69b67646 1065 }
0f0cb164
AK
1066 }
1067}
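/* mem_add() essentially splits an incoming section into an unaligned head,
 * a page-aligned middle and an unaligned tail: head and tail go through
 * register_subpage(), the middle through register_multipage().
 */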
1068
62a2744c
SY
1069void qemu_flush_coalesced_mmio_buffer(void)
1070{
1071 if (kvm_enabled())
1072 kvm_flush_coalesced_mmio_buffer();
1073}
1074
b2a8658e
UD
1075void qemu_mutex_lock_ramlist(void)
1076{
1077 qemu_mutex_lock(&ram_list.mutex);
1078}
1079
1080void qemu_mutex_unlock_ramlist(void)
1081{
1082 qemu_mutex_unlock(&ram_list.mutex);
1083}
1084
e1e84ba0 1085#ifdef __linux__
c902760f
MT
1086
1087#include <sys/vfs.h>
1088
1089#define HUGETLBFS_MAGIC 0x958458f6
1090
fc7a5800 1091static long gethugepagesize(const char *path, Error **errp)
c902760f
MT
1092{
1093 struct statfs fs;
1094 int ret;
1095
1096 do {
9742bf26 1097 ret = statfs(path, &fs);
c902760f
MT
1098 } while (ret != 0 && errno == EINTR);
1099
1100 if (ret != 0) {
fc7a5800
HT
1101 error_setg_errno(errp, errno, "failed to get page size of file %s",
1102 path);
9742bf26 1103 return 0;
c902760f
MT
1104 }
1105
1106 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1107 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1108
1109 return fs.f_bsize;
1110}
1111
04b16653
AW
1112static void *file_ram_alloc(RAMBlock *block,
1113 ram_addr_t memory,
7f56e740
PB
1114 const char *path,
1115 Error **errp)
c902760f
MT
1116{
1117 char *filename;
8ca761f6
PF
1118 char *sanitized_name;
1119 char *c;
557529dd 1120 void *area = NULL;
c902760f 1121 int fd;
557529dd 1122 uint64_t hpagesize;
fc7a5800 1123 Error *local_err = NULL;
c902760f 1124
fc7a5800
HT
1125 hpagesize = gethugepagesize(path, &local_err);
1126 if (local_err) {
1127 error_propagate(errp, local_err);
f9a49dfa 1128 goto error;
c902760f 1129 }
a2b257d6 1130 block->mr->align = hpagesize;
c902760f
MT
1131
1132 if (memory < hpagesize) {
557529dd
HT
1133 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1134 "or larger than huge page size 0x%" PRIx64,
1135 memory, hpagesize);
1136 goto error;
c902760f
MT
1137 }
1138
1139 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1140 error_setg(errp,
1141 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1142 goto error;
c902760f
MT
1143 }
1144
8ca761f6 1145 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1146 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1147 for (c = sanitized_name; *c != '\0'; c++) {
1148 if (*c == '/')
1149 *c = '_';
1150 }
1151
1152 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1153 sanitized_name);
1154 g_free(sanitized_name);
c902760f
MT
1155
1156 fd = mkstemp(filename);
1157 if (fd < 0) {
7f56e740
PB
1158 error_setg_errno(errp, errno,
1159 "unable to create backing store for hugepages");
e4ada482 1160 g_free(filename);
f9a49dfa 1161 goto error;
c902760f
MT
1162 }
1163 unlink(filename);
e4ada482 1164 g_free(filename);
c902760f
MT
1165
1166 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1167
1168 /*
1169 * ftruncate is not supported by hugetlbfs in older
1170 * hosts, so don't bother bailing out on errors.
1171 * If anything goes wrong with it under other filesystems,
1172 * mmap will fail.
1173 */
7f56e740 1174 if (ftruncate(fd, memory)) {
9742bf26 1175 perror("ftruncate");
7f56e740 1176 }
c902760f 1177
dbcb8981
PB
1178 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1179 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1180 fd, 0);
c902760f 1181 if (area == MAP_FAILED) {
7f56e740
PB
1182 error_setg_errno(errp, errno,
1183 "unable to map backing store for hugepages");
9742bf26 1184 close(fd);
f9a49dfa 1185 goto error;
c902760f 1186 }
ef36fa14
MT
1187
1188 if (mem_prealloc) {
38183310 1189 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1190 }
1191
04b16653 1192 block->fd = fd;
c902760f 1193 return area;
f9a49dfa
MT
1194
1195error:
1196 if (mem_prealloc) {
81b07353 1197 error_report("%s", error_get_pretty(*errp));
f9a49dfa
MT
1198 exit(1);
1199 }
1200 return NULL;
c902760f
MT
1201}
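/* file_ram_alloc() flow: query the backing page size (warning if the path is
 * not on hugetlbfs), create and immediately unlink a temporary backing file
 * there, round the size up to the huge page size, ftruncate() and mmap() it
 * (MAP_SHARED when the block has RAM_SHARED set), and prefault the pages when
 * mem_prealloc is enabled.
 */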
1202#endif
1203
0dc3f44a 1204/* Called with the ramlist lock held. */
d17b5288 1205static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1206{
1207 RAMBlock *block, *next_block;
3e837b2c 1208 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1209
49cd9ac6
SH
1210 assert(size != 0); /* it would hand out same offset multiple times */
1211
0dc3f44a 1212 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
04b16653 1213 return 0;
0d53d9fe 1214 }
04b16653 1215
0dc3f44a 1216 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
f15fbc4b 1217 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653 1218
62be4e3a 1219 end = block->offset + block->max_length;
04b16653 1220
0dc3f44a 1221 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
04b16653
AW
1222 if (next_block->offset >= end) {
1223 next = MIN(next, next_block->offset);
1224 }
1225 }
1226 if (next - end >= size && next - end < mingap) {
3e837b2c 1227 offset = end;
04b16653
AW
1228 mingap = next - end;
1229 }
1230 }
3e837b2c
AW
1231
1232 if (offset == RAM_ADDR_MAX) {
1233 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1234 (uint64_t)size);
1235 abort();
1236 }
1237
04b16653
AW
1238 return offset;
1239}
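/* find_ram_offset() is a best-fit search over the gaps between existing
 * RAMBlocks: for each block it measures the distance to the nearest block
 * above it and keeps the smallest gap that still fits the requested size.
 */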
1240
652d7ec2 1241ram_addr_t last_ram_offset(void)
d17b5288
AW
1242{
1243 RAMBlock *block;
1244 ram_addr_t last = 0;
1245
0dc3f44a
MD
1246 rcu_read_lock();
1247 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
62be4e3a 1248 last = MAX(last, block->offset + block->max_length);
0d53d9fe 1249 }
0dc3f44a 1250 rcu_read_unlock();
d17b5288
AW
1251 return last;
1252}
1253
ddb97f1d
JB
1254static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1255{
1256 int ret;
ddb97f1d
JB
1257
1258 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
47c8ca53 1259 if (!machine_dump_guest_core(current_machine)) {
ddb97f1d
JB
1260 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1261 if (ret) {
1262 perror("qemu_madvise");
1263 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1264 "but dump_guest_core=off specified\n");
1265 }
1266 }
1267}
1268
0dc3f44a
MD
1269/* Called within an RCU critical section, or while the ramlist lock
1270 * is held.
1271 */
20cfe881 1272static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1273{
20cfe881 1274 RAMBlock *block;
84b89d78 1275
0dc3f44a 1276 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1277 if (block->offset == addr) {
20cfe881 1278 return block;
c5705a77
AK
1279 }
1280 }
20cfe881
HT
1281
1282 return NULL;
1283}
1284
ae3a7047 1285/* Called with iothread lock held. */
20cfe881
HT
1286void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1287{
ae3a7047 1288 RAMBlock *new_block, *block;
20cfe881 1289
0dc3f44a 1290 rcu_read_lock();
ae3a7047 1291 new_block = find_ram_block(addr);
c5705a77
AK
1292 assert(new_block);
1293 assert(!new_block->idstr[0]);
84b89d78 1294
09e5ab63
AL
1295 if (dev) {
1296 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1297 if (id) {
1298 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1299 g_free(id);
84b89d78
CM
1300 }
1301 }
1302 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1303
0dc3f44a 1304 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1305 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1306 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1307 new_block->idstr);
1308 abort();
1309 }
1310 }
0dc3f44a 1311 rcu_read_unlock();
c5705a77
AK
1312}
1313
ae3a7047 1314/* Called with iothread lock held. */
20cfe881
HT
1315void qemu_ram_unset_idstr(ram_addr_t addr)
1316{
ae3a7047 1317 RAMBlock *block;
20cfe881 1318
ae3a7047
MD
1319 /* FIXME: arch_init.c assumes that this is not called throughout
1320 * migration. Ignore the problem since hot-unplug during migration
1321 * does not work anyway.
1322 */
1323
0dc3f44a 1324 rcu_read_lock();
ae3a7047 1325 block = find_ram_block(addr);
20cfe881
HT
1326 if (block) {
1327 memset(block->idstr, 0, sizeof(block->idstr));
1328 }
0dc3f44a 1329 rcu_read_unlock();
20cfe881
HT
1330}
1331
8490fc78
LC
1332static int memory_try_enable_merging(void *addr, size_t len)
1333{
75cc7f01 1334 if (!machine_mem_merge(current_machine)) {
8490fc78
LC
1335 /* disabled by the user */
1336 return 0;
1337 }
1338
1339 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1340}
1341
62be4e3a
MT
1342/* Only legal before guest might have detected the memory size: e.g. on
1343 * incoming migration, or right after reset.
1344 *
1345 * As the memory core doesn't know how memory is accessed, it is up to the
1346 * resize callback to update device state and/or add assertions to detect
1347 * misuse, if necessary.
1348 */
1349int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1350{
1351 RAMBlock *block = find_ram_block(base);
1352
1353 assert(block);
1354
129ddaf3
MT
1355 newsize = TARGET_PAGE_ALIGN(newsize);
1356
62be4e3a
MT
1357 if (block->used_length == newsize) {
1358 return 0;
1359 }
1360
1361 if (!(block->flags & RAM_RESIZEABLE)) {
1362 error_setg_errno(errp, EINVAL,
1363 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1364 " in != 0x" RAM_ADDR_FMT, block->idstr,
1365 newsize, block->used_length);
1366 return -EINVAL;
1367 }
1368
1369 if (block->max_length < newsize) {
1370 error_setg_errno(errp, EINVAL,
1371 "Length too large: %s: 0x" RAM_ADDR_FMT
1372 " > 0x" RAM_ADDR_FMT, block->idstr,
1373 newsize, block->max_length);
1374 return -EINVAL;
1375 }
1376
1377 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1378 block->used_length = newsize;
58d2707e
PB
1379 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1380 DIRTY_CLIENTS_ALL);
62be4e3a
MT
1381 memory_region_set_size(block->mr, newsize);
1382 if (block->resized) {
1383 block->resized(block->idstr, newsize, block->host);
1384 }
1385 return 0;
1386}
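/* Illustrative use, with hypothetical names:
 *
 *     addr = qemu_ram_alloc_resizeable(initial, max, my_resized_cb, mr, &err);
 *     ...
 *     qemu_ram_resize(addr, new_size, &err);    (new_size bounded by max)
 *
 * If a resized callback was registered, it is invoked with the block's idstr,
 * the new used_length and the host pointer so the owner can react.
 */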
1387
ef701d7b 1388static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1389{
e1c57ab8 1390 RAMBlock *block;
0d53d9fe 1391 RAMBlock *last_block = NULL;
2152f5ca
JQ
1392 ram_addr_t old_ram_size, new_ram_size;
1393
1394 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1395
b2a8658e 1396 qemu_mutex_lock_ramlist();
9b8424d5 1397 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1398
1399 if (!new_block->host) {
1400 if (xen_enabled()) {
9b8424d5
MT
1401 xen_ram_alloc(new_block->offset, new_block->max_length,
1402 new_block->mr);
e1c57ab8 1403 } else {
9b8424d5 1404 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1405 &new_block->mr->align);
39228250 1406 if (!new_block->host) {
ef701d7b
HT
1407 error_setg_errno(errp, errno,
1408 "cannot set up guest memory '%s'",
1409 memory_region_name(new_block->mr));
1410 qemu_mutex_unlock_ramlist();
1411 return -1;
39228250 1412 }
9b8424d5 1413 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1414 }
c902760f 1415 }
94a6b54f 1416
0d53d9fe
MD
1417 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1418 * QLIST (which has an RCU-friendly variant) does not have insertion at
1419 * tail, so save the last element in last_block.
1420 */
0dc3f44a 1421 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
0d53d9fe 1422 last_block = block;
9b8424d5 1423 if (block->max_length < new_block->max_length) {
abb26d63
PB
1424 break;
1425 }
1426 }
1427 if (block) {
0dc3f44a 1428 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
0d53d9fe 1429 } else if (last_block) {
0dc3f44a 1430 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
0d53d9fe 1431 } else { /* list is empty */
0dc3f44a 1432 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
abb26d63 1433 }
0d6d3c87 1434 ram_list.mru_block = NULL;
94a6b54f 1435
0dc3f44a
MD
1436 /* Write list before version */
1437 smp_wmb();
f798b07f 1438 ram_list.version++;
b2a8658e 1439 qemu_mutex_unlock_ramlist();
f798b07f 1440
2152f5ca
JQ
1441 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1442
1443 if (new_ram_size > old_ram_size) {
1ab4c8ce 1444 int i;
ae3a7047
MD
1445
1446 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1ab4c8ce
JQ
1447 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1448 ram_list.dirty_memory[i] =
1449 bitmap_zero_extend(ram_list.dirty_memory[i],
1450 old_ram_size, new_ram_size);
1451 }
2152f5ca 1452 }
9b8424d5 1453 cpu_physical_memory_set_dirty_range(new_block->offset,
58d2707e
PB
1454 new_block->used_length,
1455 DIRTY_CLIENTS_ALL);
94a6b54f 1456
a904c911
PB
1457 if (new_block->host) {
1458 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1459 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1460 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1461 if (kvm_enabled()) {
1462 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1463 }
e1c57ab8 1464 }
6f0437e8 1465
94a6b54f
PB
1466 return new_block->offset;
1467}
e9a1ab19 1468
0b183fc8 1469#ifdef __linux__
e1c57ab8 1470ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1471 bool share, const char *mem_path,
7f56e740 1472 Error **errp)
e1c57ab8
PB
1473{
1474 RAMBlock *new_block;
ef701d7b
HT
1475 ram_addr_t addr;
1476 Error *local_err = NULL;
e1c57ab8
PB
1477
1478 if (xen_enabled()) {
7f56e740
PB
1479 error_setg(errp, "-mem-path not supported with Xen");
1480 return -1;
e1c57ab8
PB
1481 }
1482
1483 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1484 /*
1485 * file_ram_alloc() needs to allocate just like
1486 * phys_mem_alloc, but we haven't bothered to provide
1487 * a hook there.
1488 */
7f56e740
PB
1489 error_setg(errp,
1490 "-mem-path not supported with this accelerator");
1491 return -1;
e1c57ab8
PB
1492 }
1493
1494 size = TARGET_PAGE_ALIGN(size);
1495 new_block = g_malloc0(sizeof(*new_block));
1496 new_block->mr = mr;
9b8424d5
MT
1497 new_block->used_length = size;
1498 new_block->max_length = size;
dbcb8981 1499 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1500 new_block->host = file_ram_alloc(new_block, size,
1501 mem_path, errp);
1502 if (!new_block->host) {
1503 g_free(new_block);
1504 return -1;
1505 }
1506
ef701d7b
HT
1507 addr = ram_block_add(new_block, &local_err);
1508 if (local_err) {
1509 g_free(new_block);
1510 error_propagate(errp, local_err);
1511 return -1;
1512 }
1513 return addr;
e1c57ab8 1514}
0b183fc8 1515#endif
e1c57ab8 1516
62be4e3a
MT
1517static
1518ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1519 void (*resized)(const char*,
1520 uint64_t length,
1521 void *host),
1522 void *host, bool resizeable,
ef701d7b 1523 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1524{
1525 RAMBlock *new_block;
ef701d7b
HT
1526 ram_addr_t addr;
1527 Error *local_err = NULL;
e1c57ab8
PB
1528
1529 size = TARGET_PAGE_ALIGN(size);
62be4e3a 1530 max_size = TARGET_PAGE_ALIGN(max_size);
e1c57ab8
PB
1531 new_block = g_malloc0(sizeof(*new_block));
1532 new_block->mr = mr;
62be4e3a 1533 new_block->resized = resized;
9b8424d5
MT
1534 new_block->used_length = size;
1535 new_block->max_length = max_size;
62be4e3a 1536 assert(max_size >= size);
e1c57ab8
PB
1537 new_block->fd = -1;
1538 new_block->host = host;
1539 if (host) {
7bd4f430 1540 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1541 }
62be4e3a
MT
1542 if (resizeable) {
1543 new_block->flags |= RAM_RESIZEABLE;
1544 }
ef701d7b
HT
1545 addr = ram_block_add(new_block, &local_err);
1546 if (local_err) {
1547 g_free(new_block);
1548 error_propagate(errp, local_err);
1549 return -1;
1550 }
1551 return addr;
e1c57ab8
PB
1552}
1553
62be4e3a
MT
1554ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1555 MemoryRegion *mr, Error **errp)
1556{
1557 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1558}
1559
ef701d7b 1560ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1561{
62be4e3a
MT
1562 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1563}
1564
1565ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1566 void (*resized)(const char*,
1567 uint64_t length,
1568 void *host),
1569 MemoryRegion *mr, Error **errp)
1570{
1571 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
1572}
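/* Summary of the allocation entry points above: qemu_ram_alloc() backs the
 * region via phys_mem_alloc (anonymous memory by default),
 * qemu_ram_alloc_from_ptr() wraps caller-provided memory (RAM_PREALLOC),
 * qemu_ram_alloc_from_file() maps a file/hugetlbfs backend, and
 * qemu_ram_alloc_resizeable() allocates max_size up front while only
 * used_length is exposed and migrated (RAM_RESIZEABLE).
 */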
1573
1f2e98b6
AW
1574void qemu_ram_free_from_ptr(ram_addr_t addr)
1575{
1576 RAMBlock *block;
1577
b2a8658e 1578 qemu_mutex_lock_ramlist();
0dc3f44a 1579 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1f2e98b6 1580 if (addr == block->offset) {
0dc3f44a 1581 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1582 ram_list.mru_block = NULL;
0dc3f44a
MD
1583 /* Write list before version */
1584 smp_wmb();
f798b07f 1585 ram_list.version++;
43771539 1586 g_free_rcu(block, rcu);
b2a8658e 1587 break;
1f2e98b6
AW
1588 }
1589 }
b2a8658e 1590 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1591}
1592
43771539
PB
1593static void reclaim_ramblock(RAMBlock *block)
1594{
1595 if (block->flags & RAM_PREALLOC) {
1596 ;
1597 } else if (xen_enabled()) {
1598 xen_invalidate_map_cache_entry(block->host);
1599#ifndef _WIN32
1600 } else if (block->fd >= 0) {
1601 munmap(block->host, block->max_length);
1602 close(block->fd);
1603#endif
1604 } else {
1605 qemu_anon_ram_free(block->host, block->max_length);
1606 }
1607 g_free(block);
1608}
1609
c227f099 1610void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1611{
04b16653
AW
1612 RAMBlock *block;
1613
b2a8658e 1614 qemu_mutex_lock_ramlist();
0dc3f44a 1615 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
04b16653 1616 if (addr == block->offset) {
0dc3f44a 1617 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1618 ram_list.mru_block = NULL;
0dc3f44a
MD
1619 /* Write list before version */
1620 smp_wmb();
f798b07f 1621 ram_list.version++;
43771539 1622 call_rcu(block, reclaim_ramblock, rcu);
b2a8658e 1623 break;
04b16653
AW
1624 }
1625 }
b2a8658e 1626 qemu_mutex_unlock_ramlist();
e9a1ab19
FB
1627}
1628
cd19cfa2
HY
1629#ifndef _WIN32
1630void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1631{
1632 RAMBlock *block;
1633 ram_addr_t offset;
1634 int flags;
1635 void *area, *vaddr;
1636
0dc3f44a 1637 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
cd19cfa2 1638 offset = addr - block->offset;
9b8424d5 1639 if (offset < block->max_length) {
1240be24 1640 vaddr = ramblock_ptr(block, offset);
7bd4f430 1641 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1642 ;
dfeaf2ab
MA
1643 } else if (xen_enabled()) {
1644 abort();
cd19cfa2
HY
1645 } else {
1646 flags = MAP_FIXED;
3435f395 1647 if (block->fd >= 0) {
dbcb8981
PB
1648 flags |= (block->flags & RAM_SHARED ?
1649 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1650 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1651 flags, block->fd, offset);
cd19cfa2 1652 } else {
2eb9fbaa
MA
1653 /*
1654 * Remap needs to match alloc. Accelerators that
1655 * set phys_mem_alloc never remap. If they did,
1656 * we'd need a remap hook here.
1657 */
1658 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1659
cd19cfa2
HY
1660 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1661 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1662 flags, -1, 0);
cd19cfa2
HY
1663 }
1664 if (area != vaddr) {
f15fbc4b
AP
1665 fprintf(stderr, "Could not remap addr: "
1666 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1667 length, addr);
1668 exit(1);
1669 }
8490fc78 1670 memory_try_enable_merging(vaddr, length);
ddb97f1d 1671 qemu_ram_setup_dump(vaddr, length);
cd19cfa2 1672 }
cd19cfa2
HY
1673 }
1674 }
1675}
1676#endif /* !_WIN32 */
1677
a35ba7be
PB
1678int qemu_get_ram_fd(ram_addr_t addr)
1679{
ae3a7047
MD
1680 RAMBlock *block;
1681 int fd;
a35ba7be 1682
0dc3f44a 1683 rcu_read_lock();
ae3a7047
MD
1684 block = qemu_get_ram_block(addr);
1685 fd = block->fd;
0dc3f44a 1686 rcu_read_unlock();
ae3a7047 1687 return fd;
a35ba7be
PB
1688}
1689
3fd74b84
DM
1690void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1691{
ae3a7047
MD
1692 RAMBlock *block;
1693 void *ptr;
3fd74b84 1694
0dc3f44a 1695 rcu_read_lock();
ae3a7047
MD
1696 block = qemu_get_ram_block(addr);
1697 ptr = ramblock_ptr(block, 0);
0dc3f44a 1698 rcu_read_unlock();
ae3a7047 1699 return ptr;
3fd74b84
DM
1700}
1701
1b5ec234 1702/* Return a host pointer to ram allocated with qemu_ram_alloc.
ae3a7047
MD
1703 * This should not be used for general purpose DMA. Use address_space_map
1704 * or address_space_rw instead. For local memory (e.g. video ram) that the
1705 * device owns, use memory_region_get_ram_ptr.
0dc3f44a
MD
1706 *
1707 * By the time this function returns, the returned pointer is not protected
1708 * by RCU anymore. If the caller is not within an RCU critical section and
1709 * does not hold the iothread lock, it must have other means of protecting the
1710 * pointer, such as a reference to the region that includes the incoming
1711 * ram_addr_t.
1b5ec234
PB
1712 */
1713void *qemu_get_ram_ptr(ram_addr_t addr)
1714{
ae3a7047
MD
1715 RAMBlock *block;
1716 void *ptr;
1b5ec234 1717
0dc3f44a 1718 rcu_read_lock();
ae3a7047
MD
1719 block = qemu_get_ram_block(addr);
1720
1721 if (xen_enabled() && block->host == NULL) {
0d6d3c87
PB
1722 /* We need to check if the requested address is in the RAM
1723 * because we don't want to map the entire memory in QEMU.
1724 * In that case just map until the end of the page.
1725 */
1726 if (block->offset == 0) {
ae3a7047 1727 ptr = xen_map_cache(addr, 0, 0);
0dc3f44a 1728 goto unlock;
0d6d3c87 1729 }
ae3a7047
MD
1730
1731 block->host = xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87 1732 }
ae3a7047
MD
1733 ptr = ramblock_ptr(block, addr - block->offset);
1734
0dc3f44a
MD
1735unlock:
1736 rcu_read_unlock();
ae3a7047 1737 return ptr;
dc828ca1
PB
1738}
1739
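/* Illustrative sketch, not part of exec.c: given the comment below on RCU
 * protection, one way a caller can keep the pointer returned by
 * qemu_get_ram_ptr() valid is to do the whole access inside an RCU read-side
 * critical section.  The helper name and its ram_addr_t argument are made up.
 */
static uint32_t read_guest_ram_word(ram_addr_t ram_addr)
{
    uint32_t val;

    rcu_read_lock();
    /* The RAMBlock backing ram_addr cannot be reclaimed while the RCU
     * read lock is held, so the host pointer stays valid for this access. */
    val = ldl_p(qemu_get_ram_ptr(ram_addr));
    rcu_read_unlock();
    return val;
}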
38bee5dc 1740/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
ae3a7047 1741 * but takes a size argument.
0dc3f44a
MD
1742 *
1743 * By the time this function returns, the returned pointer is not protected
1744 * by RCU anymore. If the caller is not within an RCU critical section and
1745 * does not hold the iothread lock, it must have other means of protecting the
1746 * pointer, such as a reference to the region that includes the incoming
1747 * ram_addr_t.
ae3a7047 1748 */
cb85f7ab 1749static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1750{
ae3a7047 1751 void *ptr;
8ab934f9
SS
1752 if (*size == 0) {
1753 return NULL;
1754 }
868bb33f 1755 if (xen_enabled()) {
e41d7c69 1756 return xen_map_cache(addr, *size, 1);
868bb33f 1757 } else {
38bee5dc 1758 RAMBlock *block;
0dc3f44a
MD
1759 rcu_read_lock();
1760 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5
MT
1761 if (addr - block->offset < block->max_length) {
1762 if (addr - block->offset + *size > block->max_length)
1763 *size = block->max_length - addr + block->offset;
ae3a7047 1764 ptr = ramblock_ptr(block, addr - block->offset);
0dc3f44a 1765 rcu_read_unlock();
ae3a7047 1766 return ptr;
38bee5dc
SS
1767 }
1768 }
1769
1770 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1771 abort();
38bee5dc
SS
1772 }
1773}
1774
7443b437 1775/* Some of the softmmu routines need to translate from a host pointer
ae3a7047
MD
1776 * (typically a TLB entry) back to a ram offset.
1777 *
1778 * By the time this function returns, the returned pointer is not protected
1779 * by RCU anymore. If the caller is not within an RCU critical section and
1780 * does not hold the iothread lock, it must have other means of protecting the
1781 * pointer, such as a reference to the region that includes the incoming
1782 * ram_addr_t.
1783 */
1b5ec234 1784MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1785{
94a6b54f
PB
1786 RAMBlock *block;
1787 uint8_t *host = ptr;
ae3a7047 1788 MemoryRegion *mr;
94a6b54f 1789
868bb33f 1790 if (xen_enabled()) {
0dc3f44a 1791 rcu_read_lock();
e41d7c69 1792 *ram_addr = xen_ram_addr_from_mapcache(ptr);
ae3a7047 1793 mr = qemu_get_ram_block(*ram_addr)->mr;
0dc3f44a 1794 rcu_read_unlock();
ae3a7047 1795 return mr;
712c2b41
SS
1796 }
1797
0dc3f44a
MD
1798 rcu_read_lock();
1799 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 1800 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1801 goto found;
1802 }
1803
0dc3f44a 1804 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
432d268c
JN
1805 /* This case append when the block is not mapped. */
1806 if (block->host == NULL) {
1807 continue;
1808 }
9b8424d5 1809 if (host - block->host < block->max_length) {
23887b79 1810 goto found;
f471a17e 1811 }
94a6b54f 1812 }
432d268c 1813
0dc3f44a 1814 rcu_read_unlock();
1b5ec234 1815 return NULL;
23887b79
PB
1816
1817found:
1818 *ram_addr = block->offset + (host - block->host);
ae3a7047 1819 mr = block->mr;
0dc3f44a 1820 rcu_read_unlock();
ae3a7047 1821 return mr;
e890261f 1822}
f471a17e 1823
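/* Hedged sketch, not in the original source: a typical use of
 * qemu_ram_addr_from_host() is to turn a host pointer (for example from a TLB
 * entry) back into a ram_addr_t so the dirty bitmap can be updated.  The
 * function and variable names below are hypothetical.
 */
static void mark_host_ptr_dirty(void *host, hwaddr len)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr = qemu_ram_addr_from_host(host, &ram_addr);

    if (mr) {
        /* Mark VGA and migration bits only, as the notdirty path does. */
        cpu_physical_memory_set_dirty_range(ram_addr, len,
                                            DIRTY_CLIENTS_NOCODE);
    }
}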
a8170e5e 1824static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1825 uint64_t val, unsigned size)
9fa3e853 1826{
52159192 1827 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1828 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1829 }
0e0df1e2
AK
1830 switch (size) {
1831 case 1:
1832 stb_p(qemu_get_ram_ptr(ram_addr), val);
1833 break;
1834 case 2:
1835 stw_p(qemu_get_ram_ptr(ram_addr), val);
1836 break;
1837 case 4:
1838 stl_p(qemu_get_ram_ptr(ram_addr), val);
1839 break;
1840 default:
1841 abort();
3a7d929e 1842 }
58d2707e
PB
1843 /* Set both VGA and migration bits for simplicity and to remove
1844 * the notdirty callback faster.
1845 */
1846 cpu_physical_memory_set_dirty_range(ram_addr, size,
1847 DIRTY_CLIENTS_NOCODE);
f23db169
FB
1848 /* we remove the notdirty callback only if the code has been
1849 flushed */
a2cd8c85 1850 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1851 CPUArchState *env = current_cpu->env_ptr;
93afeade 1852 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1853 }
9fa3e853
FB
1854}
1855
b018ddf6
PB
1856static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1857 unsigned size, bool is_write)
1858{
1859 return is_write;
1860}
1861
0e0df1e2 1862static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1863 .write = notdirty_mem_write,
b018ddf6 1864 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1865 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1866};
1867
0f459d16 1868/* Generate a debug exception if a watchpoint has been hit. */
66b9b43c 1869static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
0f459d16 1870{
93afeade
AF
1871 CPUState *cpu = current_cpu;
1872 CPUArchState *env = cpu->env_ptr;
06d55cc1 1873 target_ulong pc, cs_base;
0f459d16 1874 target_ulong vaddr;
a1d1bb31 1875 CPUWatchpoint *wp;
06d55cc1 1876 int cpu_flags;
0f459d16 1877
ff4700b0 1878 if (cpu->watchpoint_hit) {
06d55cc1
AL
1879 /* We re-entered the check after replacing the TB. Now raise
1880 * the debug interrupt so that it will trigger after the
1881 * current instruction. */
93afeade 1882 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1883 return;
1884 }
93afeade 1885 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1886 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1887 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1888 && (wp->flags & flags)) {
08225676
PM
1889 if (flags == BP_MEM_READ) {
1890 wp->flags |= BP_WATCHPOINT_HIT_READ;
1891 } else {
1892 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1893 }
1894 wp->hitaddr = vaddr;
66b9b43c 1895 wp->hitattrs = attrs;
ff4700b0
AF
1896 if (!cpu->watchpoint_hit) {
1897 cpu->watchpoint_hit = wp;
239c51a5 1898 tb_check_watchpoint(cpu);
6e140f28 1899 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1900 cpu->exception_index = EXCP_DEBUG;
5638d180 1901 cpu_loop_exit(cpu);
6e140f28
AL
1902 } else {
1903 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1904 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1905 cpu_resume_from_signal(cpu, NULL);
6e140f28 1906 }
06d55cc1 1907 }
6e140f28
AL
1908 } else {
1909 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1910 }
1911 }
1912}
1913
6658ffb8
PB
1914/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1915 so these check for a hit then pass through to the normal out-of-line
1916 phys routines. */
66b9b43c
PM
1917static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1918 unsigned size, MemTxAttrs attrs)
6658ffb8 1919{
66b9b43c
PM
1920 MemTxResult res;
1921 uint64_t data;
1922
1923 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
1ec9b909 1924 switch (size) {
66b9b43c
PM
1925 case 1:
1926 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
1927 break;
1928 case 2:
1929 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
1930 break;
1931 case 4:
1932 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
1933 break;
1ec9b909
AK
1934 default: abort();
1935 }
66b9b43c
PM
1936 *pdata = data;
1937 return res;
6658ffb8
PB
1938}
1939
66b9b43c
PM
1940static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1941 uint64_t val, unsigned size,
1942 MemTxAttrs attrs)
6658ffb8 1943{
66b9b43c
PM
1944 MemTxResult res;
1945
1946 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1ec9b909 1947 switch (size) {
67364150 1948 case 1:
66b9b43c 1949 address_space_stb(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1950 break;
1951 case 2:
66b9b43c 1952 address_space_stw(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1953 break;
1954 case 4:
66b9b43c 1955 address_space_stl(&address_space_memory, addr, val, attrs, &res);
67364150 1956 break;
1ec9b909
AK
1957 default: abort();
1958 }
66b9b43c 1959 return res;
6658ffb8
PB
1960}
1961
1ec9b909 1962static const MemoryRegionOps watch_mem_ops = {
66b9b43c
PM
1963 .read_with_attrs = watch_mem_read,
1964 .write_with_attrs = watch_mem_write,
1ec9b909 1965 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1966};
6658ffb8 1967
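/* Hedged usage sketch, not part of this file: the watch_mem_* handlers above
 * only fire for addresses covered by a watchpoint inserted elsewhere, e.g.
 * with cpu_watchpoint_insert().  This assumes the QEMU 2.x watchpoint API;
 * the helper name and variables are made up.
 */
static void install_write_watchpoint(CPUState *cpu, vaddr guest_va)
{
    CPUWatchpoint *wp;

    /* Trap 4-byte-wide writes to guest_va; later stores are routed through
     * watch_mem_write() above before reaching the normal slow path. */
    if (cpu_watchpoint_insert(cpu, guest_va, 4, BP_MEM_WRITE, &wp) < 0) {
        /* Insertion can fail, e.g. for an invalid length or alignment. */
    }
}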
f25a49e0
PM
1968static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1969 unsigned len, MemTxAttrs attrs)
db7b5426 1970{
acc9d80b 1971 subpage_t *subpage = opaque;
ff6cff75 1972 uint8_t buf[8];
5c9eb028 1973 MemTxResult res;
791af8c8 1974
db7b5426 1975#if defined(DEBUG_SUBPAGE)
016e9d62 1976 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1977 subpage, len, addr);
db7b5426 1978#endif
5c9eb028
PM
1979 res = address_space_read(subpage->as, addr + subpage->base,
1980 attrs, buf, len);
1981 if (res) {
1982 return res;
f25a49e0 1983 }
acc9d80b
JK
1984 switch (len) {
1985 case 1:
f25a49e0
PM
1986 *data = ldub_p(buf);
1987 return MEMTX_OK;
acc9d80b 1988 case 2:
f25a49e0
PM
1989 *data = lduw_p(buf);
1990 return MEMTX_OK;
acc9d80b 1991 case 4:
f25a49e0
PM
1992 *data = ldl_p(buf);
1993 return MEMTX_OK;
ff6cff75 1994 case 8:
f25a49e0
PM
1995 *data = ldq_p(buf);
1996 return MEMTX_OK;
acc9d80b
JK
1997 default:
1998 abort();
1999 }
db7b5426
BS
2000}
2001
f25a49e0
PM
2002static MemTxResult subpage_write(void *opaque, hwaddr addr,
2003 uint64_t value, unsigned len, MemTxAttrs attrs)
db7b5426 2004{
acc9d80b 2005 subpage_t *subpage = opaque;
ff6cff75 2006 uint8_t buf[8];
acc9d80b 2007
db7b5426 2008#if defined(DEBUG_SUBPAGE)
016e9d62 2009 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
2010 " value %"PRIx64"\n",
2011 __func__, subpage, len, addr, value);
db7b5426 2012#endif
acc9d80b
JK
2013 switch (len) {
2014 case 1:
2015 stb_p(buf, value);
2016 break;
2017 case 2:
2018 stw_p(buf, value);
2019 break;
2020 case 4:
2021 stl_p(buf, value);
2022 break;
ff6cff75
PB
2023 case 8:
2024 stq_p(buf, value);
2025 break;
acc9d80b
JK
2026 default:
2027 abort();
2028 }
5c9eb028
PM
2029 return address_space_write(subpage->as, addr + subpage->base,
2030 attrs, buf, len);
db7b5426
BS
2031}
2032
c353e4cc 2033static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 2034 unsigned len, bool is_write)
c353e4cc 2035{
acc9d80b 2036 subpage_t *subpage = opaque;
c353e4cc 2037#if defined(DEBUG_SUBPAGE)
016e9d62 2038 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 2039 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
2040#endif
2041
acc9d80b 2042 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 2043 len, is_write);
c353e4cc
PB
2044}
2045
70c68e44 2046static const MemoryRegionOps subpage_ops = {
f25a49e0
PM
2047 .read_with_attrs = subpage_read,
2048 .write_with_attrs = subpage_write,
ff6cff75
PB
2049 .impl.min_access_size = 1,
2050 .impl.max_access_size = 8,
2051 .valid.min_access_size = 1,
2052 .valid.max_access_size = 8,
c353e4cc 2053 .valid.accepts = subpage_accepts,
70c68e44 2054 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
2055};
2056
c227f099 2057static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2058 uint16_t section)
db7b5426
BS
2059{
2060 int idx, eidx;
2061
2062 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2063 return -1;
2064 idx = SUBPAGE_IDX(start);
2065 eidx = SUBPAGE_IDX(end);
2066#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2067 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2068 __func__, mmio, start, end, idx, eidx, section);
db7b5426 2069#endif
db7b5426 2070 for (; idx <= eidx; idx++) {
5312bd8b 2071 mmio->sub_section[idx] = section;
db7b5426
BS
2072 }
2073
2074 return 0;
2075}
2076
acc9d80b 2077static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 2078{
c227f099 2079 subpage_t *mmio;
db7b5426 2080
7267c094 2081 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 2082
acc9d80b 2083 mmio->as = as;
1eec614b 2084 mmio->base = base;
2c9b15ca 2085 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 2086 NULL, TARGET_PAGE_SIZE);
b3b00c78 2087 mmio->iomem.subpage = true;
db7b5426 2088#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2089 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2090 mmio, base, TARGET_PAGE_SIZE);
db7b5426 2091#endif
b41aac4f 2092 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
2093
2094 return mmio;
2095}
2096
a656e22f
PC
2097static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2098 MemoryRegion *mr)
5312bd8b 2099{
a656e22f 2100 assert(as);
5312bd8b 2101 MemoryRegionSection section = {
a656e22f 2102 .address_space = as,
5312bd8b
AK
2103 .mr = mr,
2104 .offset_within_address_space = 0,
2105 .offset_within_region = 0,
052e87b0 2106 .size = int128_2_64(),
5312bd8b
AK
2107 };
2108
53cb28cb 2109 return phys_section_add(map, &section);
5312bd8b
AK
2110}
2111
9d82b5a7 2112MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
aa102231 2113{
79e2b9ae
PB
2114 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2115 MemoryRegionSection *sections = d->map.sections;
9d82b5a7
PB
2116
2117 return sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
2118}
2119
e9179ce1
AK
2120static void io_mem_init(void)
2121{
1f6245e5 2122 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 2123 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 2124 NULL, UINT64_MAX);
2c9b15ca 2125 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 2126 NULL, UINT64_MAX);
2c9b15ca 2127 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 2128 NULL, UINT64_MAX);
e9179ce1
AK
2129}
2130
ac1970fb 2131static void mem_begin(MemoryListener *listener)
00752703
PB
2132{
2133 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
2134 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2135 uint16_t n;
2136
a656e22f 2137 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 2138 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 2139 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 2140 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 2141 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 2142 assert(n == PHYS_SECTION_ROM);
a656e22f 2143 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 2144 assert(n == PHYS_SECTION_WATCH);
00752703 2145
9736e55b 2146 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
2147 d->as = as;
2148 as->next_dispatch = d;
2149}
2150
79e2b9ae
PB
2151static void address_space_dispatch_free(AddressSpaceDispatch *d)
2152{
2153 phys_sections_free(&d->map);
2154 g_free(d);
2155}
2156
00752703 2157static void mem_commit(MemoryListener *listener)
ac1970fb 2158{
89ae337a 2159 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2160 AddressSpaceDispatch *cur = as->dispatch;
2161 AddressSpaceDispatch *next = as->next_dispatch;
2162
53cb28cb 2163 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2164
79e2b9ae 2165 atomic_rcu_set(&as->dispatch, next);
53cb28cb 2166 if (cur) {
79e2b9ae 2167 call_rcu(cur, address_space_dispatch_free, rcu);
53cb28cb 2168 }
9affd6fc
PB
2169}
2170
1d71148e 2171static void tcg_commit(MemoryListener *listener)
50c1e149 2172{
182735ef 2173 CPUState *cpu;
117712c3
AK
2174
2175 /* since each CPU stores ram addresses in its TLB cache, we must
2176 reset the modified entries */
2177 /* XXX: slow ! */
bdc44640 2178 CPU_FOREACH(cpu) {
33bde2e1
EI
2179 /* FIXME: Disentangle the cpu.h circular file deps so we can
2180 directly get the right CPU from listener. */
2181 if (cpu->tcg_as_listener != listener) {
2182 continue;
2183 }
76e5c76f 2184 cpu_reload_memory_map(cpu);
117712c3 2185 }
50c1e149
AK
2186}
2187
ac1970fb
AK
2188void address_space_init_dispatch(AddressSpace *as)
2189{
00752703 2190 as->dispatch = NULL;
89ae337a 2191 as->dispatch_listener = (MemoryListener) {
ac1970fb 2192 .begin = mem_begin,
00752703 2193 .commit = mem_commit,
ac1970fb
AK
2194 .region_add = mem_add,
2195 .region_nop = mem_add,
2196 .priority = 0,
2197 };
89ae337a 2198 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2199}
2200
6e48e8f9
PB
2201void address_space_unregister(AddressSpace *as)
2202{
2203 memory_listener_unregister(&as->dispatch_listener);
2204}
2205
83f3c251
AK
2206void address_space_destroy_dispatch(AddressSpace *as)
2207{
2208 AddressSpaceDispatch *d = as->dispatch;
2209
79e2b9ae
PB
2210 atomic_rcu_set(&as->dispatch, NULL);
2211 if (d) {
2212 call_rcu(d, address_space_dispatch_free, rcu);
2213 }
83f3c251
AK
2214}
2215
62152b8a
AK
2216static void memory_map_init(void)
2217{
7267c094 2218 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2219
57271d63 2220 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2221 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2222
7267c094 2223 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2224 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2225 65536);
7dca8043 2226 address_space_init(&address_space_io, system_io, "I/O");
62152b8a
AK
2227}
2228
2229MemoryRegion *get_system_memory(void)
2230{
2231 return system_memory;
2232}
2233
309cb471
AK
2234MemoryRegion *get_system_io(void)
2235{
2236 return system_io;
2237}
2238
e2eef170
PB
2239#endif /* !defined(CONFIG_USER_ONLY) */
2240
13eb76e0
FB
2241/* physical memory access (slow version, mainly for debug) */
2242#if defined(CONFIG_USER_ONLY)
f17ec444 2243int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2244 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2245{
2246 int l, flags;
2247 target_ulong page;
53a5960a 2248 void * p;
13eb76e0
FB
2249
2250 while (len > 0) {
2251 page = addr & TARGET_PAGE_MASK;
2252 l = (page + TARGET_PAGE_SIZE) - addr;
2253 if (l > len)
2254 l = len;
2255 flags = page_get_flags(page);
2256 if (!(flags & PAGE_VALID))
a68fe89c 2257 return -1;
13eb76e0
FB
2258 if (is_write) {
2259 if (!(flags & PAGE_WRITE))
a68fe89c 2260 return -1;
579a97f7 2261 /* XXX: this code should not depend on lock_user */
72fb7daa 2262 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2263 return -1;
72fb7daa
AJ
2264 memcpy(p, buf, l);
2265 unlock_user(p, addr, l);
13eb76e0
FB
2266 } else {
2267 if (!(flags & PAGE_READ))
a68fe89c 2268 return -1;
579a97f7 2269 /* XXX: this code should not depend on lock_user */
72fb7daa 2270 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2271 return -1;
72fb7daa 2272 memcpy(buf, p, l);
5b257578 2273 unlock_user(p, addr, 0);
13eb76e0
FB
2274 }
2275 len -= l;
2276 buf += l;
2277 addr += l;
2278 }
a68fe89c 2279 return 0;
13eb76e0 2280}
8df1cd07 2281
13eb76e0 2282#else
51d7a9eb 2283
845b6214 2284static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
a8170e5e 2285 hwaddr length)
51d7a9eb 2286{
e87f7778
PB
2287 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2288 /* No early return if dirty_log_mask is or becomes 0, because
2289 * cpu_physical_memory_set_dirty_range will still call
2290 * xen_modified_memory.
2291 */
2292 if (dirty_log_mask) {
2293 dirty_log_mask =
2294 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2295 }
2296 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2297 tb_invalidate_phys_range(addr, addr + length);
2298 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
51d7a9eb 2299 }
e87f7778 2300 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
51d7a9eb
AP
2301}
2302
23326164 2303static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2304{
e1622f4b 2305 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2306
2307 /* Regions are assumed to support 1-4 byte accesses unless
2308 otherwise specified. */
23326164
RH
2309 if (access_size_max == 0) {
2310 access_size_max = 4;
2311 }
2312
2313 /* Bound the maximum access by the alignment of the address. */
2314 if (!mr->ops->impl.unaligned) {
2315 unsigned align_size_max = addr & -addr;
2316 if (align_size_max != 0 && align_size_max < access_size_max) {
2317 access_size_max = align_size_max;
2318 }
82f2563f 2319 }
23326164
RH
2320
2321 /* Don't attempt accesses larger than the maximum. */
2322 if (l > access_size_max) {
2323 l = access_size_max;
82f2563f 2324 }
098178f2
PB
2325 if (l & (l - 1)) {
2326 l = 1 << (qemu_fls(l) - 1);
2327 }
23326164
RH
2328
2329 return l;
82f2563f
PB
2330}
2331
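/* Worked example (illustrative only) of the bounding rules above, assuming a
 * region whose ops set valid.max_access_size = 4 and impl.unaligned = false:
 *
 *   l = 8, addr = 0x1006
 *   access_size_max = 4                        (from the region)
 *   align_size_max  = 0x1006 & -0x1006 = 2     (lowest set bit of the address)
 *   => access_size_max drops to 2, so l is capped to 2
 *
 * A non-power-of-two request such as l = 6 at an 8-byte-aligned address in a
 * region allowing 8-byte accesses would instead be rounded down to 4 by the
 * qemu_fls() step: 1 << (qemu_fls(6) - 1) == 4.
 */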
4840f10e 2332static bool prepare_mmio_access(MemoryRegion *mr)
125b3806 2333{
4840f10e
JK
2334 bool unlocked = !qemu_mutex_iothread_locked();
2335 bool release_lock = false;
2336
2337 if (unlocked && mr->global_locking) {
2338 qemu_mutex_lock_iothread();
2339 unlocked = false;
2340 release_lock = true;
2341 }
125b3806 2342 if (mr->flush_coalesced_mmio) {
4840f10e
JK
2343 if (unlocked) {
2344 qemu_mutex_lock_iothread();
2345 }
125b3806 2346 qemu_flush_coalesced_mmio_buffer();
4840f10e
JK
2347 if (unlocked) {
2348 qemu_mutex_unlock_iothread();
2349 }
125b3806 2350 }
4840f10e
JK
2351
2352 return release_lock;
125b3806
PB
2353}
2354
5c9eb028
PM
2355MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2356 uint8_t *buf, int len, bool is_write)
13eb76e0 2357{
149f54b5 2358 hwaddr l;
13eb76e0 2359 uint8_t *ptr;
791af8c8 2360 uint64_t val;
149f54b5 2361 hwaddr addr1;
5c8a00ce 2362 MemoryRegion *mr;
3b643495 2363 MemTxResult result = MEMTX_OK;
4840f10e 2364 bool release_lock = false;
3b46e624 2365
41063e1e 2366 rcu_read_lock();
13eb76e0 2367 while (len > 0) {
149f54b5 2368 l = len;
5c8a00ce 2369 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2370
13eb76e0 2371 if (is_write) {
5c8a00ce 2372 if (!memory_access_is_direct(mr, is_write)) {
4840f10e 2373 release_lock |= prepare_mmio_access(mr);
5c8a00ce 2374 l = memory_access_size(mr, l, addr1);
4917cf44 2375 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2376 potential bugs */
23326164
RH
2377 switch (l) {
2378 case 8:
2379 /* 64 bit write access */
2380 val = ldq_p(buf);
3b643495
PM
2381 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2382 attrs);
23326164
RH
2383 break;
2384 case 4:
1c213d19 2385 /* 32 bit write access */
c27004ec 2386 val = ldl_p(buf);
3b643495
PM
2387 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2388 attrs);
23326164
RH
2389 break;
2390 case 2:
1c213d19 2391 /* 16 bit write access */
c27004ec 2392 val = lduw_p(buf);
3b643495
PM
2393 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2394 attrs);
23326164
RH
2395 break;
2396 case 1:
1c213d19 2397 /* 8 bit write access */
c27004ec 2398 val = ldub_p(buf);
3b643495
PM
2399 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2400 attrs);
23326164
RH
2401 break;
2402 default:
2403 abort();
13eb76e0 2404 }
2bbfa05d 2405 } else {
5c8a00ce 2406 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2407 /* RAM case */
5579c7f3 2408 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2409 memcpy(ptr, buf, l);
845b6214 2410 invalidate_and_set_dirty(mr, addr1, l);
13eb76e0
FB
2411 }
2412 } else {
5c8a00ce 2413 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2414 /* I/O case */
4840f10e 2415 release_lock |= prepare_mmio_access(mr);
5c8a00ce 2416 l = memory_access_size(mr, l, addr1);
23326164
RH
2417 switch (l) {
2418 case 8:
2419 /* 64 bit read access */
3b643495
PM
2420 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2421 attrs);
23326164
RH
2422 stq_p(buf, val);
2423 break;
2424 case 4:
13eb76e0 2425 /* 32 bit read access */
3b643495
PM
2426 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2427 attrs);
c27004ec 2428 stl_p(buf, val);
23326164
RH
2429 break;
2430 case 2:
13eb76e0 2431 /* 16 bit read access */
3b643495
PM
2432 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2433 attrs);
c27004ec 2434 stw_p(buf, val);
23326164
RH
2435 break;
2436 case 1:
1c213d19 2437 /* 8 bit read access */
3b643495
PM
2438 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2439 attrs);
c27004ec 2440 stb_p(buf, val);
23326164
RH
2441 break;
2442 default:
2443 abort();
13eb76e0
FB
2444 }
2445 } else {
2446 /* RAM case */
5c8a00ce 2447 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2448 memcpy(buf, ptr, l);
13eb76e0
FB
2449 }
2450 }
4840f10e
JK
2451
2452 if (release_lock) {
2453 qemu_mutex_unlock_iothread();
2454 release_lock = false;
2455 }
2456
13eb76e0
FB
2457 len -= l;
2458 buf += l;
2459 addr += l;
2460 }
41063e1e 2461 rcu_read_unlock();
fd8aaa76 2462
3b643495 2463 return result;
13eb76e0 2464}
8df1cd07 2465
5c9eb028
PM
2466MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2467 const uint8_t *buf, int len)
ac1970fb 2468{
5c9eb028 2469 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
ac1970fb
AK
2470}
2471
5c9eb028
PM
2472MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2473 uint8_t *buf, int len)
ac1970fb 2474{
5c9eb028 2475 return address_space_rw(as, addr, attrs, buf, len, false);
ac1970fb
AK
2476}
2477
2478
a8170e5e 2479void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2480 int len, int is_write)
2481{
5c9eb028
PM
2482 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2483 buf, len, is_write);
ac1970fb
AK
2484}
2485
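/* Hedged sketch, not in the original file: a device model reading guest
 * memory would normally go through address_space_read()/write() with explicit
 * attributes so MMIO failures are visible.  The helper name and descriptor
 * layout are hypothetical.
 */
static bool read_guest_descriptor(AddressSpace *as, hwaddr desc_pa,
                                  uint8_t *desc, int desc_len)
{
    MemTxResult r = address_space_read(as, desc_pa, MEMTXATTRS_UNSPECIFIED,
                                       desc, desc_len);

    /* MEMTX_OK is zero; any set bit reports a decode or device error. */
    return r == MEMTX_OK;
}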
582b55a9
AG
2486enum write_rom_type {
2487 WRITE_DATA,
2488 FLUSH_CACHE,
2489};
2490
2a221651 2491static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2492 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2493{
149f54b5 2494 hwaddr l;
d0ecd2aa 2495 uint8_t *ptr;
149f54b5 2496 hwaddr addr1;
5c8a00ce 2497 MemoryRegion *mr;
3b46e624 2498
41063e1e 2499 rcu_read_lock();
d0ecd2aa 2500 while (len > 0) {
149f54b5 2501 l = len;
2a221651 2502 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2503
5c8a00ce
PB
2504 if (!(memory_region_is_ram(mr) ||
2505 memory_region_is_romd(mr))) {
b242e0e0 2506 l = memory_access_size(mr, l, addr1);
d0ecd2aa 2507 } else {
5c8a00ce 2508 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2509 /* ROM/RAM case */
5579c7f3 2510 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2511 switch (type) {
2512 case WRITE_DATA:
2513 memcpy(ptr, buf, l);
845b6214 2514 invalidate_and_set_dirty(mr, addr1, l);
582b55a9
AG
2515 break;
2516 case FLUSH_CACHE:
2517 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2518 break;
2519 }
d0ecd2aa
FB
2520 }
2521 len -= l;
2522 buf += l;
2523 addr += l;
2524 }
41063e1e 2525 rcu_read_unlock();
d0ecd2aa
FB
2526}
2527
582b55a9 2528/* used for ROM loading : can write in RAM and ROM */
2a221651 2529void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2530 const uint8_t *buf, int len)
2531{
2a221651 2532 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2533}
2534
2535void cpu_flush_icache_range(hwaddr start, int len)
2536{
2537 /*
2538 * This function should do the same thing as an icache flush that was
2539 * triggered from within the guest. For TCG we are always cache coherent,
2540 * so there is no need to flush anything. For KVM / Xen we need to flush
2541 * the host's instruction cache at least.
2542 */
2543 if (tcg_enabled()) {
2544 return;
2545 }
2546
2a221651
EI
2547 cpu_physical_memory_write_rom_internal(&address_space_memory,
2548 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2549}
2550
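/* Hedged example, not part of exec.c: a firmware loader might combine the two
 * helpers above -- write the blob through the ROM path, then flush the host
 * instruction cache for KVM/Xen.  The function name and parameters are made up.
 */
static void load_firmware_blob(hwaddr dest, const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(&address_space_memory, dest, blob, size);
    /* A no-op under TCG, see cpu_flush_icache_range() above. */
    cpu_flush_icache_range(dest, size);
}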
6d16c2f8 2551typedef struct {
d3e71559 2552 MemoryRegion *mr;
6d16c2f8 2553 void *buffer;
a8170e5e
AK
2554 hwaddr addr;
2555 hwaddr len;
c2cba0ff 2556 bool in_use;
6d16c2f8
AL
2557} BounceBuffer;
2558
2559static BounceBuffer bounce;
2560
ba223c29 2561typedef struct MapClient {
e95205e1 2562 QEMUBH *bh;
72cf2d4f 2563 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2564} MapClient;
2565
38e047b5 2566QemuMutex map_client_list_lock;
72cf2d4f
BS
2567static QLIST_HEAD(map_client_list, MapClient) map_client_list
2568 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29 2569
e95205e1
FZ
2570static void cpu_unregister_map_client_do(MapClient *client)
2571{
2572 QLIST_REMOVE(client, link);
2573 g_free(client);
2574}
2575
33b6c2ed
FZ
2576static void cpu_notify_map_clients_locked(void)
2577{
2578 MapClient *client;
2579
2580 while (!QLIST_EMPTY(&map_client_list)) {
2581 client = QLIST_FIRST(&map_client_list);
e95205e1
FZ
2582 qemu_bh_schedule(client->bh);
2583 cpu_unregister_map_client_do(client);
33b6c2ed
FZ
2584 }
2585}
2586
e95205e1 2587void cpu_register_map_client(QEMUBH *bh)
ba223c29 2588{
7267c094 2589 MapClient *client = g_malloc(sizeof(*client));
ba223c29 2590
38e047b5 2591 qemu_mutex_lock(&map_client_list_lock);
e95205e1 2592 client->bh = bh;
72cf2d4f 2593 QLIST_INSERT_HEAD(&map_client_list, client, link);
33b6c2ed
FZ
2594 if (!atomic_read(&bounce.in_use)) {
2595 cpu_notify_map_clients_locked();
2596 }
38e047b5 2597 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2598}
2599
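/* Hedged sketch, not in the original source: a DMA user whose
 * address_space_map() attempt failed because the bounce buffer was busy can
 * register a bottom half and retry from there.  The callback name and opaque
 * argument are hypothetical.
 */
static void retry_dma_bh(void *opaque)
{
    /* ...re-issue the failed address_space_map() from here... */
}

static void queue_dma_retry(void *opaque)
{
    QEMUBH *bh = qemu_bh_new(retry_dma_bh, opaque);

    /* The BH is scheduled as soon as the bounce buffer is released. */
    cpu_register_map_client(bh);
}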
38e047b5 2600void cpu_exec_init_all(void)
ba223c29 2601{
38e047b5
FZ
2602 qemu_mutex_init(&ram_list.mutex);
2603 memory_map_init();
2604 io_mem_init();
2605 qemu_mutex_init(&map_client_list_lock);
ba223c29
AL
2606}
2607
e95205e1 2608void cpu_unregister_map_client(QEMUBH *bh)
ba223c29
AL
2609{
2610 MapClient *client;
2611
e95205e1
FZ
2612 qemu_mutex_lock(&map_client_list_lock);
2613 QLIST_FOREACH(client, &map_client_list, link) {
2614 if (client->bh == bh) {
2615 cpu_unregister_map_client_do(client);
2616 break;
2617 }
ba223c29 2618 }
e95205e1 2619 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2620}
2621
2622static void cpu_notify_map_clients(void)
2623{
38e047b5 2624 qemu_mutex_lock(&map_client_list_lock);
33b6c2ed 2625 cpu_notify_map_clients_locked();
38e047b5 2626 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2627}
2628
51644ab7
PB
2629bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2630{
5c8a00ce 2631 MemoryRegion *mr;
51644ab7
PB
2632 hwaddr l, xlat;
2633
41063e1e 2634 rcu_read_lock();
51644ab7
PB
2635 while (len > 0) {
2636 l = len;
5c8a00ce
PB
2637 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2638 if (!memory_access_is_direct(mr, is_write)) {
2639 l = memory_access_size(mr, l, addr);
2640 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2641 return false;
2642 }
2643 }
2644
2645 len -= l;
2646 addr += l;
2647 }
41063e1e 2648 rcu_read_unlock();
51644ab7
PB
2649 return true;
2650}
2651
6d16c2f8
AL
2652/* Map a physical memory region into a host virtual address.
2653 * May map a subset of the requested range, given by and returned in *plen.
2654 * May return NULL if resources needed to perform the mapping are exhausted.
2655 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2656 * Use cpu_register_map_client() to know when retrying the map operation is
2657 * likely to succeed.
6d16c2f8 2658 */
ac1970fb 2659void *address_space_map(AddressSpace *as,
a8170e5e
AK
2660 hwaddr addr,
2661 hwaddr *plen,
ac1970fb 2662 bool is_write)
6d16c2f8 2663{
a8170e5e 2664 hwaddr len = *plen;
e3127ae0
PB
2665 hwaddr done = 0;
2666 hwaddr l, xlat, base;
2667 MemoryRegion *mr, *this_mr;
2668 ram_addr_t raddr;
6d16c2f8 2669
e3127ae0
PB
2670 if (len == 0) {
2671 return NULL;
2672 }
38bee5dc 2673
e3127ae0 2674 l = len;
41063e1e 2675 rcu_read_lock();
e3127ae0 2676 mr = address_space_translate(as, addr, &xlat, &l, is_write);
41063e1e 2677
e3127ae0 2678 if (!memory_access_is_direct(mr, is_write)) {
c2cba0ff 2679 if (atomic_xchg(&bounce.in_use, true)) {
41063e1e 2680 rcu_read_unlock();
e3127ae0 2681 return NULL;
6d16c2f8 2682 }
e85d9db5
KW
2683 /* Avoid unbounded allocations */
2684 l = MIN(l, TARGET_PAGE_SIZE);
2685 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2686 bounce.addr = addr;
2687 bounce.len = l;
d3e71559
PB
2688
2689 memory_region_ref(mr);
2690 bounce.mr = mr;
e3127ae0 2691 if (!is_write) {
5c9eb028
PM
2692 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2693 bounce.buffer, l);
8ab934f9 2694 }
6d16c2f8 2695
41063e1e 2696 rcu_read_unlock();
e3127ae0
PB
2697 *plen = l;
2698 return bounce.buffer;
2699 }
2700
2701 base = xlat;
2702 raddr = memory_region_get_ram_addr(mr);
2703
2704 for (;;) {
6d16c2f8
AL
2705 len -= l;
2706 addr += l;
e3127ae0
PB
2707 done += l;
2708 if (len == 0) {
2709 break;
2710 }
2711
2712 l = len;
2713 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2714 if (this_mr != mr || xlat != base + done) {
2715 break;
2716 }
6d16c2f8 2717 }
e3127ae0 2718
d3e71559 2719 memory_region_ref(mr);
41063e1e 2720 rcu_read_unlock();
e3127ae0
PB
2721 *plen = done;
2722 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2723}
2724
ac1970fb 2725/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2726 * Will also mark the memory as dirty if is_write == 1. access_len gives
2727 * the amount of memory that was actually read or written by the caller.
2728 */
a8170e5e
AK
2729void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2730 int is_write, hwaddr access_len)
6d16c2f8
AL
2731{
2732 if (buffer != bounce.buffer) {
d3e71559
PB
2733 MemoryRegion *mr;
2734 ram_addr_t addr1;
2735
2736 mr = qemu_ram_addr_from_host(buffer, &addr1);
2737 assert(mr != NULL);
6d16c2f8 2738 if (is_write) {
845b6214 2739 invalidate_and_set_dirty(mr, addr1, access_len);
6d16c2f8 2740 }
868bb33f 2741 if (xen_enabled()) {
e41d7c69 2742 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2743 }
d3e71559 2744 memory_region_unref(mr);
6d16c2f8
AL
2745 return;
2746 }
2747 if (is_write) {
5c9eb028
PM
2748 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2749 bounce.buffer, access_len);
6d16c2f8 2750 }
f8a83245 2751 qemu_vfree(bounce.buffer);
6d16c2f8 2752 bounce.buffer = NULL;
d3e71559 2753 memory_region_unref(bounce.mr);
c2cba0ff 2754 atomic_mb_set(&bounce.in_use, false);
ba223c29 2755 cpu_notify_map_clients();
6d16c2f8 2756}
d0ecd2aa 2757
a8170e5e
AK
2758void *cpu_physical_memory_map(hwaddr addr,
2759 hwaddr *plen,
ac1970fb
AK
2760 int is_write)
2761{
2762 return address_space_map(&address_space_memory, addr, plen, is_write);
2763}
2764
a8170e5e
AK
2765void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2766 int is_write, hwaddr access_len)
ac1970fb
AK
2767{
2768 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2769}
2770
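/* Hedged usage sketch, not part of this file: the usual map/unmap pattern for
 * a DMA write into guest memory.  If the mapping comes back NULL or shorter
 * than requested, the caller must split the transfer or retry later (see
 * cpu_register_map_client() above).  All names below are illustrative.
 */
static bool dma_write_buffer(hwaddr guest_pa, const uint8_t *src, hwaddr len)
{
    hwaddr mapped = len;
    void *host = cpu_physical_memory_map(guest_pa, &mapped, 1 /* is_write */);

    if (!host || mapped < len) {
        if (host) {
            cpu_physical_memory_unmap(host, mapped, 1, 0);
        }
        return false;           /* caller should retry or fall back */
    }
    memcpy(host, src, len);
    /* access_len == len marks the range dirty and releases the mapping. */
    cpu_physical_memory_unmap(host, mapped, 1, len);
    return true;
}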
8df1cd07 2771/* warning: addr must be aligned */
50013115
PM
2772static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2773 MemTxAttrs attrs,
2774 MemTxResult *result,
2775 enum device_endian endian)
8df1cd07 2776{
8df1cd07 2777 uint8_t *ptr;
791af8c8 2778 uint64_t val;
5c8a00ce 2779 MemoryRegion *mr;
149f54b5
PB
2780 hwaddr l = 4;
2781 hwaddr addr1;
50013115 2782 MemTxResult r;
4840f10e 2783 bool release_lock = false;
8df1cd07 2784
41063e1e 2785 rcu_read_lock();
fdfba1a2 2786 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2787 if (l < 4 || !memory_access_is_direct(mr, false)) {
4840f10e 2788 release_lock |= prepare_mmio_access(mr);
125b3806 2789
8df1cd07 2790 /* I/O case */
50013115 2791 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
1e78bcc1
AG
2792#if defined(TARGET_WORDS_BIGENDIAN)
2793 if (endian == DEVICE_LITTLE_ENDIAN) {
2794 val = bswap32(val);
2795 }
2796#else
2797 if (endian == DEVICE_BIG_ENDIAN) {
2798 val = bswap32(val);
2799 }
2800#endif
8df1cd07
FB
2801 } else {
2802 /* RAM case */
5c8a00ce 2803 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2804 & TARGET_PAGE_MASK)
149f54b5 2805 + addr1);
1e78bcc1
AG
2806 switch (endian) {
2807 case DEVICE_LITTLE_ENDIAN:
2808 val = ldl_le_p(ptr);
2809 break;
2810 case DEVICE_BIG_ENDIAN:
2811 val = ldl_be_p(ptr);
2812 break;
2813 default:
2814 val = ldl_p(ptr);
2815 break;
2816 }
50013115
PM
2817 r = MEMTX_OK;
2818 }
2819 if (result) {
2820 *result = r;
8df1cd07 2821 }
4840f10e
JK
2822 if (release_lock) {
2823 qemu_mutex_unlock_iothread();
2824 }
41063e1e 2825 rcu_read_unlock();
8df1cd07
FB
2826 return val;
2827}
2828
50013115
PM
2829uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2830 MemTxAttrs attrs, MemTxResult *result)
2831{
2832 return address_space_ldl_internal(as, addr, attrs, result,
2833 DEVICE_NATIVE_ENDIAN);
2834}
2835
2836uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2837 MemTxAttrs attrs, MemTxResult *result)
2838{
2839 return address_space_ldl_internal(as, addr, attrs, result,
2840 DEVICE_LITTLE_ENDIAN);
2841}
2842
2843uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2844 MemTxAttrs attrs, MemTxResult *result)
2845{
2846 return address_space_ldl_internal(as, addr, attrs, result,
2847 DEVICE_BIG_ENDIAN);
2848}
2849
fdfba1a2 2850uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2851{
50013115 2852 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2853}
2854
fdfba1a2 2855uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2856{
50013115 2857 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2858}
2859
fdfba1a2 2860uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2861{
50013115 2862 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2863}
2864
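/* Hedged example, not in the original source: reading a 32-bit little-endian
 * device register while checking the transaction result, instead of using the
 * result-discarding ldl_le_phys() wrapper above.  The register address and
 * fallback value are made up.
 */
static uint32_t read_device_reg(AddressSpace *as, hwaddr reg_pa)
{
    MemTxResult r;
    uint32_t val = address_space_ldl_le(as, reg_pa, MEMTXATTRS_UNSPECIFIED, &r);

    if (r != MEMTX_OK) {
        val = 0xffffffff;       /* conventional "open bus" value */
    }
    return val;
}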
84b7b8e7 2865/* warning: addr must be aligned */
50013115
PM
2866static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2867 MemTxAttrs attrs,
2868 MemTxResult *result,
2869 enum device_endian endian)
84b7b8e7 2870{
84b7b8e7
FB
2871 uint8_t *ptr;
2872 uint64_t val;
5c8a00ce 2873 MemoryRegion *mr;
149f54b5
PB
2874 hwaddr l = 8;
2875 hwaddr addr1;
50013115 2876 MemTxResult r;
4840f10e 2877 bool release_lock = false;
84b7b8e7 2878
41063e1e 2879 rcu_read_lock();
2c17449b 2880 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2881 false);
2882 if (l < 8 || !memory_access_is_direct(mr, false)) {
4840f10e 2883 release_lock |= prepare_mmio_access(mr);
125b3806 2884
84b7b8e7 2885 /* I/O case */
50013115 2886 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
968a5627
PB
2887#if defined(TARGET_WORDS_BIGENDIAN)
2888 if (endian == DEVICE_LITTLE_ENDIAN) {
2889 val = bswap64(val);
2890 }
2891#else
2892 if (endian == DEVICE_BIG_ENDIAN) {
2893 val = bswap64(val);
2894 }
84b7b8e7
FB
2895#endif
2896 } else {
2897 /* RAM case */
5c8a00ce 2898 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2899 & TARGET_PAGE_MASK)
149f54b5 2900 + addr1);
1e78bcc1
AG
2901 switch (endian) {
2902 case DEVICE_LITTLE_ENDIAN:
2903 val = ldq_le_p(ptr);
2904 break;
2905 case DEVICE_BIG_ENDIAN:
2906 val = ldq_be_p(ptr);
2907 break;
2908 default:
2909 val = ldq_p(ptr);
2910 break;
2911 }
50013115
PM
2912 r = MEMTX_OK;
2913 }
2914 if (result) {
2915 *result = r;
84b7b8e7 2916 }
4840f10e
JK
2917 if (release_lock) {
2918 qemu_mutex_unlock_iothread();
2919 }
41063e1e 2920 rcu_read_unlock();
84b7b8e7
FB
2921 return val;
2922}
2923
50013115
PM
2924uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2925 MemTxAttrs attrs, MemTxResult *result)
2926{
2927 return address_space_ldq_internal(as, addr, attrs, result,
2928 DEVICE_NATIVE_ENDIAN);
2929}
2930
2931uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2932 MemTxAttrs attrs, MemTxResult *result)
2933{
2934 return address_space_ldq_internal(as, addr, attrs, result,
2935 DEVICE_LITTLE_ENDIAN);
2936}
2937
2938uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2939 MemTxAttrs attrs, MemTxResult *result)
2940{
2941 return address_space_ldq_internal(as, addr, attrs, result,
2942 DEVICE_BIG_ENDIAN);
2943}
2944
2c17449b 2945uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2946{
50013115 2947 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2948}
2949
2c17449b 2950uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2951{
50013115 2952 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2953}
2954
2c17449b 2955uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2956{
50013115 2957 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2958}
2959
aab33094 2960/* XXX: optimize */
50013115
PM
2961uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
2962 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
2963{
2964 uint8_t val;
50013115
PM
2965 MemTxResult r;
2966
2967 r = address_space_rw(as, addr, attrs, &val, 1, 0);
2968 if (result) {
2969 *result = r;
2970 }
aab33094
FB
2971 return val;
2972}
2973
50013115
PM
2974uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2975{
2976 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2977}
2978
733f0b02 2979/* warning: addr must be aligned */
50013115
PM
2980static inline uint32_t address_space_lduw_internal(AddressSpace *as,
2981 hwaddr addr,
2982 MemTxAttrs attrs,
2983 MemTxResult *result,
2984 enum device_endian endian)
aab33094 2985{
733f0b02
MT
2986 uint8_t *ptr;
2987 uint64_t val;
5c8a00ce 2988 MemoryRegion *mr;
149f54b5
PB
2989 hwaddr l = 2;
2990 hwaddr addr1;
50013115 2991 MemTxResult r;
4840f10e 2992 bool release_lock = false;
733f0b02 2993
41063e1e 2994 rcu_read_lock();
41701aa4 2995 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2996 false);
2997 if (l < 2 || !memory_access_is_direct(mr, false)) {
4840f10e 2998 release_lock |= prepare_mmio_access(mr);
125b3806 2999
733f0b02 3000 /* I/O case */
50013115 3001 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
1e78bcc1
AG
3002#if defined(TARGET_WORDS_BIGENDIAN)
3003 if (endian == DEVICE_LITTLE_ENDIAN) {
3004 val = bswap16(val);
3005 }
3006#else
3007 if (endian == DEVICE_BIG_ENDIAN) {
3008 val = bswap16(val);
3009 }
3010#endif
733f0b02
MT
3011 } else {
3012 /* RAM case */
5c8a00ce 3013 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 3014 & TARGET_PAGE_MASK)
149f54b5 3015 + addr1);
1e78bcc1
AG
3016 switch (endian) {
3017 case DEVICE_LITTLE_ENDIAN:
3018 val = lduw_le_p(ptr);
3019 break;
3020 case DEVICE_BIG_ENDIAN:
3021 val = lduw_be_p(ptr);
3022 break;
3023 default:
3024 val = lduw_p(ptr);
3025 break;
3026 }
50013115
PM
3027 r = MEMTX_OK;
3028 }
3029 if (result) {
3030 *result = r;
733f0b02 3031 }
4840f10e
JK
3032 if (release_lock) {
3033 qemu_mutex_unlock_iothread();
3034 }
41063e1e 3035 rcu_read_unlock();
733f0b02 3036 return val;
aab33094
FB
3037}
3038
50013115
PM
3039uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3040 MemTxAttrs attrs, MemTxResult *result)
3041{
3042 return address_space_lduw_internal(as, addr, attrs, result,
3043 DEVICE_NATIVE_ENDIAN);
3044}
3045
3046uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3047 MemTxAttrs attrs, MemTxResult *result)
3048{
3049 return address_space_lduw_internal(as, addr, attrs, result,
3050 DEVICE_LITTLE_ENDIAN);
3051}
3052
3053uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3054 MemTxAttrs attrs, MemTxResult *result)
3055{
3056 return address_space_lduw_internal(as, addr, attrs, result,
3057 DEVICE_BIG_ENDIAN);
3058}
3059
41701aa4 3060uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3061{
50013115 3062 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3063}
3064
41701aa4 3065uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3066{
50013115 3067 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3068}
3069
41701aa4 3070uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3071{
50013115 3072 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3073}
3074
8df1cd07
FB
3075/* warning: addr must be aligned. The ram page is not marked as dirty
3076 and the code inside is not invalidated. It is useful if the dirty
3077 bits are used to track modified PTEs */
50013115
PM
3078void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3079 MemTxAttrs attrs, MemTxResult *result)
8df1cd07 3080{
8df1cd07 3081 uint8_t *ptr;
5c8a00ce 3082 MemoryRegion *mr;
149f54b5
PB
3083 hwaddr l = 4;
3084 hwaddr addr1;
50013115 3085 MemTxResult r;
845b6214 3086 uint8_t dirty_log_mask;
4840f10e 3087 bool release_lock = false;
8df1cd07 3088
41063e1e 3089 rcu_read_lock();
2198a121 3090 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3091 true);
3092 if (l < 4 || !memory_access_is_direct(mr, true)) {
4840f10e 3093 release_lock |= prepare_mmio_access(mr);
125b3806 3094
50013115 3095 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3096 } else {
5c8a00ce 3097 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3098 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3099 stl_p(ptr, val);
74576198 3100
845b6214
PB
3101 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3102 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
58d2707e 3103 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
50013115
PM
3104 r = MEMTX_OK;
3105 }
3106 if (result) {
3107 *result = r;
8df1cd07 3108 }
4840f10e
JK
3109 if (release_lock) {
3110 qemu_mutex_unlock_iothread();
3111 }
41063e1e 3112 rcu_read_unlock();
8df1cd07
FB
3113}
3114
50013115
PM
3115void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3116{
3117 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3118}
3119
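/* Hedged sketch, not part of exec.c: the classic consumer of the notdirty
 * store above is page-table-walking code that sets accessed/dirty bits in a
 * guest PTE without flagging the page as modified for TB invalidation.  The
 * PTE layout and the bit value below are hypothetical.
 */
static void set_pte_accessed(AddressSpace *as, hwaddr pte_pa, uint32_t pte)
{
    stl_phys_notdirty(as, pte_pa, pte | 0x20 /* hypothetical ACCESSED bit */);
}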
8df1cd07 3120/* warning: addr must be aligned */
50013115
PM
3121static inline void address_space_stl_internal(AddressSpace *as,
3122 hwaddr addr, uint32_t val,
3123 MemTxAttrs attrs,
3124 MemTxResult *result,
3125 enum device_endian endian)
8df1cd07 3126{
8df1cd07 3127 uint8_t *ptr;
5c8a00ce 3128 MemoryRegion *mr;
149f54b5
PB
3129 hwaddr l = 4;
3130 hwaddr addr1;
50013115 3131 MemTxResult r;
4840f10e 3132 bool release_lock = false;
8df1cd07 3133
41063e1e 3134 rcu_read_lock();
ab1da857 3135 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3136 true);
3137 if (l < 4 || !memory_access_is_direct(mr, true)) {
4840f10e 3138 release_lock |= prepare_mmio_access(mr);
125b3806 3139
1e78bcc1
AG
3140#if defined(TARGET_WORDS_BIGENDIAN)
3141 if (endian == DEVICE_LITTLE_ENDIAN) {
3142 val = bswap32(val);
3143 }
3144#else
3145 if (endian == DEVICE_BIG_ENDIAN) {
3146 val = bswap32(val);
3147 }
3148#endif
50013115 3149 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3150 } else {
8df1cd07 3151 /* RAM case */
5c8a00ce 3152 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3153 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3154 switch (endian) {
3155 case DEVICE_LITTLE_ENDIAN:
3156 stl_le_p(ptr, val);
3157 break;
3158 case DEVICE_BIG_ENDIAN:
3159 stl_be_p(ptr, val);
3160 break;
3161 default:
3162 stl_p(ptr, val);
3163 break;
3164 }
845b6214 3165 invalidate_and_set_dirty(mr, addr1, 4);
50013115
PM
3166 r = MEMTX_OK;
3167 }
3168 if (result) {
3169 *result = r;
8df1cd07 3170 }
4840f10e
JK
3171 if (release_lock) {
3172 qemu_mutex_unlock_iothread();
3173 }
41063e1e 3174 rcu_read_unlock();
8df1cd07
FB
3175}
3176
50013115
PM
3177void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3178 MemTxAttrs attrs, MemTxResult *result)
3179{
3180 address_space_stl_internal(as, addr, val, attrs, result,
3181 DEVICE_NATIVE_ENDIAN);
3182}
3183
3184void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3185 MemTxAttrs attrs, MemTxResult *result)
3186{
3187 address_space_stl_internal(as, addr, val, attrs, result,
3188 DEVICE_LITTLE_ENDIAN);
3189}
3190
3191void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3192 MemTxAttrs attrs, MemTxResult *result)
3193{
3194 address_space_stl_internal(as, addr, val, attrs, result,
3195 DEVICE_BIG_ENDIAN);
3196}
3197
ab1da857 3198void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3199{
50013115 3200 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3201}
3202
ab1da857 3203void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3204{
50013115 3205 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3206}
3207
ab1da857 3208void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3209{
50013115 3210 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3211}
3212
aab33094 3213/* XXX: optimize */
50013115
PM
3214void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3215 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
3216{
3217 uint8_t v = val;
50013115
PM
3218 MemTxResult r;
3219
3220 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3221 if (result) {
3222 *result = r;
3223 }
3224}
3225
3226void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3227{
3228 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
aab33094
FB
3229}
3230
733f0b02 3231/* warning: addr must be aligned */
50013115
PM
3232static inline void address_space_stw_internal(AddressSpace *as,
3233 hwaddr addr, uint32_t val,
3234 MemTxAttrs attrs,
3235 MemTxResult *result,
3236 enum device_endian endian)
aab33094 3237{
733f0b02 3238 uint8_t *ptr;
5c8a00ce 3239 MemoryRegion *mr;
149f54b5
PB
3240 hwaddr l = 2;
3241 hwaddr addr1;
50013115 3242 MemTxResult r;
4840f10e 3243 bool release_lock = false;
733f0b02 3244
41063e1e 3245 rcu_read_lock();
5ce5944d 3246 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 3247 if (l < 2 || !memory_access_is_direct(mr, true)) {
4840f10e 3248 release_lock |= prepare_mmio_access(mr);
125b3806 3249
1e78bcc1
AG
3250#if defined(TARGET_WORDS_BIGENDIAN)
3251 if (endian == DEVICE_LITTLE_ENDIAN) {
3252 val = bswap16(val);
3253 }
3254#else
3255 if (endian == DEVICE_BIG_ENDIAN) {
3256 val = bswap16(val);
3257 }
3258#endif
50013115 3259 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
733f0b02 3260 } else {
733f0b02 3261 /* RAM case */
5c8a00ce 3262 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 3263 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3264 switch (endian) {
3265 case DEVICE_LITTLE_ENDIAN:
3266 stw_le_p(ptr, val);
3267 break;
3268 case DEVICE_BIG_ENDIAN:
3269 stw_be_p(ptr, val);
3270 break;
3271 default:
3272 stw_p(ptr, val);
3273 break;
3274 }
845b6214 3275 invalidate_and_set_dirty(mr, addr1, 2);
50013115
PM
3276 r = MEMTX_OK;
3277 }
3278 if (result) {
3279 *result = r;
733f0b02 3280 }
4840f10e
JK
3281 if (release_lock) {
3282 qemu_mutex_unlock_iothread();
3283 }
41063e1e 3284 rcu_read_unlock();
aab33094
FB
3285}
3286
50013115
PM
3287void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3288 MemTxAttrs attrs, MemTxResult *result)
3289{
3290 address_space_stw_internal(as, addr, val, attrs, result,
3291 DEVICE_NATIVE_ENDIAN);
3292}
3293
3294void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3295 MemTxAttrs attrs, MemTxResult *result)
3296{
3297 address_space_stw_internal(as, addr, val, attrs, result,
3298 DEVICE_LITTLE_ENDIAN);
3299}
3300
3301void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3302 MemTxAttrs attrs, MemTxResult *result)
3303{
3304 address_space_stw_internal(as, addr, val, attrs, result,
3305 DEVICE_BIG_ENDIAN);
3306}
3307
5ce5944d 3308void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3309{
50013115 3310 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3311}
3312
5ce5944d 3313void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3314{
50013115 3315 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3316}
3317
5ce5944d 3318void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3319{
50013115 3320 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3321}
3322
aab33094 3323/* XXX: optimize */
50013115
PM
3324void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3325 MemTxAttrs attrs, MemTxResult *result)
aab33094 3326{
50013115 3327 MemTxResult r;
aab33094 3328 val = tswap64(val);
50013115
PM
3329 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3330 if (result) {
3331 *result = r;
3332 }
aab33094
FB
3333}
3334
50013115
PM
3335void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3336 MemTxAttrs attrs, MemTxResult *result)
1e78bcc1 3337{
50013115 3338 MemTxResult r;
1e78bcc1 3339 val = cpu_to_le64(val);
50013115
PM
3340 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3341 if (result) {
3342 *result = r;
3343 }
3344}
3345void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3346 MemTxAttrs attrs, MemTxResult *result)
3347{
3348 MemTxResult r;
3349 val = cpu_to_be64(val);
3350 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3351 if (result) {
3352 *result = r;
3353 }
3354}
3355
3356void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3357{
3358 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3359}
3360
3361void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3362{
3363 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3364}
3365
f606604f 3366void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1 3367{
50013115 3368 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3369}
3370
5e2972fd 3371/* virtual memory access for debug (includes writing to ROM) */
f17ec444 3372int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 3373 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3374{
3375 int l;
a8170e5e 3376 hwaddr phys_addr;
9b3c35e0 3377 target_ulong page;
13eb76e0
FB
3378
3379 while (len > 0) {
3380 page = addr & TARGET_PAGE_MASK;
f17ec444 3381 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
3382 /* if no physical page mapped, return an error */
3383 if (phys_addr == -1)
3384 return -1;
3385 l = (page + TARGET_PAGE_SIZE) - addr;
3386 if (l > len)
3387 l = len;
5e2972fd 3388 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
3389 if (is_write) {
3390 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3391 } else {
5c9eb028
PM
3392 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3393 buf, l, 0);
2e38847b 3394 }
13eb76e0
FB
3395 len -= l;
3396 buf += l;
3397 addr += l;
3398 }
3399 return 0;
3400}
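/* Hedged example, not in the original file: a gdbstub-style helper that reads
 * guest *virtual* memory for a given CPU, going through the per-page
 * cpu_get_phys_page_debug() translation above.  The function name is made up.
 */
static bool debugger_read_virt(CPUState *cpu, target_ulong va,
                               uint8_t *buf, int len)
{
    /* is_write == 0: plain read; a non-zero return means an unmapped page. */
    return cpu_memory_rw_debug(cpu, va, buf, len, 0) == 0;
}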
a68fe89c 3401#endif
13eb76e0 3402
8e4a424b
BS
3403/*
3404 * A helper function for the _utterly broken_ virtio device model to find out if
3405 * it's running on a big endian machine. Don't do this at home kids!
3406 */
98ed8ecf
GK
3407bool target_words_bigendian(void);
3408bool target_words_bigendian(void)
8e4a424b
BS
3409{
3410#if defined(TARGET_WORDS_BIGENDIAN)
3411 return true;
3412#else
3413 return false;
3414#endif
3415}
3416
76f35538 3417#ifndef CONFIG_USER_ONLY
a8170e5e 3418bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 3419{
5c8a00ce 3420 MemoryRegion*mr;
149f54b5 3421 hwaddr l = 1;
41063e1e 3422 bool res;
76f35538 3423
41063e1e 3424 rcu_read_lock();
5c8a00ce
PB
3425 mr = address_space_translate(&address_space_memory,
3426 phys_addr, &phys_addr, &l, false);
76f35538 3427
41063e1e
PB
3428 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3429 rcu_read_unlock();
3430 return res;
76f35538 3431}
bd2fa51f 3432
e3807054 3433int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
bd2fa51f
MH
3434{
3435 RAMBlock *block;
e3807054 3436 int ret = 0;
bd2fa51f 3437
0dc3f44a
MD
3438 rcu_read_lock();
3439 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
e3807054
DDAG
3440 ret = func(block->idstr, block->host, block->offset,
3441 block->used_length, opaque);
3442 if (ret) {
3443 break;
3444 }
bd2fa51f 3445 }
0dc3f44a 3446 rcu_read_unlock();
e3807054 3447 return ret;
bd2fa51f 3448}
ec3f8c99 3449#endif