git.proxmox.com Git - mirror_qemu.git / blame - exec.c
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c
FB
22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
4485bd26 29#if !defined(CONFIG_USER_ONLY)
47c8ca53 30#include "hw/boards.h"
4485bd26 31#endif
cc9e98cb 32#include "hw/qdev.h"
1de7afc9 33#include "qemu/osdep.h"
9c17d615 34#include "sysemu/kvm.h"
2ff3de68 35#include "sysemu/sysemu.h"
0d09e41a 36#include "hw/xen/xen.h"
1de7afc9
PB
37#include "qemu/timer.h"
38#include "qemu/config-file.h"
75a34036 39#include "qemu/error-report.h"
022c62cb 40#include "exec/memory.h"
9c17d615 41#include "sysemu/dma.h"
022c62cb 42#include "exec/address-spaces.h"
53a5960a
PB
43#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
432d268c 45#else /* !CONFIG_USER_ONLY */
9c17d615 46#include "sysemu/xen-mapcache.h"
6506e4f9 47#include "trace.h"
53a5960a 48#endif
0d6d3c87 49#include "exec/cpu-all.h"
0dc3f44a 50#include "qemu/rcu_queue.h"
022c62cb 51#include "exec/cputlb.h"
5b6dd868 52#include "translate-all.h"
0cac1b66 53
022c62cb 54#include "exec/memory-internal.h"
220c3ebd 55#include "exec/ram_addr.h"
67d95c15 56
b35ba30f
MT
57#include "qemu/range.h"
58
db7b5426 59//#define DEBUG_SUBPAGE
1196be37 60
e2eef170 61#if !defined(CONFIG_USER_ONLY)
981fdf23 62static bool in_migration;
94a6b54f 63
0dc3f44a
MD
64/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
65 * are protected by the ramlist lock.
66 */
0d53d9fe 67RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
68
69static MemoryRegion *system_memory;
309cb471 70static MemoryRegion *system_io;
62152b8a 71
f6790af6
AK
72AddressSpace address_space_io;
73AddressSpace address_space_memory;
2673a5da 74
0844e007 75MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 76static MemoryRegion io_mem_unassigned;
0e0df1e2 77
7bd4f430
PB
78/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
79#define RAM_PREALLOC (1 << 0)
80
dbcb8981
PB
81/* RAM is mmap-ed with MAP_SHARED */
82#define RAM_SHARED (1 << 1)
83
62be4e3a
MT
84/* Only a portion of RAM (used_length) is actually used, and migrated.
85 * This used_length size can change across reboots.
86 */
87#define RAM_RESIZEABLE (1 << 2)
88
e2eef170 89#endif
9fa3e853 90
bdc44640 91struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601
FB
92/* current CPU in the current thread. It is only valid inside
93 cpu_exec() */
4917cf44 94DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 95/* 0 = Do not count executed instructions.
bf20dc07 96 1 = Precise instruction counting.
2e70f6ef 97 2 = Adaptive rate instruction counting. */
5708fc66 98int use_icount;
6a00d601 99
e2eef170 100#if !defined(CONFIG_USER_ONLY)
4346ae3e 101
1db8abb1
PB
102typedef struct PhysPageEntry PhysPageEntry;
103
104struct PhysPageEntry {
9736e55b 105 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 106 uint32_t skip : 6;
9736e55b 107 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 108 uint32_t ptr : 26;
1db8abb1
PB
109};
110
8b795765
MT
111#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
112
03f49957 113/* Size of the L2 (and L3, etc) page tables. */
57271d63 114#define ADDR_SPACE_BITS 64
03f49957 115
026736ce 116#define P_L2_BITS 9
03f49957
PB
117#define P_L2_SIZE (1 << P_L2_BITS)
118
119#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
120
121typedef PhysPageEntry Node[P_L2_SIZE];
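/* A minimal standalone sketch (not part of exec.c) of how phys_page_find()
 * further below decomposes an address into per-level slots of this radix
 * tree.  TARGET_PAGE_BITS is assumed to be 12 purely for illustration; the
 * real value is target-dependent, and the SKETCH_* names are invented.
 */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_BITS 12
#define SKETCH_L2_BITS   9
#define SKETCH_L2_SIZE   (1 << SKETCH_L2_BITS)
#define SKETCH_ADDR_BITS 64
#define SKETCH_LEVELS    (((SKETCH_ADDR_BITS - SKETCH_PAGE_BITS - 1) / SKETCH_L2_BITS) + 1)

int main(void)
{
    uint64_t addr = 0x123456789abcULL;
    uint64_t index = addr >> SKETCH_PAGE_BITS;    /* page frame number */
    int level;

    /* Top level first, mirroring "(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)". */
    for (level = SKETCH_LEVELS - 1; level >= 0; level--) {
        unsigned slot = (index >> (level * SKETCH_L2_BITS)) & (SKETCH_L2_SIZE - 1);
        printf("level %d -> slot %u\n", level, slot);
    }
    return 0;
}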
0475d94f 122
53cb28cb 123typedef struct PhysPageMap {
79e2b9ae
PB
124 struct rcu_head rcu;
125
53cb28cb
MA
126 unsigned sections_nb;
127 unsigned sections_nb_alloc;
128 unsigned nodes_nb;
129 unsigned nodes_nb_alloc;
130 Node *nodes;
131 MemoryRegionSection *sections;
132} PhysPageMap;
133
1db8abb1 134struct AddressSpaceDispatch {
79e2b9ae
PB
135 struct rcu_head rcu;
136
1db8abb1
PB
137 /* This is a multi-level map on the physical address space.
138 * The bottom level has pointers to MemoryRegionSections.
139 */
140 PhysPageEntry phys_map;
53cb28cb 141 PhysPageMap map;
acc9d80b 142 AddressSpace *as;
1db8abb1
PB
143};
144
90260c6c
JK
145#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
146typedef struct subpage_t {
147 MemoryRegion iomem;
acc9d80b 148 AddressSpace *as;
90260c6c
JK
149 hwaddr base;
150 uint16_t sub_section[TARGET_PAGE_SIZE];
151} subpage_t;
152
b41aac4f
LPF
153#define PHYS_SECTION_UNASSIGNED 0
154#define PHYS_SECTION_NOTDIRTY 1
155#define PHYS_SECTION_ROM 2
156#define PHYS_SECTION_WATCH 3
5312bd8b 157
e2eef170 158static void io_mem_init(void);
62152b8a 159static void memory_map_init(void);
09daed84 160static void tcg_commit(MemoryListener *listener);
e2eef170 161
1ec9b909 162static MemoryRegion io_mem_watch;
6658ffb8 163#endif
fd6ce8f6 164
6d9a1304 165#if !defined(CONFIG_USER_ONLY)
d6f2ea22 166
53cb28cb 167static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 168{
53cb28cb
MA
169 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
170 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
171 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
172 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 173 }
f7bf5461
AK
174}
175
53cb28cb 176static uint32_t phys_map_node_alloc(PhysPageMap *map)
f7bf5461
AK
177{
178 unsigned i;
8b795765 179 uint32_t ret;
f7bf5461 180
53cb28cb 181 ret = map->nodes_nb++;
f7bf5461 182 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 183 assert(ret != map->nodes_nb_alloc);
03f49957 184 for (i = 0; i < P_L2_SIZE; ++i) {
53cb28cb
MA
185 map->nodes[ret][i].skip = 1;
186 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 187 }
f7bf5461 188 return ret;
d6f2ea22
AK
189}
190
53cb28cb
MA
191static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
192 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 193 int level)
f7bf5461
AK
194{
195 PhysPageEntry *p;
196 int i;
03f49957 197 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 198
9736e55b 199 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
53cb28cb
MA
200 lp->ptr = phys_map_node_alloc(map);
201 p = map->nodes[lp->ptr];
f7bf5461 202 if (level == 0) {
03f49957 203 for (i = 0; i < P_L2_SIZE; i++) {
9736e55b 204 p[i].skip = 0;
b41aac4f 205 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 206 }
67c4d23c 207 }
f7bf5461 208 } else {
53cb28cb 209 p = map->nodes[lp->ptr];
92e873b9 210 }
03f49957 211 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 212
03f49957 213 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 214 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 215 lp->skip = 0;
c19e8800 216 lp->ptr = leaf;
07f07b31
AK
217 *index += step;
218 *nb -= step;
2999097b 219 } else {
53cb28cb 220 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b
AK
221 }
222 ++lp;
f7bf5461
AK
223 }
224}
225
ac1970fb 226static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 227 hwaddr index, hwaddr nb,
2999097b 228 uint16_t leaf)
f7bf5461 229{
2999097b 230 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 231 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 232
53cb28cb 233 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
234}
235
b35ba30f
MT
 236/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
237 * and update our entry so we can skip it and go directly to the destination.
238 */
239static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
240{
241 unsigned valid_ptr = P_L2_SIZE;
242 int valid = 0;
243 PhysPageEntry *p;
244 int i;
245
246 if (lp->ptr == PHYS_MAP_NODE_NIL) {
247 return;
248 }
249
250 p = nodes[lp->ptr];
251 for (i = 0; i < P_L2_SIZE; i++) {
252 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
253 continue;
254 }
255
256 valid_ptr = i;
257 valid++;
258 if (p[i].skip) {
259 phys_page_compact(&p[i], nodes, compacted);
260 }
261 }
262
263 /* We can only compress if there's only one child. */
264 if (valid != 1) {
265 return;
266 }
267
268 assert(valid_ptr < P_L2_SIZE);
269
270 /* Don't compress if it won't fit in the # of bits we have. */
271 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
272 return;
273 }
274
275 lp->ptr = p[valid_ptr].ptr;
276 if (!p[valid_ptr].skip) {
277 /* If our only child is a leaf, make this a leaf. */
278 /* By design, we should have made this node a leaf to begin with so we
279 * should never reach here.
280 * But since it's so simple to handle this, let's do it just in case we
281 * change this rule.
282 */
283 lp->skip = 0;
284 } else {
285 lp->skip += p[valid_ptr].skip;
286 }
287}
288
289static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
290{
291 DECLARE_BITMAP(compacted, nodes_nb);
292
293 if (d->phys_map.skip) {
53cb28cb 294 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
295 }
296}
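/* A minimal standalone sketch (not part of exec.c) of the skip-merging rule
 * phys_page_compact() applies: when a non-leaf entry has exactly one valid
 * child, the parent absorbs the child's pointer and accumulates its skip
 * count, so phys_page_find() can drop several levels in one step.  The field
 * widths mirror PhysPageEntry; all other names are invented.
 */
#include <stdio.h>
#include <stdint.h>

struct sketch_entry {
    uint32_t skip : 6;
    uint32_t ptr  : 26;
};

static void sketch_absorb_only_child(struct sketch_entry *parent,
                                     const struct sketch_entry *child)
{
    /* Same "don't compress if it won't fit" guard as above. */
    if (parent->skip + child->skip >= (1 << 3)) {
        return;
    }
    parent->ptr = child->ptr;
    parent->skip = child->skip ? parent->skip + child->skip : 0;
}

int main(void)
{
    struct sketch_entry parent = { .skip = 1, .ptr = 7 };
    struct sketch_entry child  = { .skip = 2, .ptr = 42 };

    sketch_absorb_only_child(&parent, &child);
    printf("parent: skip=%u ptr=%u\n", (unsigned)parent.skip, (unsigned)parent.ptr);
    return 0;
}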
297
97115a8d 298static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 299 Node *nodes, MemoryRegionSection *sections)
92e873b9 300{
31ab2b4a 301 PhysPageEntry *p;
97115a8d 302 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 303 int i;
f1f6e3b8 304
9736e55b 305 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 306 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 307 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 308 }
9affd6fc 309 p = nodes[lp.ptr];
03f49957 310 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 311 }
b35ba30f
MT
312
313 if (sections[lp.ptr].size.hi ||
314 range_covers_byte(sections[lp.ptr].offset_within_address_space,
315 sections[lp.ptr].size.lo, addr)) {
316 return &sections[lp.ptr];
317 } else {
318 return &sections[PHYS_SECTION_UNASSIGNED];
319 }
f3705d53
AK
320}
321
e5548617
BS
322bool memory_region_is_unassigned(MemoryRegion *mr)
323{
2a8e7499 324 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 325 && mr != &io_mem_watch;
fd6ce8f6 326}
149f54b5 327
79e2b9ae 328/* Called from RCU critical section */
c7086b4a 329static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
330 hwaddr addr,
331 bool resolve_subpage)
9f029603 332{
90260c6c
JK
333 MemoryRegionSection *section;
334 subpage_t *subpage;
335
53cb28cb 336 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
337 if (resolve_subpage && section->mr->subpage) {
338 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 339 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
340 }
341 return section;
9f029603
JK
342}
343
79e2b9ae 344/* Called from RCU critical section */
90260c6c 345static MemoryRegionSection *
c7086b4a 346address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 347 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
348{
349 MemoryRegionSection *section;
a87f3954 350 Int128 diff;
149f54b5 351
c7086b4a 352 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
353 /* Compute offset within MemoryRegionSection */
354 addr -= section->offset_within_address_space;
355
356 /* Compute offset within MemoryRegion */
357 *xlat = addr + section->offset_within_region;
358
359 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 360 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
361 return section;
362}
90260c6c 363
a87f3954
PB
364static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
365{
366 if (memory_region_is_ram(mr)) {
367 return !(is_write && mr->readonly);
368 }
369 if (memory_region_is_romd(mr)) {
370 return !is_write;
371 }
372
373 return false;
374}
375
5c8a00ce
PB
376MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
377 hwaddr *xlat, hwaddr *plen,
378 bool is_write)
90260c6c 379{
30951157
AK
380 IOMMUTLBEntry iotlb;
381 MemoryRegionSection *section;
382 MemoryRegion *mr;
30951157 383
79e2b9ae 384 rcu_read_lock();
30951157 385 for (;;) {
79e2b9ae
PB
386 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
387 section = address_space_translate_internal(d, addr, &addr, plen, true);
30951157
AK
388 mr = section->mr;
389
390 if (!mr->iommu_ops) {
391 break;
392 }
393
8d7b8cb9 394 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157
AK
395 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
396 | (addr & iotlb.addr_mask));
23820dbf 397 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
30951157
AK
398 if (!(iotlb.perm & (1 << is_write))) {
399 mr = &io_mem_unassigned;
400 break;
401 }
402
403 as = iotlb.target_as;
404 }
405
fe680d0d 406 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954 407 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
23820dbf 408 *plen = MIN(page, *plen);
a87f3954
PB
409 }
410
30951157 411 *xlat = addr;
79e2b9ae 412 rcu_read_unlock();
30951157 413 return mr;
90260c6c
JK
414}
415
79e2b9ae 416/* Called from RCU critical section */
90260c6c 417MemoryRegionSection *
9d82b5a7
PB
418address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
419 hwaddr *xlat, hwaddr *plen)
90260c6c 420{
30951157 421 MemoryRegionSection *section;
9d82b5a7
PB
422 section = address_space_translate_internal(cpu->memory_dispatch,
423 addr, xlat, plen, false);
30951157
AK
424
425 assert(!section->mr->iommu_ops);
426 return section;
90260c6c 427}
5b6dd868 428#endif
fd6ce8f6 429
b170fce3 430#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
431
432static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 433{
259186a7 434 CPUState *cpu = opaque;
a513fe19 435
5b6dd868
BS
436 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
437 version_id is increased. */
259186a7 438 cpu->interrupt_request &= ~0x01;
c01a71c1 439 tlb_flush(cpu, 1);
5b6dd868
BS
440
441 return 0;
a513fe19 442}
7501267e 443
6c3bff0e
PD
444static int cpu_common_pre_load(void *opaque)
445{
446 CPUState *cpu = opaque;
447
adee6424 448 cpu->exception_index = -1;
6c3bff0e
PD
449
450 return 0;
451}
452
453static bool cpu_common_exception_index_needed(void *opaque)
454{
455 CPUState *cpu = opaque;
456
adee6424 457 return tcg_enabled() && cpu->exception_index != -1;
6c3bff0e
PD
458}
459
460static const VMStateDescription vmstate_cpu_common_exception_index = {
461 .name = "cpu_common/exception_index",
462 .version_id = 1,
463 .minimum_version_id = 1,
464 .fields = (VMStateField[]) {
465 VMSTATE_INT32(exception_index, CPUState),
466 VMSTATE_END_OF_LIST()
467 }
468};
469
1a1562f5 470const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
471 .name = "cpu_common",
472 .version_id = 1,
473 .minimum_version_id = 1,
6c3bff0e 474 .pre_load = cpu_common_pre_load,
5b6dd868 475 .post_load = cpu_common_post_load,
35d08458 476 .fields = (VMStateField[]) {
259186a7
AF
477 VMSTATE_UINT32(halted, CPUState),
478 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 479 VMSTATE_END_OF_LIST()
6c3bff0e
PD
480 },
481 .subsections = (VMStateSubsection[]) {
482 {
483 .vmsd = &vmstate_cpu_common_exception_index,
484 .needed = cpu_common_exception_index_needed,
485 } , {
486 /* empty */
487 }
5b6dd868
BS
488 }
489};
1a1562f5 490
5b6dd868 491#endif
ea041c0e 492
38d8f5c8 493CPUState *qemu_get_cpu(int index)
ea041c0e 494{
bdc44640 495 CPUState *cpu;
ea041c0e 496
bdc44640 497 CPU_FOREACH(cpu) {
55e5c285 498 if (cpu->cpu_index == index) {
bdc44640 499 return cpu;
55e5c285 500 }
ea041c0e 501 }
5b6dd868 502
bdc44640 503 return NULL;
ea041c0e
FB
504}
505
09daed84
EI
506#if !defined(CONFIG_USER_ONLY)
507void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
508{
509 /* We only support one address space per cpu at the moment. */
510 assert(cpu->as == as);
511
512 if (cpu->tcg_as_listener) {
513 memory_listener_unregister(cpu->tcg_as_listener);
514 } else {
515 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
516 }
517 cpu->tcg_as_listener->commit = tcg_commit;
518 memory_listener_register(cpu->tcg_as_listener, as);
519}
520#endif
521
5b6dd868 522void cpu_exec_init(CPUArchState *env)
ea041c0e 523{
5b6dd868 524 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 525 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 526 CPUState *some_cpu;
5b6dd868
BS
527 int cpu_index;
528
529#if defined(CONFIG_USER_ONLY)
530 cpu_list_lock();
531#endif
5b6dd868 532 cpu_index = 0;
bdc44640 533 CPU_FOREACH(some_cpu) {
5b6dd868
BS
534 cpu_index++;
535 }
55e5c285 536 cpu->cpu_index = cpu_index;
1b1ed8dc 537 cpu->numa_node = 0;
f0c3c505 538 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 539 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 540#ifndef CONFIG_USER_ONLY
09daed84 541 cpu->as = &address_space_memory;
5b6dd868 542 cpu->thread_id = qemu_get_thread_id();
cba70549 543 cpu_reload_memory_map(cpu);
5b6dd868 544#endif
bdc44640 545 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
546#if defined(CONFIG_USER_ONLY)
547 cpu_list_unlock();
548#endif
e0d47944
AF
549 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
550 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
551 }
5b6dd868 552#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
553 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
554 cpu_save, cpu_load, env);
b170fce3 555 assert(cc->vmsd == NULL);
e0d47944 556 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 557#endif
b170fce3
AF
558 if (cc->vmsd != NULL) {
559 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
560 }
ea041c0e
FB
561}
562
94df27fd 563#if defined(CONFIG_USER_ONLY)
00b941e5 564static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
565{
566 tb_invalidate_phys_page_range(pc, pc + 1, 0);
567}
568#else
00b941e5 569static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 570{
e8262a1b
MF
571 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
572 if (phys != -1) {
09daed84 573 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 574 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 575 }
1e7855a5 576}
c27004ec 577#endif
d720b93d 578
c527ee8f 579#if defined(CONFIG_USER_ONLY)
75a34036 580void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
581
582{
583}
584
3ee887e8
PM
585int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
586 int flags)
587{
588 return -ENOSYS;
589}
590
591void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
592{
593}
594
75a34036 595int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
596 int flags, CPUWatchpoint **watchpoint)
597{
598 return -ENOSYS;
599}
600#else
6658ffb8 601/* Add a watchpoint. */
75a34036 602int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 603 int flags, CPUWatchpoint **watchpoint)
6658ffb8 604{
c0ce998e 605 CPUWatchpoint *wp;
6658ffb8 606
05068c0d 607 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 608 if (len == 0 || (addr + len - 1) < addr) {
75a34036
AF
609 error_report("tried to set invalid watchpoint at %"
610 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
611 return -EINVAL;
612 }
7267c094 613 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
614
615 wp->vaddr = addr;
05068c0d 616 wp->len = len;
a1d1bb31
AL
617 wp->flags = flags;
618
2dc9f411 619 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
620 if (flags & BP_GDB) {
621 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
622 } else {
623 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
624 }
6658ffb8 625
31b030d4 626 tlb_flush_page(cpu, addr);
a1d1bb31
AL
627
628 if (watchpoint)
629 *watchpoint = wp;
630 return 0;
6658ffb8
PB
631}
632
a1d1bb31 633/* Remove a specific watchpoint. */
75a34036 634int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 635 int flags)
6658ffb8 636{
a1d1bb31 637 CPUWatchpoint *wp;
6658ffb8 638
ff4700b0 639 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 640 if (addr == wp->vaddr && len == wp->len
6e140f28 641 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 642 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
643 return 0;
644 }
645 }
a1d1bb31 646 return -ENOENT;
6658ffb8
PB
647}
648
a1d1bb31 649/* Remove a specific watchpoint by reference. */
75a34036 650void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 651{
ff4700b0 652 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 653
31b030d4 654 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 655
7267c094 656 g_free(watchpoint);
a1d1bb31
AL
657}
658
659/* Remove all matching watchpoints. */
75a34036 660void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 661{
c0ce998e 662 CPUWatchpoint *wp, *next;
a1d1bb31 663
ff4700b0 664 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
665 if (wp->flags & mask) {
666 cpu_watchpoint_remove_by_ref(cpu, wp);
667 }
c0ce998e 668 }
7d03f82f 669}
05068c0d
PM
670
671/* Return true if this watchpoint address matches the specified
672 * access (ie the address range covered by the watchpoint overlaps
673 * partially or completely with the address range covered by the
674 * access).
675 */
676static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
677 vaddr addr,
678 vaddr len)
679{
680 /* We know the lengths are non-zero, but a little caution is
681 * required to avoid errors in the case where the range ends
682 * exactly at the top of the address space and so addr + len
683 * wraps round to zero.
684 */
685 vaddr wpend = wp->vaddr + wp->len - 1;
686 vaddr addrend = addr + len - 1;
687
688 return !(addr > wpend || wp->vaddr > addrend);
689}
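/* A minimal standalone sketch (not part of exec.c) of the same closed-interval
 * overlap test, exercised with a range that ends at the very top of a 64-bit
 * address space: computing "start + (len - 1)" keeps both inclusive endpoints
 * representable, so the comparison never wraps.  All names are invented.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool sketch_ranges_overlap(uint64_t a, uint64_t a_len,
                                  uint64_t b, uint64_t b_len)
{
    uint64_t a_end = a + (a_len - 1);   /* inclusive endpoints */
    uint64_t b_end = b + (b_len - 1);

    return !(b > a_end || a > b_end);
}

int main(void)
{
    /* A 4-byte watchpoint on the last four bytes of the address space
     * versus a 1-byte access to the final byte: reported as overlapping. */
    uint64_t wp_addr = UINT64_MAX - 3;

    printf("%s\n", sketch_ranges_overlap(wp_addr, 4, UINT64_MAX, 1)
                   ? "overlap" : "disjoint");
    return 0;
}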
690
c527ee8f 691#endif
7d03f82f 692
a1d1bb31 693/* Add a breakpoint. */
b3310ab3 694int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 695 CPUBreakpoint **breakpoint)
4c3a88a2 696{
c0ce998e 697 CPUBreakpoint *bp;
3b46e624 698
7267c094 699 bp = g_malloc(sizeof(*bp));
4c3a88a2 700
a1d1bb31
AL
701 bp->pc = pc;
702 bp->flags = flags;
703
2dc9f411 704 /* keep all GDB-injected breakpoints in front */
00b941e5 705 if (flags & BP_GDB) {
f0c3c505 706 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 707 } else {
f0c3c505 708 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 709 }
3b46e624 710
f0c3c505 711 breakpoint_invalidate(cpu, pc);
a1d1bb31 712
00b941e5 713 if (breakpoint) {
a1d1bb31 714 *breakpoint = bp;
00b941e5 715 }
4c3a88a2 716 return 0;
4c3a88a2
FB
717}
718
a1d1bb31 719/* Remove a specific breakpoint. */
b3310ab3 720int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 721{
a1d1bb31
AL
722 CPUBreakpoint *bp;
723
f0c3c505 724 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 725 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 726 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
727 return 0;
728 }
7d03f82f 729 }
a1d1bb31 730 return -ENOENT;
7d03f82f
EI
731}
732
a1d1bb31 733/* Remove a specific breakpoint by reference. */
b3310ab3 734void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 735{
f0c3c505
AF
736 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
737
738 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 739
7267c094 740 g_free(breakpoint);
a1d1bb31
AL
741}
742
743/* Remove all matching breakpoints. */
b3310ab3 744void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 745{
c0ce998e 746 CPUBreakpoint *bp, *next;
a1d1bb31 747
f0c3c505 748 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
749 if (bp->flags & mask) {
750 cpu_breakpoint_remove_by_ref(cpu, bp);
751 }
c0ce998e 752 }
4c3a88a2
FB
753}
754
c33a346e
FB
755/* enable or disable single step mode. EXCP_DEBUG is returned by the
756 CPU loop after each instruction */
3825b28f 757void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 758{
ed2803da
AF
759 if (cpu->singlestep_enabled != enabled) {
760 cpu->singlestep_enabled = enabled;
761 if (kvm_enabled()) {
38e478ec 762 kvm_update_guest_debug(cpu, 0);
ed2803da 763 } else {
ccbb4d44 764 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 765 /* XXX: only flush what is necessary */
38e478ec 766 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
767 tb_flush(env);
768 }
c33a346e 769 }
c33a346e
FB
770}
771
a47dddd7 772void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
773{
774 va_list ap;
493ae1f0 775 va_list ap2;
7501267e
FB
776
777 va_start(ap, fmt);
493ae1f0 778 va_copy(ap2, ap);
7501267e
FB
779 fprintf(stderr, "qemu: fatal: ");
780 vfprintf(stderr, fmt, ap);
781 fprintf(stderr, "\n");
878096ee 782 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
783 if (qemu_log_enabled()) {
784 qemu_log("qemu: fatal: ");
785 qemu_log_vprintf(fmt, ap2);
786 qemu_log("\n");
a0762859 787 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 788 qemu_log_flush();
93fcfe39 789 qemu_log_close();
924edcae 790 }
493ae1f0 791 va_end(ap2);
f9373291 792 va_end(ap);
fd052bf6
RV
793#if defined(CONFIG_USER_ONLY)
794 {
795 struct sigaction act;
796 sigfillset(&act.sa_mask);
797 act.sa_handler = SIG_DFL;
798 sigaction(SIGABRT, &act, NULL);
799 }
800#endif
7501267e
FB
801 abort();
802}
803
0124311e 804#if !defined(CONFIG_USER_ONLY)
0dc3f44a 805/* Called from RCU critical section */
041603fe
PB
806static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
807{
808 RAMBlock *block;
809
43771539 810 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 811 if (block && addr - block->offset < block->max_length) {
041603fe
PB
812 goto found;
813 }
0dc3f44a 814 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5 815 if (addr - block->offset < block->max_length) {
041603fe
PB
816 goto found;
817 }
818 }
819
820 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
821 abort();
822
823found:
43771539
PB
824 /* It is safe to write mru_block outside the iothread lock. This
825 * is what happens:
826 *
827 * mru_block = xxx
828 * rcu_read_unlock()
829 * xxx removed from list
830 * rcu_read_lock()
831 * read mru_block
832 * mru_block = NULL;
833 * call_rcu(reclaim_ramblock, xxx);
834 * rcu_read_unlock()
835 *
836 * atomic_rcu_set is not needed here. The block was already published
837 * when it was placed into the list. Here we're just making an extra
838 * copy of the pointer.
839 */
041603fe
PB
840 ram_list.mru_block = block;
841 return block;
842}
843
a2f4d5be 844static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 845{
041603fe 846 ram_addr_t start1;
a2f4d5be
JQ
847 RAMBlock *block;
848 ram_addr_t end;
849
850 end = TARGET_PAGE_ALIGN(start + length);
851 start &= TARGET_PAGE_MASK;
d24981d3 852
0dc3f44a 853 rcu_read_lock();
041603fe
PB
854 block = qemu_get_ram_block(start);
855 assert(block == qemu_get_ram_block(end - 1));
1240be24 856 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
041603fe 857 cpu_tlb_reset_dirty_all(start1, length);
0dc3f44a 858 rcu_read_unlock();
d24981d3
JQ
859}
860
5579c7f3 861/* Note: start and end must be within the same ram block. */
a2f4d5be 862void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 863 unsigned client)
1ccde1cb 864{
1ccde1cb
FB
865 if (length == 0)
866 return;
c8d6f66a 867 cpu_physical_memory_clear_dirty_range_type(start, length, client);
f23db169 868
d24981d3 869 if (tcg_enabled()) {
a2f4d5be 870 tlb_reset_dirty_range_all(start, length);
5579c7f3 871 }
1ccde1cb
FB
872}
873
981fdf23 874static void cpu_physical_memory_set_dirty_tracking(bool enable)
74576198
AL
875{
876 in_migration = enable;
74576198
AL
877}
878
79e2b9ae 879/* Called from RCU critical section */
bb0e627a 880hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
881 MemoryRegionSection *section,
882 target_ulong vaddr,
883 hwaddr paddr, hwaddr xlat,
884 int prot,
885 target_ulong *address)
e5548617 886{
a8170e5e 887 hwaddr iotlb;
e5548617
BS
888 CPUWatchpoint *wp;
889
cc5bea60 890 if (memory_region_is_ram(section->mr)) {
e5548617
BS
891 /* Normal RAM. */
892 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 893 + xlat;
e5548617 894 if (!section->readonly) {
b41aac4f 895 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 896 } else {
b41aac4f 897 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
898 }
899 } else {
1b3fb98f 900 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 901 iotlb += xlat;
e5548617
BS
902 }
903
904 /* Make accesses to pages with watchpoints go via the
905 watchpoint trap routines. */
ff4700b0 906 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 907 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
908 /* Avoid trapping reads of pages with a write breakpoint. */
909 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 910 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
911 *address |= TLB_MMIO;
912 break;
913 }
914 }
915 }
916
917 return iotlb;
918}
9fa3e853
FB
919#endif /* defined(CONFIG_USER_ONLY) */
920
e2eef170 921#if !defined(CONFIG_USER_ONLY)
8da3ff18 922
c227f099 923static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 924 uint16_t section);
acc9d80b 925static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 926
a2b257d6
IM
927static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
928 qemu_anon_ram_alloc;
91138037
MA
929
930/*
 931 * Set a custom physical guest memory allocator.
932 * Accelerators with unusual needs may need this. Hopefully, we can
933 * get rid of it eventually.
934 */
a2b257d6 935void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
91138037
MA
936{
937 phys_mem_alloc = alloc;
938}
939
53cb28cb
MA
940static uint16_t phys_section_add(PhysPageMap *map,
941 MemoryRegionSection *section)
5312bd8b 942{
68f3f65b
PB
943 /* The physical section number is ORed with a page-aligned
944 * pointer to produce the iotlb entries. Thus it should
945 * never overflow into the page-aligned value.
946 */
53cb28cb 947 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 948
53cb28cb
MA
949 if (map->sections_nb == map->sections_nb_alloc) {
950 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
951 map->sections = g_renew(MemoryRegionSection, map->sections,
952 map->sections_nb_alloc);
5312bd8b 953 }
53cb28cb 954 map->sections[map->sections_nb] = *section;
dfde4e6e 955 memory_region_ref(section->mr);
53cb28cb 956 return map->sections_nb++;
5312bd8b
AK
957}
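/* A minimal standalone sketch (not part of exec.c) of why sections_nb must
 * stay below TARGET_PAGE_SIZE: the section number is OR-ed into a page-aligned
 * value when iotlb entries are built (see memory_region_section_get_iotlb()),
 * so it has to fit entirely in the in-page bits.  A 4096-byte page is assumed
 * here purely for illustration, and the SKETCH_* names are invented.
 */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u
#define SKETCH_PAGE_MASK (~(uint64_t)(SKETCH_PAGE_SIZE - 1))

int main(void)
{
    uint64_t page_aligned = 0x7f0000ULL & SKETCH_PAGE_MASK;
    uint64_t section = 0x123;           /* must be < SKETCH_PAGE_SIZE */
    uint64_t iotlb = page_aligned | section;

    /* Both halves can be recovered because they never overlap. */
    printf("page 0x%llx, section 0x%llx\n",
           (unsigned long long)(iotlb & SKETCH_PAGE_MASK),
           (unsigned long long)(iotlb & (SKETCH_PAGE_SIZE - 1)));
    return 0;
}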
958
058bc4b5
PB
959static void phys_section_destroy(MemoryRegion *mr)
960{
dfde4e6e
PB
961 memory_region_unref(mr);
962
058bc4b5
PB
963 if (mr->subpage) {
964 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 965 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
966 g_free(subpage);
967 }
968}
969
6092666e 970static void phys_sections_free(PhysPageMap *map)
5312bd8b 971{
9affd6fc
PB
972 while (map->sections_nb > 0) {
973 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
974 phys_section_destroy(section->mr);
975 }
9affd6fc
PB
976 g_free(map->sections);
977 g_free(map->nodes);
5312bd8b
AK
978}
979
ac1970fb 980static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
981{
982 subpage_t *subpage;
a8170e5e 983 hwaddr base = section->offset_within_address_space
0f0cb164 984 & TARGET_PAGE_MASK;
97115a8d 985 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 986 d->map.nodes, d->map.sections);
0f0cb164
AK
987 MemoryRegionSection subsection = {
988 .offset_within_address_space = base,
052e87b0 989 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 990 };
a8170e5e 991 hwaddr start, end;
0f0cb164 992
f3705d53 993 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 994
f3705d53 995 if (!(existing->mr->subpage)) {
acc9d80b 996 subpage = subpage_init(d->as, base);
3be91e86 997 subsection.address_space = d->as;
0f0cb164 998 subsection.mr = &subpage->iomem;
ac1970fb 999 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 1000 phys_section_add(&d->map, &subsection));
0f0cb164 1001 } else {
f3705d53 1002 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
1003 }
1004 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 1005 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
1006 subpage_register(subpage, start, end,
1007 phys_section_add(&d->map, section));
0f0cb164
AK
1008}
1009
1010
052e87b0
PB
1011static void register_multipage(AddressSpaceDispatch *d,
1012 MemoryRegionSection *section)
33417e70 1013{
a8170e5e 1014 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 1015 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
1016 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1017 TARGET_PAGE_BITS));
dd81124b 1018
733d5ef5
PB
1019 assert(num_pages);
1020 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
1021}
1022
ac1970fb 1023static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 1024{
89ae337a 1025 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1026 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1027 MemoryRegionSection now = *section, remain = *section;
052e87b0 1028 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1029
733d5ef5
PB
1030 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1031 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1032 - now.offset_within_address_space;
1033
052e87b0 1034 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1035 register_subpage(d, &now);
733d5ef5 1036 } else {
052e87b0 1037 now.size = int128_zero();
733d5ef5 1038 }
052e87b0
PB
1039 while (int128_ne(remain.size, now.size)) {
1040 remain.size = int128_sub(remain.size, now.size);
1041 remain.offset_within_address_space += int128_get64(now.size);
1042 remain.offset_within_region += int128_get64(now.size);
69b67646 1043 now = remain;
052e87b0 1044 if (int128_lt(remain.size, page_size)) {
733d5ef5 1045 register_subpage(d, &now);
88266249 1046 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1047 now.size = page_size;
ac1970fb 1048 register_subpage(d, &now);
69b67646 1049 } else {
052e87b0 1050 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1051 register_multipage(d, &now);
69b67646 1052 }
0f0cb164
AK
1053 }
1054}
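/* A minimal standalone sketch (not part of exec.c) of the splitting policy
 * mem_add() applies to a section, reduced to plain integers: an unaligned
 * head and a partial tail become subpages, while the aligned middle is
 * registered as full pages.  The 4 KiB page size and the example range are
 * assumptions for illustration only; all names are invented.
 */
#include <stdio.h>
#include <inttypes.h>

#define SKETCH_PAGE_SIZE 0x1000ULL
#define SKETCH_PAGE_MASK (~(SKETCH_PAGE_SIZE - 1))

int main(void)
{
    uint64_t base = 0x1234, size = 0x3000;   /* hypothetical section */
    uint64_t cur = base, left = size;

    if (cur & ~SKETCH_PAGE_MASK) {           /* unaligned head -> subpage */
        uint64_t head = (cur | ~SKETCH_PAGE_MASK) + 1 - cur;
        if (head > left) {
            head = left;
        }
        printf("subpage   0x%" PRIx64 " +0x%" PRIx64 "\n", cur, head);
        cur += head;
        left -= head;
    }
    if (left >= SKETCH_PAGE_SIZE) {          /* aligned middle -> full pages */
        uint64_t body = left & SKETCH_PAGE_MASK;
        printf("multipage 0x%" PRIx64 " +0x%" PRIx64 "\n", cur, body);
        cur += body;
        left -= body;
    }
    if (left) {                              /* partial tail -> subpage */
        printf("subpage   0x%" PRIx64 " +0x%" PRIx64 "\n", cur, left);
    }
    return 0;
}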
1055
62a2744c
SY
1056void qemu_flush_coalesced_mmio_buffer(void)
1057{
1058 if (kvm_enabled())
1059 kvm_flush_coalesced_mmio_buffer();
1060}
1061
b2a8658e
UD
1062void qemu_mutex_lock_ramlist(void)
1063{
1064 qemu_mutex_lock(&ram_list.mutex);
1065}
1066
1067void qemu_mutex_unlock_ramlist(void)
1068{
1069 qemu_mutex_unlock(&ram_list.mutex);
1070}
1071
e1e84ba0 1072#ifdef __linux__
c902760f
MT
1073
1074#include <sys/vfs.h>
1075
1076#define HUGETLBFS_MAGIC 0x958458f6
1077
fc7a5800 1078static long gethugepagesize(const char *path, Error **errp)
c902760f
MT
1079{
1080 struct statfs fs;
1081 int ret;
1082
1083 do {
9742bf26 1084 ret = statfs(path, &fs);
c902760f
MT
1085 } while (ret != 0 && errno == EINTR);
1086
1087 if (ret != 0) {
fc7a5800
HT
1088 error_setg_errno(errp, errno, "failed to get page size of file %s",
1089 path);
9742bf26 1090 return 0;
c902760f
MT
1091 }
1092
1093 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1094 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1095
1096 return fs.f_bsize;
1097}
1098
04b16653
AW
1099static void *file_ram_alloc(RAMBlock *block,
1100 ram_addr_t memory,
7f56e740
PB
1101 const char *path,
1102 Error **errp)
c902760f
MT
1103{
1104 char *filename;
8ca761f6
PF
1105 char *sanitized_name;
1106 char *c;
557529dd 1107 void *area = NULL;
c902760f 1108 int fd;
557529dd 1109 uint64_t hpagesize;
fc7a5800 1110 Error *local_err = NULL;
c902760f 1111
fc7a5800
HT
1112 hpagesize = gethugepagesize(path, &local_err);
1113 if (local_err) {
1114 error_propagate(errp, local_err);
f9a49dfa 1115 goto error;
c902760f 1116 }
a2b257d6 1117 block->mr->align = hpagesize;
c902760f
MT
1118
1119 if (memory < hpagesize) {
557529dd
HT
1120 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1121 "or larger than huge page size 0x%" PRIx64,
1122 memory, hpagesize);
1123 goto error;
c902760f
MT
1124 }
1125
1126 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1127 error_setg(errp,
1128 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1129 goto error;
c902760f
MT
1130 }
1131
8ca761f6 1132 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1133 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1134 for (c = sanitized_name; *c != '\0'; c++) {
1135 if (*c == '/')
1136 *c = '_';
1137 }
1138
1139 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1140 sanitized_name);
1141 g_free(sanitized_name);
c902760f
MT
1142
1143 fd = mkstemp(filename);
1144 if (fd < 0) {
7f56e740
PB
1145 error_setg_errno(errp, errno,
1146 "unable to create backing store for hugepages");
e4ada482 1147 g_free(filename);
f9a49dfa 1148 goto error;
c902760f
MT
1149 }
1150 unlink(filename);
e4ada482 1151 g_free(filename);
c902760f
MT
1152
1153 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1154
1155 /*
1156 * ftruncate is not supported by hugetlbfs in older
1157 * hosts, so don't bother bailing out on errors.
1158 * If anything goes wrong with it under other filesystems,
1159 * mmap will fail.
1160 */
7f56e740 1161 if (ftruncate(fd, memory)) {
9742bf26 1162 perror("ftruncate");
7f56e740 1163 }
c902760f 1164
dbcb8981
PB
1165 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1166 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1167 fd, 0);
c902760f 1168 if (area == MAP_FAILED) {
7f56e740
PB
1169 error_setg_errno(errp, errno,
1170 "unable to map backing store for hugepages");
9742bf26 1171 close(fd);
f9a49dfa 1172 goto error;
c902760f 1173 }
ef36fa14
MT
1174
1175 if (mem_prealloc) {
38183310 1176 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1177 }
1178
04b16653 1179 block->fd = fd;
c902760f 1180 return area;
f9a49dfa
MT
1181
1182error:
1183 if (mem_prealloc) {
81b07353 1184 error_report("%s", error_get_pretty(*errp));
f9a49dfa
MT
1185 exit(1);
1186 }
1187 return NULL;
c902760f
MT
1188}
1189#endif
1190
0dc3f44a 1191/* Called with the ramlist lock held. */
d17b5288 1192static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1193{
1194 RAMBlock *block, *next_block;
3e837b2c 1195 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1196
49cd9ac6
SH
1197 assert(size != 0); /* it would hand out same offset multiple times */
1198
0dc3f44a 1199 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
04b16653 1200 return 0;
0d53d9fe 1201 }
04b16653 1202
0dc3f44a 1203 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
f15fbc4b 1204 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653 1205
62be4e3a 1206 end = block->offset + block->max_length;
04b16653 1207
0dc3f44a 1208 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
04b16653
AW
1209 if (next_block->offset >= end) {
1210 next = MIN(next, next_block->offset);
1211 }
1212 }
1213 if (next - end >= size && next - end < mingap) {
3e837b2c 1214 offset = end;
04b16653
AW
1215 mingap = next - end;
1216 }
1217 }
3e837b2c
AW
1218
1219 if (offset == RAM_ADDR_MAX) {
1220 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1221 (uint64_t)size);
1222 abort();
1223 }
1224
04b16653
AW
1225 return offset;
1226}
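/* A minimal standalone sketch (not part of exec.c) of the gap search above,
 * restated over a plain array: for every block, find the closest block that
 * starts at or after its end, and keep the smallest gap that still fits the
 * request.  The block layout is invented purely for illustration.
 */
#include <stdio.h>
#include <stdint.h>

struct sketch_block { uint64_t offset, max_length; };

static uint64_t sketch_find_offset(const struct sketch_block *b, int n,
                                   uint64_t size)
{
    uint64_t best = UINT64_MAX, mingap = UINT64_MAX;
    int i, j;

    if (n == 0) {
        return 0;
    }
    for (i = 0; i < n; i++) {
        uint64_t end = b[i].offset + b[i].max_length;
        uint64_t next = UINT64_MAX;

        for (j = 0; j < n; j++) {
            if (b[j].offset >= end && b[j].offset < next) {
                next = b[j].offset;
            }
        }
        if (next - end >= size && next - end < mingap) {
            best = end;
            mingap = next - end;
        }
    }
    return best;   /* UINT64_MAX would mean no gap was found */
}

int main(void)
{
    struct sketch_block blocks[] = {
        { 0x00000, 0x20000 },   /* occupies [0x00000, 0x20000) */
        { 0x30000, 0x10000 },   /* leaves a 0x10000-byte hole before it */
    };

    printf("0x%llx\n",
           (unsigned long long)sketch_find_offset(blocks, 2, 0x8000));
    return 0;
}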
1227
652d7ec2 1228ram_addr_t last_ram_offset(void)
d17b5288
AW
1229{
1230 RAMBlock *block;
1231 ram_addr_t last = 0;
1232
0dc3f44a
MD
1233 rcu_read_lock();
1234 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
62be4e3a 1235 last = MAX(last, block->offset + block->max_length);
0d53d9fe 1236 }
0dc3f44a 1237 rcu_read_unlock();
d17b5288
AW
1238 return last;
1239}
1240
ddb97f1d
JB
1241static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1242{
1243 int ret;
ddb97f1d
JB
1244
1245 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
47c8ca53 1246 if (!machine_dump_guest_core(current_machine)) {
ddb97f1d
JB
1247 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1248 if (ret) {
1249 perror("qemu_madvise");
1250 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1251 "but dump_guest_core=off specified\n");
1252 }
1253 }
1254}
1255
0dc3f44a
MD
1256/* Called within an RCU critical section, or while the ramlist lock
1257 * is held.
1258 */
20cfe881 1259static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1260{
20cfe881 1261 RAMBlock *block;
84b89d78 1262
0dc3f44a 1263 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1264 if (block->offset == addr) {
20cfe881 1265 return block;
c5705a77
AK
1266 }
1267 }
20cfe881
HT
1268
1269 return NULL;
1270}
1271
ae3a7047 1272/* Called with iothread lock held. */
20cfe881
HT
1273void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1274{
ae3a7047 1275 RAMBlock *new_block, *block;
20cfe881 1276
0dc3f44a 1277 rcu_read_lock();
ae3a7047 1278 new_block = find_ram_block(addr);
c5705a77
AK
1279 assert(new_block);
1280 assert(!new_block->idstr[0]);
84b89d78 1281
09e5ab63
AL
1282 if (dev) {
1283 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1284 if (id) {
1285 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1286 g_free(id);
84b89d78
CM
1287 }
1288 }
1289 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1290
0dc3f44a 1291 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1292 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1293 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1294 new_block->idstr);
1295 abort();
1296 }
1297 }
0dc3f44a 1298 rcu_read_unlock();
c5705a77
AK
1299}
1300
ae3a7047 1301/* Called with iothread lock held. */
20cfe881
HT
1302void qemu_ram_unset_idstr(ram_addr_t addr)
1303{
ae3a7047 1304 RAMBlock *block;
20cfe881 1305
ae3a7047
MD
1306 /* FIXME: arch_init.c assumes that this is not called throughout
1307 * migration. Ignore the problem since hot-unplug during migration
1308 * does not work anyway.
1309 */
1310
0dc3f44a 1311 rcu_read_lock();
ae3a7047 1312 block = find_ram_block(addr);
20cfe881
HT
1313 if (block) {
1314 memset(block->idstr, 0, sizeof(block->idstr));
1315 }
0dc3f44a 1316 rcu_read_unlock();
20cfe881
HT
1317}
1318
8490fc78
LC
1319static int memory_try_enable_merging(void *addr, size_t len)
1320{
75cc7f01 1321 if (!machine_mem_merge(current_machine)) {
8490fc78
LC
1322 /* disabled by the user */
1323 return 0;
1324 }
1325
1326 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1327}
1328
62be4e3a
MT
 1329/* Only legal before the guest might have detected the memory size: e.g. on
1330 * incoming migration, or right after reset.
1331 *
 1332 * As the memory core doesn't know how the memory is accessed, it is up to
 1333 * the resize callback to update device state and/or add assertions to detect
1334 * misuse, if necessary.
1335 */
1336int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1337{
1338 RAMBlock *block = find_ram_block(base);
1339
1340 assert(block);
1341
129ddaf3
MT
1342 newsize = TARGET_PAGE_ALIGN(newsize);
1343
62be4e3a
MT
1344 if (block->used_length == newsize) {
1345 return 0;
1346 }
1347
1348 if (!(block->flags & RAM_RESIZEABLE)) {
1349 error_setg_errno(errp, EINVAL,
1350 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1351 " in != 0x" RAM_ADDR_FMT, block->idstr,
1352 newsize, block->used_length);
1353 return -EINVAL;
1354 }
1355
1356 if (block->max_length < newsize) {
1357 error_setg_errno(errp, EINVAL,
1358 "Length too large: %s: 0x" RAM_ADDR_FMT
1359 " > 0x" RAM_ADDR_FMT, block->idstr,
1360 newsize, block->max_length);
1361 return -EINVAL;
1362 }
1363
1364 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1365 block->used_length = newsize;
1366 cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
1367 memory_region_set_size(block->mr, newsize);
1368 if (block->resized) {
1369 block->resized(block->idstr, newsize, block->host);
1370 }
1371 return 0;
1372}
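/* A hypothetical caller of qemu_ram_resize(), sketched only to illustrate the
 * contract above: the block must have been created resizeable and the new
 * size must stay within its max_length, otherwise -EINVAL comes back through
 * errp.  This fragment assumes it is built inside QEMU (ram_addr_t, Error,
 * error_report() and error_get_pretty() come from QEMU headers); the
 * sketch_/my_ names are invented.
 */
static void sketch_resize_ram_block(ram_addr_t my_block_base,
                                    ram_addr_t my_new_size)
{
    Error *err = NULL;

    if (qemu_ram_resize(my_block_base, my_new_size, &err) < 0) {
        error_report("%s", error_get_pretty(err));
        error_free(err);
    }
}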
1373
ef701d7b 1374static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1375{
e1c57ab8 1376 RAMBlock *block;
0d53d9fe 1377 RAMBlock *last_block = NULL;
2152f5ca
JQ
1378 ram_addr_t old_ram_size, new_ram_size;
1379
1380 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1381
b2a8658e 1382 qemu_mutex_lock_ramlist();
9b8424d5 1383 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1384
1385 if (!new_block->host) {
1386 if (xen_enabled()) {
9b8424d5
MT
1387 xen_ram_alloc(new_block->offset, new_block->max_length,
1388 new_block->mr);
e1c57ab8 1389 } else {
9b8424d5 1390 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1391 &new_block->mr->align);
39228250 1392 if (!new_block->host) {
ef701d7b
HT
1393 error_setg_errno(errp, errno,
1394 "cannot set up guest memory '%s'",
1395 memory_region_name(new_block->mr));
1396 qemu_mutex_unlock_ramlist();
1397 return -1;
39228250 1398 }
9b8424d5 1399 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1400 }
c902760f 1401 }
94a6b54f 1402
0d53d9fe
MD
1403 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1404 * QLIST (which has an RCU-friendly variant) does not have insertion at
1405 * tail, so save the last element in last_block.
1406 */
0dc3f44a 1407 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
0d53d9fe 1408 last_block = block;
9b8424d5 1409 if (block->max_length < new_block->max_length) {
abb26d63
PB
1410 break;
1411 }
1412 }
1413 if (block) {
0dc3f44a 1414 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
0d53d9fe 1415 } else if (last_block) {
0dc3f44a 1416 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
0d53d9fe 1417 } else { /* list is empty */
0dc3f44a 1418 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
abb26d63 1419 }
0d6d3c87 1420 ram_list.mru_block = NULL;
94a6b54f 1421
0dc3f44a
MD
1422 /* Write list before version */
1423 smp_wmb();
f798b07f 1424 ram_list.version++;
b2a8658e 1425 qemu_mutex_unlock_ramlist();
f798b07f 1426
2152f5ca
JQ
1427 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1428
1429 if (new_ram_size > old_ram_size) {
1ab4c8ce 1430 int i;
ae3a7047
MD
1431
1432 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1ab4c8ce
JQ
1433 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1434 ram_list.dirty_memory[i] =
1435 bitmap_zero_extend(ram_list.dirty_memory[i],
1436 old_ram_size, new_ram_size);
1437 }
2152f5ca 1438 }
9b8424d5
MT
1439 cpu_physical_memory_set_dirty_range(new_block->offset,
1440 new_block->used_length);
94a6b54f 1441
a904c911
PB
1442 if (new_block->host) {
1443 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1444 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1445 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1446 if (kvm_enabled()) {
1447 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1448 }
e1c57ab8 1449 }
6f0437e8 1450
94a6b54f
PB
1451 return new_block->offset;
1452}
e9a1ab19 1453
0b183fc8 1454#ifdef __linux__
e1c57ab8 1455ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1456 bool share, const char *mem_path,
7f56e740 1457 Error **errp)
e1c57ab8
PB
1458{
1459 RAMBlock *new_block;
ef701d7b
HT
1460 ram_addr_t addr;
1461 Error *local_err = NULL;
e1c57ab8
PB
1462
1463 if (xen_enabled()) {
7f56e740
PB
1464 error_setg(errp, "-mem-path not supported with Xen");
1465 return -1;
e1c57ab8
PB
1466 }
1467
1468 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1469 /*
1470 * file_ram_alloc() needs to allocate just like
1471 * phys_mem_alloc, but we haven't bothered to provide
1472 * a hook there.
1473 */
7f56e740
PB
1474 error_setg(errp,
1475 "-mem-path not supported with this accelerator");
1476 return -1;
e1c57ab8
PB
1477 }
1478
1479 size = TARGET_PAGE_ALIGN(size);
1480 new_block = g_malloc0(sizeof(*new_block));
1481 new_block->mr = mr;
9b8424d5
MT
1482 new_block->used_length = size;
1483 new_block->max_length = size;
dbcb8981 1484 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1485 new_block->host = file_ram_alloc(new_block, size,
1486 mem_path, errp);
1487 if (!new_block->host) {
1488 g_free(new_block);
1489 return -1;
1490 }
1491
ef701d7b
HT
1492 addr = ram_block_add(new_block, &local_err);
1493 if (local_err) {
1494 g_free(new_block);
1495 error_propagate(errp, local_err);
1496 return -1;
1497 }
1498 return addr;
e1c57ab8 1499}
0b183fc8 1500#endif
e1c57ab8 1501
62be4e3a
MT
1502static
1503ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1504 void (*resized)(const char*,
1505 uint64_t length,
1506 void *host),
1507 void *host, bool resizeable,
ef701d7b 1508 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1509{
1510 RAMBlock *new_block;
ef701d7b
HT
1511 ram_addr_t addr;
1512 Error *local_err = NULL;
e1c57ab8
PB
1513
1514 size = TARGET_PAGE_ALIGN(size);
62be4e3a 1515 max_size = TARGET_PAGE_ALIGN(max_size);
e1c57ab8
PB
1516 new_block = g_malloc0(sizeof(*new_block));
1517 new_block->mr = mr;
62be4e3a 1518 new_block->resized = resized;
9b8424d5
MT
1519 new_block->used_length = size;
1520 new_block->max_length = max_size;
62be4e3a 1521 assert(max_size >= size);
e1c57ab8
PB
1522 new_block->fd = -1;
1523 new_block->host = host;
1524 if (host) {
7bd4f430 1525 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1526 }
62be4e3a
MT
1527 if (resizeable) {
1528 new_block->flags |= RAM_RESIZEABLE;
1529 }
ef701d7b
HT
1530 addr = ram_block_add(new_block, &local_err);
1531 if (local_err) {
1532 g_free(new_block);
1533 error_propagate(errp, local_err);
1534 return -1;
1535 }
1536 return addr;
e1c57ab8
PB
1537}
1538
62be4e3a
MT
1539ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1540 MemoryRegion *mr, Error **errp)
1541{
1542 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1543}
1544
ef701d7b 1545ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1546{
62be4e3a
MT
1547 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1548}
1549
1550ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1551 void (*resized)(const char*,
1552 uint64_t length,
1553 void *host),
1554 MemoryRegion *mr, Error **errp)
1555{
1556 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
1557}
1558
1f2e98b6
AW
1559void qemu_ram_free_from_ptr(ram_addr_t addr)
1560{
1561 RAMBlock *block;
1562
b2a8658e 1563 qemu_mutex_lock_ramlist();
0dc3f44a 1564 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1f2e98b6 1565 if (addr == block->offset) {
0dc3f44a 1566 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1567 ram_list.mru_block = NULL;
0dc3f44a
MD
1568 /* Write list before version */
1569 smp_wmb();
f798b07f 1570 ram_list.version++;
43771539 1571 g_free_rcu(block, rcu);
b2a8658e 1572 break;
1f2e98b6
AW
1573 }
1574 }
b2a8658e 1575 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1576}
1577
43771539
PB
1578static void reclaim_ramblock(RAMBlock *block)
1579{
1580 if (block->flags & RAM_PREALLOC) {
1581 ;
1582 } else if (xen_enabled()) {
1583 xen_invalidate_map_cache_entry(block->host);
1584#ifndef _WIN32
1585 } else if (block->fd >= 0) {
1586 munmap(block->host, block->max_length);
1587 close(block->fd);
1588#endif
1589 } else {
1590 qemu_anon_ram_free(block->host, block->max_length);
1591 }
1592 g_free(block);
1593}
1594
c227f099 1595void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1596{
04b16653
AW
1597 RAMBlock *block;
1598
b2a8658e 1599 qemu_mutex_lock_ramlist();
0dc3f44a 1600 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
04b16653 1601 if (addr == block->offset) {
0dc3f44a 1602 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1603 ram_list.mru_block = NULL;
0dc3f44a
MD
1604 /* Write list before version */
1605 smp_wmb();
f798b07f 1606 ram_list.version++;
43771539 1607 call_rcu(block, reclaim_ramblock, rcu);
b2a8658e 1608 break;
04b16653
AW
1609 }
1610 }
b2a8658e 1611 qemu_mutex_unlock_ramlist();
e9a1ab19
FB
1612}
1613
cd19cfa2
HY
1614#ifndef _WIN32
1615void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1616{
1617 RAMBlock *block;
1618 ram_addr_t offset;
1619 int flags;
1620 void *area, *vaddr;
1621
0dc3f44a 1622 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
cd19cfa2 1623 offset = addr - block->offset;
9b8424d5 1624 if (offset < block->max_length) {
1240be24 1625 vaddr = ramblock_ptr(block, offset);
7bd4f430 1626 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1627 ;
dfeaf2ab
MA
1628 } else if (xen_enabled()) {
1629 abort();
cd19cfa2
HY
1630 } else {
1631 flags = MAP_FIXED;
3435f395 1632 if (block->fd >= 0) {
dbcb8981
PB
1633 flags |= (block->flags & RAM_SHARED ?
1634 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1635 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1636 flags, block->fd, offset);
cd19cfa2 1637 } else {
2eb9fbaa
MA
1638 /*
1639 * Remap needs to match alloc. Accelerators that
1640 * set phys_mem_alloc never remap. If they did,
1641 * we'd need a remap hook here.
1642 */
1643 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1644
cd19cfa2
HY
1645 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1646 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1647 flags, -1, 0);
cd19cfa2
HY
1648 }
1649 if (area != vaddr) {
f15fbc4b
AP
1650 fprintf(stderr, "Could not remap addr: "
1651 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1652 length, addr);
1653 exit(1);
1654 }
8490fc78 1655 memory_try_enable_merging(vaddr, length);
ddb97f1d 1656 qemu_ram_setup_dump(vaddr, length);
cd19cfa2 1657 }
cd19cfa2
HY
1658 }
1659 }
1660}
1661#endif /* !_WIN32 */
1662
a35ba7be
PB
1663int qemu_get_ram_fd(ram_addr_t addr)
1664{
ae3a7047
MD
1665 RAMBlock *block;
1666 int fd;
a35ba7be 1667
0dc3f44a 1668 rcu_read_lock();
ae3a7047
MD
1669 block = qemu_get_ram_block(addr);
1670 fd = block->fd;
0dc3f44a 1671 rcu_read_unlock();
ae3a7047 1672 return fd;
a35ba7be
PB
1673}
1674
3fd74b84
DM
1675void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1676{
ae3a7047
MD
1677 RAMBlock *block;
1678 void *ptr;
3fd74b84 1679
0dc3f44a 1680 rcu_read_lock();
ae3a7047
MD
1681 block = qemu_get_ram_block(addr);
1682 ptr = ramblock_ptr(block, 0);
0dc3f44a 1683 rcu_read_unlock();
ae3a7047 1684 return ptr;
3fd74b84
DM
1685}
1686
1b5ec234 1687/* Return a host pointer to ram allocated with qemu_ram_alloc.
ae3a7047
MD
1688 * This should not be used for general purpose DMA. Use address_space_map
1689 * or address_space_rw instead. For local memory (e.g. video ram) that the
1690 * device owns, use memory_region_get_ram_ptr.
0dc3f44a
MD
1691 *
1692 * By the time this function returns, the returned pointer is not protected
1693 * by RCU anymore. If the caller is not within an RCU critical section and
1694 * does not hold the iothread lock, it must have other means of protecting the
1695 * pointer, such as a reference to the region that includes the incoming
1696 * ram_addr_t.
1b5ec234
PB
1697 */
1698void *qemu_get_ram_ptr(ram_addr_t addr)
1699{
ae3a7047
MD
1700 RAMBlock *block;
1701 void *ptr;
1b5ec234 1702
0dc3f44a 1703 rcu_read_lock();
ae3a7047
MD
1704 block = qemu_get_ram_block(addr);
1705
1706 if (xen_enabled() && block->host == NULL) {
0d6d3c87
PB
1707 /* We need to check if the requested address is in the RAM
1708 * because we don't want to map the entire memory in QEMU.
1709 * In that case just map until the end of the page.
1710 */
1711 if (block->offset == 0) {
ae3a7047 1712 ptr = xen_map_cache(addr, 0, 0);
0dc3f44a 1713 goto unlock;
0d6d3c87 1714 }
ae3a7047
MD
1715
1716 block->host = xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87 1717 }
ae3a7047
MD
1718 ptr = ramblock_ptr(block, addr - block->offset);
1719
0dc3f44a
MD
1720unlock:
1721 rcu_read_unlock();
ae3a7047 1722 return ptr;
dc828ca1
PB
1723}
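/* A hypothetical caller honouring the rule in the comment above: keep the RCU
 * read lock (or the iothread lock, or a reference to the region) held around
 * both the lookup and the access, so the RAMBlock cannot be reclaimed while
 * the host pointer is being used.  This fragment assumes it is built inside
 * QEMU; the sketch_/my_ names are invented.
 */
static uint8_t sketch_peek_ram_byte(ram_addr_t my_addr)
{
    uint8_t val;

    rcu_read_lock();
    val = *(uint8_t *)qemu_get_ram_ptr(my_addr);
    rcu_read_unlock();
    return val;
}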
1724
38bee5dc 1725/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
ae3a7047 1726 * but takes a size argument.
0dc3f44a
MD
1727 *
1728 * By the time this function returns, the returned pointer is not protected
1729 * by RCU anymore. If the caller is not within an RCU critical section and
1730 * does not hold the iothread lock, it must have other means of protecting the
1731 * pointer, such as a reference to the region that includes the incoming
1732 * ram_addr_t.
ae3a7047 1733 */
cb85f7ab 1734static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1735{
ae3a7047 1736 void *ptr;
8ab934f9
SS
1737 if (*size == 0) {
1738 return NULL;
1739 }
868bb33f 1740 if (xen_enabled()) {
e41d7c69 1741 return xen_map_cache(addr, *size, 1);
868bb33f 1742 } else {
38bee5dc 1743 RAMBlock *block;
0dc3f44a
MD
1744 rcu_read_lock();
1745 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5
MT
1746 if (addr - block->offset < block->max_length) {
1747 if (addr - block->offset + *size > block->max_length)
1748 *size = block->max_length - addr + block->offset;
ae3a7047 1749 ptr = ramblock_ptr(block, addr - block->offset);
0dc3f44a 1750 rcu_read_unlock();
ae3a7047 1751 return ptr;
38bee5dc
SS
1752 }
1753 }
1754
1755 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1756 abort();
38bee5dc
SS
1757 }
1758}
1759
7443b437 1760/* Some of the softmmu routines need to translate from a host pointer
ae3a7047
MD
1761 * (typically a TLB entry) back to a ram offset.
1762 *
1763 * By the time this function returns, the returned pointer is not protected
1764 * by RCU anymore. If the caller is not within an RCU critical section and
1765 * does not hold the iothread lock, it must have other means of protecting the
1766 * pointer, such as a reference to the region that includes the incoming
1767 * ram_addr_t.
1768 */
1b5ec234 1769MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1770{
94a6b54f
PB
1771 RAMBlock *block;
1772 uint8_t *host = ptr;
ae3a7047 1773 MemoryRegion *mr;
94a6b54f 1774
868bb33f 1775 if (xen_enabled()) {
0dc3f44a 1776 rcu_read_lock();
e41d7c69 1777 *ram_addr = xen_ram_addr_from_mapcache(ptr);
ae3a7047 1778 mr = qemu_get_ram_block(*ram_addr)->mr;
0dc3f44a 1779 rcu_read_unlock();
ae3a7047 1780 return mr;
712c2b41
SS
1781 }
1782
0dc3f44a
MD
1783 rcu_read_lock();
1784 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 1785 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1786 goto found;
1787 }
1788
0dc3f44a 1789 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
432d268c
JN
1790 /* This case happens when the block is not mapped. */
1791 if (block->host == NULL) {
1792 continue;
1793 }
9b8424d5 1794 if (host - block->host < block->max_length) {
23887b79 1795 goto found;
f471a17e 1796 }
94a6b54f 1797 }
432d268c 1798
0dc3f44a 1799 rcu_read_unlock();
1b5ec234 1800 return NULL;
23887b79
PB
1801
1802found:
1803 *ram_addr = block->offset + (host - block->host);
ae3a7047 1804 mr = block->mr;
0dc3f44a 1805 rcu_read_unlock();
ae3a7047 1806 return mr;
e890261f 1807}
f471a17e 1808
a8170e5e 1809static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1810 uint64_t val, unsigned size)
9fa3e853 1811{
52159192 1812 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1813 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1814 }
0e0df1e2
AK
1815 switch (size) {
1816 case 1:
1817 stb_p(qemu_get_ram_ptr(ram_addr), val);
1818 break;
1819 case 2:
1820 stw_p(qemu_get_ram_ptr(ram_addr), val);
1821 break;
1822 case 4:
1823 stl_p(qemu_get_ram_ptr(ram_addr), val);
1824 break;
1825 default:
1826 abort();
3a7d929e 1827 }
6886867e 1828 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
f23db169
FB
1829 /* we remove the notdirty callback only if the code has been
1830 flushed */
a2cd8c85 1831 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1832 CPUArchState *env = current_cpu->env_ptr;
93afeade 1833 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1834 }
9fa3e853
FB
1835}
1836
b018ddf6
PB
1837static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1838 unsigned size, bool is_write)
1839{
1840 return is_write;
1841}
1842
0e0df1e2 1843static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1844 .write = notdirty_mem_write,
b018ddf6 1845 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1846 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1847};
1848
0f459d16 1849/* Generate a debug exception if a watchpoint has been hit. */
66b9b43c 1850static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
0f459d16 1851{
93afeade
AF
1852 CPUState *cpu = current_cpu;
1853 CPUArchState *env = cpu->env_ptr;
06d55cc1 1854 target_ulong pc, cs_base;
0f459d16 1855 target_ulong vaddr;
a1d1bb31 1856 CPUWatchpoint *wp;
06d55cc1 1857 int cpu_flags;
0f459d16 1858
ff4700b0 1859 if (cpu->watchpoint_hit) {
06d55cc1
AL
1860 /* We re-entered the check after replacing the TB. Now raise
1861 * the debug interrupt so that it will trigger after the
1862 * current instruction. */
93afeade 1863 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1864 return;
1865 }
93afeade 1866 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1867 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1868 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1869 && (wp->flags & flags)) {
08225676
PM
1870 if (flags == BP_MEM_READ) {
1871 wp->flags |= BP_WATCHPOINT_HIT_READ;
1872 } else {
1873 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1874 }
1875 wp->hitaddr = vaddr;
66b9b43c 1876 wp->hitattrs = attrs;
ff4700b0
AF
1877 if (!cpu->watchpoint_hit) {
1878 cpu->watchpoint_hit = wp;
239c51a5 1879 tb_check_watchpoint(cpu);
6e140f28 1880 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1881 cpu->exception_index = EXCP_DEBUG;
5638d180 1882 cpu_loop_exit(cpu);
6e140f28
AL
1883 } else {
1884 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1885 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1886 cpu_resume_from_signal(cpu, NULL);
6e140f28 1887 }
06d55cc1 1888 }
6e140f28
AL
1889 } else {
1890 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1891 }
1892 }
1893}
1894
6658ffb8
PB
1895/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1896 so these check for a hit, then pass through to the normal out-of-line
1897 phys routines. */
66b9b43c
PM
1898static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1899 unsigned size, MemTxAttrs attrs)
6658ffb8 1900{
66b9b43c
PM
1901 MemTxResult res;
1902 uint64_t data;
1903
1904 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
1ec9b909 1905 switch (size) {
66b9b43c
PM
1906 case 1:
1907 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
1908 break;
1909 case 2:
1910 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
1911 break;
1912 case 4:
1913 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
1914 break;
1ec9b909
AK
1915 default: abort();
1916 }
66b9b43c
PM
1917 *pdata = data;
1918 return res;
6658ffb8
PB
1919}
1920
66b9b43c
PM
1921static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1922 uint64_t val, unsigned size,
1923 MemTxAttrs attrs)
6658ffb8 1924{
66b9b43c
PM
1925 MemTxResult res;
1926
1927 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1ec9b909 1928 switch (size) {
67364150 1929 case 1:
66b9b43c 1930 address_space_stb(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1931 break;
1932 case 2:
66b9b43c 1933 address_space_stw(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1934 break;
1935 case 4:
66b9b43c 1936 address_space_stl(&address_space_memory, addr, val, attrs, &res);
67364150 1937 break;
1ec9b909
AK
1938 default: abort();
1939 }
66b9b43c 1940 return res;
6658ffb8
PB
1941}
1942
1ec9b909 1943static const MemoryRegionOps watch_mem_ops = {
66b9b43c
PM
1944 .read_with_attrs = watch_mem_read,
1945 .write_with_attrs = watch_mem_write,
1ec9b909 1946 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1947};
6658ffb8 1948
f25a49e0
PM
1949static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1950 unsigned len, MemTxAttrs attrs)
db7b5426 1951{
acc9d80b 1952 subpage_t *subpage = opaque;
ff6cff75 1953 uint8_t buf[8];
5c9eb028 1954 MemTxResult res;
791af8c8 1955
db7b5426 1956#if defined(DEBUG_SUBPAGE)
016e9d62 1957 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1958 subpage, len, addr);
db7b5426 1959#endif
5c9eb028
PM
1960 res = address_space_read(subpage->as, addr + subpage->base,
1961 attrs, buf, len);
1962 if (res) {
1963 return res;
f25a49e0 1964 }
acc9d80b
JK
1965 switch (len) {
1966 case 1:
f25a49e0
PM
1967 *data = ldub_p(buf);
1968 return MEMTX_OK;
acc9d80b 1969 case 2:
f25a49e0
PM
1970 *data = lduw_p(buf);
1971 return MEMTX_OK;
acc9d80b 1972 case 4:
f25a49e0
PM
1973 *data = ldl_p(buf);
1974 return MEMTX_OK;
ff6cff75 1975 case 8:
f25a49e0
PM
1976 *data = ldq_p(buf);
1977 return MEMTX_OK;
acc9d80b
JK
1978 default:
1979 abort();
1980 }
db7b5426
BS
1981}
1982
f25a49e0
PM
1983static MemTxResult subpage_write(void *opaque, hwaddr addr,
1984 uint64_t value, unsigned len, MemTxAttrs attrs)
db7b5426 1985{
acc9d80b 1986 subpage_t *subpage = opaque;
ff6cff75 1987 uint8_t buf[8];
acc9d80b 1988
db7b5426 1989#if defined(DEBUG_SUBPAGE)
016e9d62 1990 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1991 " value %"PRIx64"\n",
1992 __func__, subpage, len, addr, value);
db7b5426 1993#endif
acc9d80b
JK
1994 switch (len) {
1995 case 1:
1996 stb_p(buf, value);
1997 break;
1998 case 2:
1999 stw_p(buf, value);
2000 break;
2001 case 4:
2002 stl_p(buf, value);
2003 break;
ff6cff75
PB
2004 case 8:
2005 stq_p(buf, value);
2006 break;
acc9d80b
JK
2007 default:
2008 abort();
2009 }
5c9eb028
PM
2010 return address_space_write(subpage->as, addr + subpage->base,
2011 attrs, buf, len);
db7b5426
BS
2012}
2013
c353e4cc 2014static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 2015 unsigned len, bool is_write)
c353e4cc 2016{
acc9d80b 2017 subpage_t *subpage = opaque;
c353e4cc 2018#if defined(DEBUG_SUBPAGE)
016e9d62 2019 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 2020 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
2021#endif
2022
acc9d80b 2023 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 2024 len, is_write);
c353e4cc
PB
2025}
2026
70c68e44 2027static const MemoryRegionOps subpage_ops = {
f25a49e0
PM
2028 .read_with_attrs = subpage_read,
2029 .write_with_attrs = subpage_write,
ff6cff75
PB
2030 .impl.min_access_size = 1,
2031 .impl.max_access_size = 8,
2032 .valid.min_access_size = 1,
2033 .valid.max_access_size = 8,
c353e4cc 2034 .valid.accepts = subpage_accepts,
70c68e44 2035 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
2036};
2037
c227f099 2038static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2039 uint16_t section)
db7b5426
BS
2040{
2041 int idx, eidx;
2042
2043 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2044 return -1;
2045 idx = SUBPAGE_IDX(start);
2046 eidx = SUBPAGE_IDX(end);
2047#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2048 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2049 __func__, mmio, start, end, idx, eidx, section);
db7b5426 2050#endif
db7b5426 2051 for (; idx <= eidx; idx++) {
5312bd8b 2052 mmio->sub_section[idx] = section;
db7b5426
BS
2053 }
2054
2055 return 0;
2056}
2057
acc9d80b 2058static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 2059{
c227f099 2060 subpage_t *mmio;
db7b5426 2061
7267c094 2062 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 2063
acc9d80b 2064 mmio->as = as;
1eec614b 2065 mmio->base = base;
2c9b15ca 2066 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 2067 NULL, TARGET_PAGE_SIZE);
b3b00c78 2068 mmio->iomem.subpage = true;
db7b5426 2069#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2070 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2071 mmio, base, TARGET_PAGE_SIZE);
db7b5426 2072#endif
b41aac4f 2073 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
2074
2075 return mmio;
2076}
2077
a656e22f
PC
2078static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2079 MemoryRegion *mr)
5312bd8b 2080{
a656e22f 2081 assert(as);
5312bd8b 2082 MemoryRegionSection section = {
a656e22f 2083 .address_space = as,
5312bd8b
AK
2084 .mr = mr,
2085 .offset_within_address_space = 0,
2086 .offset_within_region = 0,
052e87b0 2087 .size = int128_2_64(),
5312bd8b
AK
2088 };
2089
53cb28cb 2090 return phys_section_add(map, &section);
5312bd8b
AK
2091}
2092
9d82b5a7 2093MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
aa102231 2094{
79e2b9ae
PB
2095 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2096 MemoryRegionSection *sections = d->map.sections;
9d82b5a7
PB
2097
2098 return sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
2099}
2100
e9179ce1
AK
2101static void io_mem_init(void)
2102{
1f6245e5 2103 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 2104 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 2105 NULL, UINT64_MAX);
2c9b15ca 2106 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 2107 NULL, UINT64_MAX);
2c9b15ca 2108 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 2109 NULL, UINT64_MAX);
e9179ce1
AK
2110}
2111
ac1970fb 2112static void mem_begin(MemoryListener *listener)
00752703
PB
2113{
2114 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
2115 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2116 uint16_t n;
2117
a656e22f 2118 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 2119 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 2120 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 2121 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 2122 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 2123 assert(n == PHYS_SECTION_ROM);
a656e22f 2124 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 2125 assert(n == PHYS_SECTION_WATCH);
00752703 2126
9736e55b 2127 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
2128 d->as = as;
2129 as->next_dispatch = d;
2130}
2131
79e2b9ae
PB
2132static void address_space_dispatch_free(AddressSpaceDispatch *d)
2133{
2134 phys_sections_free(&d->map);
2135 g_free(d);
2136}
2137
00752703 2138static void mem_commit(MemoryListener *listener)
ac1970fb 2139{
89ae337a 2140 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2141 AddressSpaceDispatch *cur = as->dispatch;
2142 AddressSpaceDispatch *next = as->next_dispatch;
2143
53cb28cb 2144 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2145
79e2b9ae 2146 atomic_rcu_set(&as->dispatch, next);
53cb28cb 2147 if (cur) {
79e2b9ae 2148 call_rcu(cur, address_space_dispatch_free, rcu);
53cb28cb 2149 }
9affd6fc
PB
2150}
2151
1d71148e 2152static void tcg_commit(MemoryListener *listener)
50c1e149 2153{
182735ef 2154 CPUState *cpu;
117712c3
AK
2155
2156 /* since each CPU stores ram addresses in its TLB cache, we must
2157 reset the modified entries */
2158 /* XXX: slow ! */
bdc44640 2159 CPU_FOREACH(cpu) {
33bde2e1
EI
2160 /* FIXME: Disentangle the cpu.h circular files deps so we can
2161 directly get the right CPU from listener. */
2162 if (cpu->tcg_as_listener != listener) {
2163 continue;
2164 }
76e5c76f 2165 cpu_reload_memory_map(cpu);
117712c3 2166 }
50c1e149
AK
2167}
2168
93632747
AK
2169static void core_log_global_start(MemoryListener *listener)
2170{
981fdf23 2171 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
2172}
2173
2174static void core_log_global_stop(MemoryListener *listener)
2175{
981fdf23 2176 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
2177}
2178
93632747 2179static MemoryListener core_memory_listener = {
93632747
AK
2180 .log_global_start = core_log_global_start,
2181 .log_global_stop = core_log_global_stop,
ac1970fb 2182 .priority = 1,
93632747
AK
2183};
2184
ac1970fb
AK
2185void address_space_init_dispatch(AddressSpace *as)
2186{
00752703 2187 as->dispatch = NULL;
89ae337a 2188 as->dispatch_listener = (MemoryListener) {
ac1970fb 2189 .begin = mem_begin,
00752703 2190 .commit = mem_commit,
ac1970fb
AK
2191 .region_add = mem_add,
2192 .region_nop = mem_add,
2193 .priority = 0,
2194 };
89ae337a 2195 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2196}
2197
6e48e8f9
PB
2198void address_space_unregister(AddressSpace *as)
2199{
2200 memory_listener_unregister(&as->dispatch_listener);
2201}
2202
83f3c251
AK
2203void address_space_destroy_dispatch(AddressSpace *as)
2204{
2205 AddressSpaceDispatch *d = as->dispatch;
2206
79e2b9ae
PB
2207 atomic_rcu_set(&as->dispatch, NULL);
2208 if (d) {
2209 call_rcu(d, address_space_dispatch_free, rcu);
2210 }
83f3c251
AK
2211}
2212
62152b8a
AK
2213static void memory_map_init(void)
2214{
7267c094 2215 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2216
57271d63 2217 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2218 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2219
7267c094 2220 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2221 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2222 65536);
7dca8043 2223 address_space_init(&address_space_io, system_io, "I/O");
93632747 2224
f6790af6 2225 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
2226}
2227
2228MemoryRegion *get_system_memory(void)
2229{
2230 return system_memory;
2231}
2232
309cb471
AK
2233MemoryRegion *get_system_io(void)
2234{
2235 return system_io;
2236}
2237
e2eef170
PB
2238#endif /* !defined(CONFIG_USER_ONLY) */
2239
13eb76e0
FB
2240/* physical memory access (slow version, mainly for debug) */
2241#if defined(CONFIG_USER_ONLY)
f17ec444 2242int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2243 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2244{
2245 int l, flags;
2246 target_ulong page;
53a5960a 2247 void * p;
13eb76e0
FB
2248
2249 while (len > 0) {
2250 page = addr & TARGET_PAGE_MASK;
2251 l = (page + TARGET_PAGE_SIZE) - addr;
2252 if (l > len)
2253 l = len;
2254 flags = page_get_flags(page);
2255 if (!(flags & PAGE_VALID))
a68fe89c 2256 return -1;
13eb76e0
FB
2257 if (is_write) {
2258 if (!(flags & PAGE_WRITE))
a68fe89c 2259 return -1;
579a97f7 2260 /* XXX: this code should not depend on lock_user */
72fb7daa 2261 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2262 return -1;
72fb7daa
AJ
2263 memcpy(p, buf, l);
2264 unlock_user(p, addr, l);
13eb76e0
FB
2265 } else {
2266 if (!(flags & PAGE_READ))
a68fe89c 2267 return -1;
579a97f7 2268 /* XXX: this code should not depend on lock_user */
72fb7daa 2269 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2270 return -1;
72fb7daa 2271 memcpy(buf, p, l);
5b257578 2272 unlock_user(p, addr, 0);
13eb76e0
FB
2273 }
2274 len -= l;
2275 buf += l;
2276 addr += l;
2277 }
a68fe89c 2278 return 0;
13eb76e0 2279}
8df1cd07 2280
13eb76e0 2281#else
51d7a9eb 2282
a8170e5e
AK
2283static void invalidate_and_set_dirty(hwaddr addr,
2284 hwaddr length)
51d7a9eb 2285{
f874bf90
PM
2286 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2287 tb_invalidate_phys_range(addr, addr + length, 0);
6886867e 2288 cpu_physical_memory_set_dirty_range_nocode(addr, length);
51d7a9eb 2289 }
e226939d 2290 xen_modified_memory(addr, length);
51d7a9eb
AP
2291}
2292
23326164 2293static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2294{
e1622f4b 2295 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2296
2297 /* Regions are assumed to support 1-4 byte accesses unless
2298 otherwise specified. */
23326164
RH
2299 if (access_size_max == 0) {
2300 access_size_max = 4;
2301 }
2302
2303 /* Bound the maximum access by the alignment of the address. */
2304 if (!mr->ops->impl.unaligned) {
2305 unsigned align_size_max = addr & -addr;
2306 if (align_size_max != 0 && align_size_max < access_size_max) {
2307 access_size_max = align_size_max;
2308 }
82f2563f 2309 }
23326164
RH
2310
2311 /* Don't attempt accesses larger than the maximum. */
2312 if (l > access_size_max) {
2313 l = access_size_max;
82f2563f 2314 }
098178f2
PB
2315 if (l & (l - 1)) {
2316 l = 1 << (qemu_fls(l) - 1);
2317 }
23326164
RH
2318
2319 return l;
82f2563f
PB
2320}
2321
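/*
 * Worked example of the clamping above (numbers are illustrative): with
 * .valid.max_access_size = 4, a request of l = 6 bytes at an address whose
 * lowest set bit is 2 (addr & -addr == 2) is first bounded by alignment to
 * access_size_max = 2, then clamped from 6 down to 2, which is already a
 * power of two.  The caller's loop issues the remaining bytes as further
 * accesses.  If the region sets .impl.unaligned, the alignment bound is
 * skipped and the same request is clamped to 4 instead.
 */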
5c9eb028
PM
2322MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2323 uint8_t *buf, int len, bool is_write)
13eb76e0 2324{
149f54b5 2325 hwaddr l;
13eb76e0 2326 uint8_t *ptr;
791af8c8 2327 uint64_t val;
149f54b5 2328 hwaddr addr1;
5c8a00ce 2329 MemoryRegion *mr;
3b643495 2330 MemTxResult result = MEMTX_OK;
3b46e624 2331
13eb76e0 2332 while (len > 0) {
149f54b5 2333 l = len;
5c8a00ce 2334 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2335
13eb76e0 2336 if (is_write) {
5c8a00ce
PB
2337 if (!memory_access_is_direct(mr, is_write)) {
2338 l = memory_access_size(mr, l, addr1);
4917cf44 2339 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2340 potential bugs */
23326164
RH
2341 switch (l) {
2342 case 8:
2343 /* 64 bit write access */
2344 val = ldq_p(buf);
3b643495
PM
2345 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2346 attrs);
23326164
RH
2347 break;
2348 case 4:
1c213d19 2349 /* 32 bit write access */
c27004ec 2350 val = ldl_p(buf);
3b643495
PM
2351 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2352 attrs);
23326164
RH
2353 break;
2354 case 2:
1c213d19 2355 /* 16 bit write access */
c27004ec 2356 val = lduw_p(buf);
3b643495
PM
2357 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2358 attrs);
23326164
RH
2359 break;
2360 case 1:
1c213d19 2361 /* 8 bit write access */
c27004ec 2362 val = ldub_p(buf);
3b643495
PM
2363 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2364 attrs);
23326164
RH
2365 break;
2366 default:
2367 abort();
13eb76e0 2368 }
2bbfa05d 2369 } else {
5c8a00ce 2370 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2371 /* RAM case */
5579c7f3 2372 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2373 memcpy(ptr, buf, l);
51d7a9eb 2374 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2375 }
2376 } else {
5c8a00ce 2377 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2378 /* I/O case */
5c8a00ce 2379 l = memory_access_size(mr, l, addr1);
23326164
RH
2380 switch (l) {
2381 case 8:
2382 /* 64 bit read access */
3b643495
PM
2383 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2384 attrs);
23326164
RH
2385 stq_p(buf, val);
2386 break;
2387 case 4:
13eb76e0 2388 /* 32 bit read access */
3b643495
PM
2389 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2390 attrs);
c27004ec 2391 stl_p(buf, val);
23326164
RH
2392 break;
2393 case 2:
13eb76e0 2394 /* 16 bit read access */
3b643495
PM
2395 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2396 attrs);
c27004ec 2397 stw_p(buf, val);
23326164
RH
2398 break;
2399 case 1:
1c213d19 2400 /* 8 bit read access */
3b643495
PM
2401 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2402 attrs);
c27004ec 2403 stb_p(buf, val);
23326164
RH
2404 break;
2405 default:
2406 abort();
13eb76e0
FB
2407 }
2408 } else {
2409 /* RAM case */
5c8a00ce 2410 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2411 memcpy(buf, ptr, l);
13eb76e0
FB
2412 }
2413 }
2414 len -= l;
2415 buf += l;
2416 addr += l;
2417 }
fd8aaa76 2418
3b643495 2419 return result;
13eb76e0 2420}
8df1cd07 2421
5c9eb028
PM
2422MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2423 const uint8_t *buf, int len)
ac1970fb 2424{
5c9eb028 2425 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
ac1970fb
AK
2426}
2427
5c9eb028
PM
2428MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2429 uint8_t *buf, int len)
ac1970fb 2430{
5c9eb028 2431 return address_space_rw(as, addr, attrs, buf, len, false);
ac1970fb
AK
2432}
2433
2434
a8170e5e 2435void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2436 int len, int is_write)
2437{
5c9eb028
PM
2438 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2439 buf, len, is_write);
ac1970fb
AK
2440}
2441
582b55a9
AG
2442enum write_rom_type {
2443 WRITE_DATA,
2444 FLUSH_CACHE,
2445};
2446
2a221651 2447static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2448 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2449{
149f54b5 2450 hwaddr l;
d0ecd2aa 2451 uint8_t *ptr;
149f54b5 2452 hwaddr addr1;
5c8a00ce 2453 MemoryRegion *mr;
3b46e624 2454
d0ecd2aa 2455 while (len > 0) {
149f54b5 2456 l = len;
2a221651 2457 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2458
5c8a00ce
PB
2459 if (!(memory_region_is_ram(mr) ||
2460 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2461 /* do nothing */
2462 } else {
5c8a00ce 2463 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2464 /* ROM/RAM case */
5579c7f3 2465 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2466 switch (type) {
2467 case WRITE_DATA:
2468 memcpy(ptr, buf, l);
2469 invalidate_and_set_dirty(addr1, l);
2470 break;
2471 case FLUSH_CACHE:
2472 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2473 break;
2474 }
d0ecd2aa
FB
2475 }
2476 len -= l;
2477 buf += l;
2478 addr += l;
2479 }
2480}
2481
582b55a9 2482/* used for ROM loading : can write in RAM and ROM */
2a221651 2483void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2484 const uint8_t *buf, int len)
2485{
2a221651 2486 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2487}
2488
2489void cpu_flush_icache_range(hwaddr start, int len)
2490{
2491 /*
2492 * This function should do the same thing as an icache flush that was
2493 * triggered from within the guest. For TCG we are always cache coherent,
2494 * so there is no need to flush anything. For KVM / Xen we need to flush
2495 * the host's instruction cache at least.
2496 */
2497 if (tcg_enabled()) {
2498 return;
2499 }
2500
2a221651
EI
2501 cpu_physical_memory_write_rom_internal(&address_space_memory,
2502 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2503}
2504
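/* A minimal sketch of the ROM-loading pattern served by the two helpers
 * above: copy a firmware image into the guest address space and make the
 * host instruction cache coherent before the guest runs it.  The names
 * example_load_firmware, blob and blob_size are illustrative only.
 */
static void example_load_firmware(AddressSpace *as, hwaddr load_addr,
                                  const uint8_t *blob, int blob_size)
{
    /* Writes into ROM as well as RAM and marks the range dirty as needed. */
    cpu_physical_memory_write_rom(as, load_addr, blob, blob_size);
    /* No-op under TCG; flushes the host icache for KVM/Xen guests. */
    cpu_flush_icache_range(load_addr, blob_size);
}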
6d16c2f8 2505typedef struct {
d3e71559 2506 MemoryRegion *mr;
6d16c2f8 2507 void *buffer;
a8170e5e
AK
2508 hwaddr addr;
2509 hwaddr len;
c2cba0ff 2510 bool in_use;
6d16c2f8
AL
2511} BounceBuffer;
2512
2513static BounceBuffer bounce;
2514
ba223c29 2515typedef struct MapClient {
e95205e1 2516 QEMUBH *bh;
72cf2d4f 2517 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2518} MapClient;
2519
38e047b5 2520QemuMutex map_client_list_lock;
72cf2d4f
BS
2521static QLIST_HEAD(map_client_list, MapClient) map_client_list
2522 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29 2523
e95205e1
FZ
2524static void cpu_unregister_map_client_do(MapClient *client)
2525{
2526 QLIST_REMOVE(client, link);
2527 g_free(client);
2528}
2529
33b6c2ed
FZ
2530static void cpu_notify_map_clients_locked(void)
2531{
2532 MapClient *client;
2533
2534 while (!QLIST_EMPTY(&map_client_list)) {
2535 client = QLIST_FIRST(&map_client_list);
e95205e1
FZ
2536 qemu_bh_schedule(client->bh);
2537 cpu_unregister_map_client_do(client);
33b6c2ed
FZ
2538 }
2539}
2540
e95205e1 2541void cpu_register_map_client(QEMUBH *bh)
ba223c29 2542{
7267c094 2543 MapClient *client = g_malloc(sizeof(*client));
ba223c29 2544
38e047b5 2545 qemu_mutex_lock(&map_client_list_lock);
e95205e1 2546 client->bh = bh;
72cf2d4f 2547 QLIST_INSERT_HEAD(&map_client_list, client, link);
33b6c2ed
FZ
2548 if (!atomic_read(&bounce.in_use)) {
2549 cpu_notify_map_clients_locked();
2550 }
38e047b5 2551 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2552}
2553
38e047b5 2554void cpu_exec_init_all(void)
ba223c29 2555{
38e047b5
FZ
2556 qemu_mutex_init(&ram_list.mutex);
2557 memory_map_init();
2558 io_mem_init();
2559 qemu_mutex_init(&map_client_list_lock);
ba223c29
AL
2560}
2561
e95205e1 2562void cpu_unregister_map_client(QEMUBH *bh)
ba223c29
AL
2563{
2564 MapClient *client;
2565
e95205e1
FZ
2566 qemu_mutex_lock(&map_client_list_lock);
2567 QLIST_FOREACH(client, &map_client_list, link) {
2568 if (client->bh == bh) {
2569 cpu_unregister_map_client_do(client);
2570 break;
2571 }
ba223c29 2572 }
e95205e1 2573 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2574}
2575
2576static void cpu_notify_map_clients(void)
2577{
38e047b5 2578 qemu_mutex_lock(&map_client_list_lock);
33b6c2ed 2579 cpu_notify_map_clients_locked();
38e047b5 2580 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2581}
2582
51644ab7
PB
2583bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2584{
5c8a00ce 2585 MemoryRegion *mr;
51644ab7
PB
2586 hwaddr l, xlat;
2587
2588 while (len > 0) {
2589 l = len;
5c8a00ce
PB
2590 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2591 if (!memory_access_is_direct(mr, is_write)) {
2592 l = memory_access_size(mr, l, addr);
2593 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2594 return false;
2595 }
2596 }
2597
2598 len -= l;
2599 addr += l;
2600 }
2601 return true;
2602}
2603
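/* A small sketch combining the validity check above with an actual access:
 * probe a DMA window first, then read it into a local buffer.  The name
 * example_dma_read() is illustrative; MEMTX_DECODE_ERROR is assumed to be
 * the "nothing at that address" result defined alongside MemTxResult.
 */
static MemTxResult example_dma_read(AddressSpace *as, hwaddr addr,
                                    uint8_t *buf, int len)
{
    if (!address_space_access_valid(as, addr, len, false)) {
        return MEMTX_DECODE_ERROR;
    }
    return address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, buf, len);
}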
6d16c2f8
AL
2604/* Map a physical memory region into a host virtual address.
2605 * May map a subset of the requested range, given by and returned in *plen.
2606 * May return NULL if resources needed to perform the mapping are exhausted.
2607 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2608 * Use cpu_register_map_client() to know when retrying the map operation is
2609 * likely to succeed.
6d16c2f8 2610 */
ac1970fb 2611void *address_space_map(AddressSpace *as,
a8170e5e
AK
2612 hwaddr addr,
2613 hwaddr *plen,
ac1970fb 2614 bool is_write)
6d16c2f8 2615{
a8170e5e 2616 hwaddr len = *plen;
e3127ae0
PB
2617 hwaddr done = 0;
2618 hwaddr l, xlat, base;
2619 MemoryRegion *mr, *this_mr;
2620 ram_addr_t raddr;
6d16c2f8 2621
e3127ae0
PB
2622 if (len == 0) {
2623 return NULL;
2624 }
38bee5dc 2625
e3127ae0
PB
2626 l = len;
2627 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2628 if (!memory_access_is_direct(mr, is_write)) {
c2cba0ff 2629 if (atomic_xchg(&bounce.in_use, true)) {
e3127ae0 2630 return NULL;
6d16c2f8 2631 }
e85d9db5
KW
2632 /* Avoid unbounded allocations */
2633 l = MIN(l, TARGET_PAGE_SIZE);
2634 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2635 bounce.addr = addr;
2636 bounce.len = l;
d3e71559
PB
2637
2638 memory_region_ref(mr);
2639 bounce.mr = mr;
e3127ae0 2640 if (!is_write) {
5c9eb028
PM
2641 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2642 bounce.buffer, l);
8ab934f9 2643 }
6d16c2f8 2644
e3127ae0
PB
2645 *plen = l;
2646 return bounce.buffer;
2647 }
2648
2649 base = xlat;
2650 raddr = memory_region_get_ram_addr(mr);
2651
2652 for (;;) {
6d16c2f8
AL
2653 len -= l;
2654 addr += l;
e3127ae0
PB
2655 done += l;
2656 if (len == 0) {
2657 break;
2658 }
2659
2660 l = len;
2661 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2662 if (this_mr != mr || xlat != base + done) {
2663 break;
2664 }
6d16c2f8 2665 }
e3127ae0 2666
d3e71559 2667 memory_region_ref(mr);
e3127ae0
PB
2668 *plen = done;
2669 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2670}
2671
ac1970fb 2672/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2673 * Will also mark the memory as dirty if is_write == 1. access_len gives
2674 * the amount of memory that was actually read or written by the caller.
2675 */
a8170e5e
AK
2676void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2677 int is_write, hwaddr access_len)
6d16c2f8
AL
2678{
2679 if (buffer != bounce.buffer) {
d3e71559
PB
2680 MemoryRegion *mr;
2681 ram_addr_t addr1;
2682
2683 mr = qemu_ram_addr_from_host(buffer, &addr1);
2684 assert(mr != NULL);
6d16c2f8 2685 if (is_write) {
6886867e 2686 invalidate_and_set_dirty(addr1, access_len);
6d16c2f8 2687 }
868bb33f 2688 if (xen_enabled()) {
e41d7c69 2689 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2690 }
d3e71559 2691 memory_region_unref(mr);
6d16c2f8
AL
2692 return;
2693 }
2694 if (is_write) {
5c9eb028
PM
2695 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2696 bounce.buffer, access_len);
6d16c2f8 2697 }
f8a83245 2698 qemu_vfree(bounce.buffer);
6d16c2f8 2699 bounce.buffer = NULL;
d3e71559 2700 memory_region_unref(bounce.mr);
c2cba0ff 2701 atomic_mb_set(&bounce.in_use, false);
ba223c29 2702 cpu_notify_map_clients();
6d16c2f8 2703}
d0ecd2aa 2704
a8170e5e
AK
2705void *cpu_physical_memory_map(hwaddr addr,
2706 hwaddr *plen,
ac1970fb
AK
2707 int is_write)
2708{
2709 return address_space_map(&address_space_memory, addr, plen, is_write);
2710}
2711
a8170e5e
AK
2712void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2713 int is_write, hwaddr access_len)
ac1970fb
AK
2714{
2715 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2716}
2717
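/* A minimal sketch of the map/unmap contract documented above: map a guest
 * physical range for a one-direction transfer and fall back gracefully when
 * mapping resources (such as the single bounce buffer) are exhausted.  All
 * example_* names are illustrative only.
 */
static bool example_dma_write(AddressSpace *as, hwaddr addr,
                              const uint8_t *data, hwaddr len)
{
    hwaddr plen = len;
    void *host = address_space_map(as, addr, &plen, true);

    if (host == NULL) {
        /* Register a QEMUBH with cpu_register_map_client() and retry the
         * transfer from that bottom half once a mapping is free again. */
        return false;
    }
    /* Only plen bytes were mapped; a real caller would loop over the rest. */
    memcpy(host, data, plen);
    address_space_unmap(as, host, plen, true, plen);
    return plen == len;
}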
8df1cd07 2718/* warning: addr must be aligned */
50013115
PM
2719static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2720 MemTxAttrs attrs,
2721 MemTxResult *result,
2722 enum device_endian endian)
8df1cd07 2723{
8df1cd07 2724 uint8_t *ptr;
791af8c8 2725 uint64_t val;
5c8a00ce 2726 MemoryRegion *mr;
149f54b5
PB
2727 hwaddr l = 4;
2728 hwaddr addr1;
50013115 2729 MemTxResult r;
8df1cd07 2730
fdfba1a2 2731 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2732 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2733 /* I/O case */
50013115 2734 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
1e78bcc1
AG
2735#if defined(TARGET_WORDS_BIGENDIAN)
2736 if (endian == DEVICE_LITTLE_ENDIAN) {
2737 val = bswap32(val);
2738 }
2739#else
2740 if (endian == DEVICE_BIG_ENDIAN) {
2741 val = bswap32(val);
2742 }
2743#endif
8df1cd07
FB
2744 } else {
2745 /* RAM case */
5c8a00ce 2746 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2747 & TARGET_PAGE_MASK)
149f54b5 2748 + addr1);
1e78bcc1
AG
2749 switch (endian) {
2750 case DEVICE_LITTLE_ENDIAN:
2751 val = ldl_le_p(ptr);
2752 break;
2753 case DEVICE_BIG_ENDIAN:
2754 val = ldl_be_p(ptr);
2755 break;
2756 default:
2757 val = ldl_p(ptr);
2758 break;
2759 }
50013115
PM
2760 r = MEMTX_OK;
2761 }
2762 if (result) {
2763 *result = r;
8df1cd07
FB
2764 }
2765 return val;
2766}
2767
50013115
PM
2768uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2769 MemTxAttrs attrs, MemTxResult *result)
2770{
2771 return address_space_ldl_internal(as, addr, attrs, result,
2772 DEVICE_NATIVE_ENDIAN);
2773}
2774
2775uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2776 MemTxAttrs attrs, MemTxResult *result)
2777{
2778 return address_space_ldl_internal(as, addr, attrs, result,
2779 DEVICE_LITTLE_ENDIAN);
2780}
2781
2782uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2783 MemTxAttrs attrs, MemTxResult *result)
2784{
2785 return address_space_ldl_internal(as, addr, attrs, result,
2786 DEVICE_BIG_ENDIAN);
2787}
2788
fdfba1a2 2789uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2790{
50013115 2791 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2792}
2793
fdfba1a2 2794uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2795{
50013115 2796 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2797}
2798
fdfba1a2 2799uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2800{
50013115 2801 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2802}
2803
84b7b8e7 2804/* warning: addr must be aligned */
50013115
PM
2805static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2806 MemTxAttrs attrs,
2807 MemTxResult *result,
2808 enum device_endian endian)
84b7b8e7 2809{
84b7b8e7
FB
2810 uint8_t *ptr;
2811 uint64_t val;
5c8a00ce 2812 MemoryRegion *mr;
149f54b5
PB
2813 hwaddr l = 8;
2814 hwaddr addr1;
50013115 2815 MemTxResult r;
84b7b8e7 2816
2c17449b 2817 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2818 false);
2819 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2820 /* I/O case */
50013115 2821 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
968a5627
PB
2822#if defined(TARGET_WORDS_BIGENDIAN)
2823 if (endian == DEVICE_LITTLE_ENDIAN) {
2824 val = bswap64(val);
2825 }
2826#else
2827 if (endian == DEVICE_BIG_ENDIAN) {
2828 val = bswap64(val);
2829 }
84b7b8e7
FB
2830#endif
2831 } else {
2832 /* RAM case */
5c8a00ce 2833 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2834 & TARGET_PAGE_MASK)
149f54b5 2835 + addr1);
1e78bcc1
AG
2836 switch (endian) {
2837 case DEVICE_LITTLE_ENDIAN:
2838 val = ldq_le_p(ptr);
2839 break;
2840 case DEVICE_BIG_ENDIAN:
2841 val = ldq_be_p(ptr);
2842 break;
2843 default:
2844 val = ldq_p(ptr);
2845 break;
2846 }
50013115
PM
2847 r = MEMTX_OK;
2848 }
2849 if (result) {
2850 *result = r;
84b7b8e7
FB
2851 }
2852 return val;
2853}
2854
50013115
PM
2855uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2856 MemTxAttrs attrs, MemTxResult *result)
2857{
2858 return address_space_ldq_internal(as, addr, attrs, result,
2859 DEVICE_NATIVE_ENDIAN);
2860}
2861
2862uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2863 MemTxAttrs attrs, MemTxResult *result)
2864{
2865 return address_space_ldq_internal(as, addr, attrs, result,
2866 DEVICE_LITTLE_ENDIAN);
2867}
2868
2869uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2870 MemTxAttrs attrs, MemTxResult *result)
2871{
2872 return address_space_ldq_internal(as, addr, attrs, result,
2873 DEVICE_BIG_ENDIAN);
2874}
2875
2c17449b 2876uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2877{
50013115 2878 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2879}
2880
2c17449b 2881uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2882{
50013115 2883 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2884}
2885
2c17449b 2886uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2887{
50013115 2888 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2889}
2890
aab33094 2891/* XXX: optimize */
50013115
PM
2892uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
2893 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
2894{
2895 uint8_t val;
50013115
PM
2896 MemTxResult r;
2897
2898 r = address_space_rw(as, addr, attrs, &val, 1, 0);
2899 if (result) {
2900 *result = r;
2901 }
aab33094
FB
2902 return val;
2903}
2904
50013115
PM
2905uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2906{
2907 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2908}
2909
733f0b02 2910/* warning: addr must be aligned */
50013115
PM
2911static inline uint32_t address_space_lduw_internal(AddressSpace *as,
2912 hwaddr addr,
2913 MemTxAttrs attrs,
2914 MemTxResult *result,
2915 enum device_endian endian)
aab33094 2916{
733f0b02
MT
2917 uint8_t *ptr;
2918 uint64_t val;
5c8a00ce 2919 MemoryRegion *mr;
149f54b5
PB
2920 hwaddr l = 2;
2921 hwaddr addr1;
50013115 2922 MemTxResult r;
733f0b02 2923
41701aa4 2924 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2925 false);
2926 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2927 /* I/O case */
50013115 2928 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
1e78bcc1
AG
2929#if defined(TARGET_WORDS_BIGENDIAN)
2930 if (endian == DEVICE_LITTLE_ENDIAN) {
2931 val = bswap16(val);
2932 }
2933#else
2934 if (endian == DEVICE_BIG_ENDIAN) {
2935 val = bswap16(val);
2936 }
2937#endif
733f0b02
MT
2938 } else {
2939 /* RAM case */
5c8a00ce 2940 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2941 & TARGET_PAGE_MASK)
149f54b5 2942 + addr1);
1e78bcc1
AG
2943 switch (endian) {
2944 case DEVICE_LITTLE_ENDIAN:
2945 val = lduw_le_p(ptr);
2946 break;
2947 case DEVICE_BIG_ENDIAN:
2948 val = lduw_be_p(ptr);
2949 break;
2950 default:
2951 val = lduw_p(ptr);
2952 break;
2953 }
50013115
PM
2954 r = MEMTX_OK;
2955 }
2956 if (result) {
2957 *result = r;
733f0b02
MT
2958 }
2959 return val;
aab33094
FB
2960}
2961
50013115
PM
2962uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
2963 MemTxAttrs attrs, MemTxResult *result)
2964{
2965 return address_space_lduw_internal(as, addr, attrs, result,
2966 DEVICE_NATIVE_ENDIAN);
2967}
2968
2969uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
2970 MemTxAttrs attrs, MemTxResult *result)
2971{
2972 return address_space_lduw_internal(as, addr, attrs, result,
2973 DEVICE_LITTLE_ENDIAN);
2974}
2975
2976uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
2977 MemTxAttrs attrs, MemTxResult *result)
2978{
2979 return address_space_lduw_internal(as, addr, attrs, result,
2980 DEVICE_BIG_ENDIAN);
2981}
2982
41701aa4 2983uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2984{
50013115 2985 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2986}
2987
41701aa4 2988uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2989{
50013115 2990 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2991}
2992
41701aa4 2993uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2994{
50013115 2995 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2996}
2997
8df1cd07
FB
2998/* warning: addr must be aligned. The ram page is not marked as dirty
2999 and the code inside is not invalidated. It is useful if the dirty
3000 bits are used to track modified PTEs */
50013115
PM
3001void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3002 MemTxAttrs attrs, MemTxResult *result)
8df1cd07 3003{
8df1cd07 3004 uint8_t *ptr;
5c8a00ce 3005 MemoryRegion *mr;
149f54b5
PB
3006 hwaddr l = 4;
3007 hwaddr addr1;
50013115 3008 MemTxResult r;
8df1cd07 3009
2198a121 3010 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3011 true);
3012 if (l < 4 || !memory_access_is_direct(mr, true)) {
50013115 3013 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3014 } else {
5c8a00ce 3015 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3016 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3017 stl_p(ptr, val);
74576198
AL
3018
3019 if (unlikely(in_migration)) {
a2cd8c85 3020 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
3021 /* invalidate code */
3022 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3023 /* set dirty bit */
6886867e 3024 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
74576198
AL
3025 }
3026 }
50013115
PM
3027 r = MEMTX_OK;
3028 }
3029 if (result) {
3030 *result = r;
8df1cd07
FB
3031 }
3032}
3033
50013115
PM
3034void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3035{
3036 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3037}
3038
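/* Sketch of the PTE use case mentioned above: a target MMU helper setting
 * accessed/dirty bits in a guest page-table entry.  The _notdirty variant
 * avoids flagging the RAM page as dirty just because of page-table
 * bookkeeping.  example_update_pte() and the bit mask are illustrative and
 * not taken from any particular target.
 */
static void example_update_pte(AddressSpace *as, hwaddr pte_addr,
                               uint32_t accessed_dirty_bits)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    pte |= accessed_dirty_bits;
    stl_phys_notdirty(as, pte_addr, pte);
}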
8df1cd07 3039/* warning: addr must be aligned */
50013115
PM
3040static inline void address_space_stl_internal(AddressSpace *as,
3041 hwaddr addr, uint32_t val,
3042 MemTxAttrs attrs,
3043 MemTxResult *result,
3044 enum device_endian endian)
8df1cd07 3045{
8df1cd07 3046 uint8_t *ptr;
5c8a00ce 3047 MemoryRegion *mr;
149f54b5
PB
3048 hwaddr l = 4;
3049 hwaddr addr1;
50013115 3050 MemTxResult r;
8df1cd07 3051
ab1da857 3052 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3053 true);
3054 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3055#if defined(TARGET_WORDS_BIGENDIAN)
3056 if (endian == DEVICE_LITTLE_ENDIAN) {
3057 val = bswap32(val);
3058 }
3059#else
3060 if (endian == DEVICE_BIG_ENDIAN) {
3061 val = bswap32(val);
3062 }
3063#endif
50013115 3064 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3065 } else {
8df1cd07 3066 /* RAM case */
5c8a00ce 3067 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3068 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3069 switch (endian) {
3070 case DEVICE_LITTLE_ENDIAN:
3071 stl_le_p(ptr, val);
3072 break;
3073 case DEVICE_BIG_ENDIAN:
3074 stl_be_p(ptr, val);
3075 break;
3076 default:
3077 stl_p(ptr, val);
3078 break;
3079 }
51d7a9eb 3080 invalidate_and_set_dirty(addr1, 4);
50013115
PM
3081 r = MEMTX_OK;
3082 }
3083 if (result) {
3084 *result = r;
8df1cd07
FB
3085 }
3086}
3087
50013115
PM
3088void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3089 MemTxAttrs attrs, MemTxResult *result)
3090{
3091 address_space_stl_internal(as, addr, val, attrs, result,
3092 DEVICE_NATIVE_ENDIAN);
3093}
3094
3095void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3096 MemTxAttrs attrs, MemTxResult *result)
3097{
3098 address_space_stl_internal(as, addr, val, attrs, result,
3099 DEVICE_LITTLE_ENDIAN);
3100}
3101
3102void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3103 MemTxAttrs attrs, MemTxResult *result)
3104{
3105 address_space_stl_internal(as, addr, val, attrs, result,
3106 DEVICE_BIG_ENDIAN);
3107}
3108
ab1da857 3109void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3110{
50013115 3111 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3112}
3113
ab1da857 3114void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3115{
50013115 3116 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3117}
3118
ab1da857 3119void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3120{
50013115 3121 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3122}
3123
aab33094 3124/* XXX: optimize */
50013115
PM
3125void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3126 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
3127{
3128 uint8_t v = val;
50013115
PM
3129 MemTxResult r;
3130
3131 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3132 if (result) {
3133 *result = r;
3134 }
3135}
3136
3137void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3138{
3139 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
aab33094
FB
3140}
3141
733f0b02 3142/* warning: addr must be aligned */
50013115
PM
3143static inline void address_space_stw_internal(AddressSpace *as,
3144 hwaddr addr, uint32_t val,
3145 MemTxAttrs attrs,
3146 MemTxResult *result,
3147 enum device_endian endian)
aab33094 3148{
733f0b02 3149 uint8_t *ptr;
5c8a00ce 3150 MemoryRegion *mr;
149f54b5
PB
3151 hwaddr l = 2;
3152 hwaddr addr1;
50013115 3153 MemTxResult r;
733f0b02 3154
5ce5944d 3155 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 3156 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3157#if defined(TARGET_WORDS_BIGENDIAN)
3158 if (endian == DEVICE_LITTLE_ENDIAN) {
3159 val = bswap16(val);
3160 }
3161#else
3162 if (endian == DEVICE_BIG_ENDIAN) {
3163 val = bswap16(val);
3164 }
3165#endif
50013115 3166 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
733f0b02 3167 } else {
733f0b02 3168 /* RAM case */
5c8a00ce 3169 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 3170 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3171 switch (endian) {
3172 case DEVICE_LITTLE_ENDIAN:
3173 stw_le_p(ptr, val);
3174 break;
3175 case DEVICE_BIG_ENDIAN:
3176 stw_be_p(ptr, val);
3177 break;
3178 default:
3179 stw_p(ptr, val);
3180 break;
3181 }
51d7a9eb 3182 invalidate_and_set_dirty(addr1, 2);
50013115
PM
3183 r = MEMTX_OK;
3184 }
3185 if (result) {
3186 *result = r;
733f0b02 3187 }
aab33094
FB
3188}
3189
50013115
PM
3190void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3191 MemTxAttrs attrs, MemTxResult *result)
3192{
3193 address_space_stw_internal(as, addr, val, attrs, result,
3194 DEVICE_NATIVE_ENDIAN);
3195}
3196
3197void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3198 MemTxAttrs attrs, MemTxResult *result)
3199{
3200 address_space_stw_internal(as, addr, val, attrs, result,
3201 DEVICE_LITTLE_ENDIAN);
3202}
3203
3204void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3205 MemTxAttrs attrs, MemTxResult *result)
3206{
3207 address_space_stw_internal(as, addr, val, attrs, result,
3208 DEVICE_BIG_ENDIAN);
3209}
3210
5ce5944d 3211void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3212{
50013115 3213 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3214}
3215
5ce5944d 3216void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3217{
50013115 3218 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3219}
3220
5ce5944d 3221void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3222{
50013115 3223 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3224}
3225
aab33094 3226/* XXX: optimize */
50013115
PM
3227void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3228 MemTxAttrs attrs, MemTxResult *result)
aab33094 3229{
50013115 3230 MemTxResult r;
aab33094 3231 val = tswap64(val);
50013115
PM
3232 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3233 if (result) {
3234 *result = r;
3235 }
aab33094
FB
3236}
3237
50013115
PM
3238void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3239 MemTxAttrs attrs, MemTxResult *result)
1e78bcc1 3240{
50013115 3241 MemTxResult r;
1e78bcc1 3242 val = cpu_to_le64(val);
50013115
PM
3243 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3244 if (result) {
3245 *result = r;
3246 }
3247}
3248void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3249 MemTxAttrs attrs, MemTxResult *result)
3250{
3251 MemTxResult r;
3252 val = cpu_to_be64(val);
3253 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3254 if (result) {
3255 *result = r;
3256 }
3257}
3258
3259void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3260{
3261 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3262}
3263
3264void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3265{
3266 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3267}
3268
f606604f 3269void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1 3270{
50013115 3271 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3272}
3273
5e2972fd 3274/* virtual memory access for debug (includes writing to ROM) */
f17ec444 3275int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 3276 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3277{
3278 int l;
a8170e5e 3279 hwaddr phys_addr;
9b3c35e0 3280 target_ulong page;
13eb76e0
FB
3281
3282 while (len > 0) {
3283 page = addr & TARGET_PAGE_MASK;
f17ec444 3284 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
3285 /* if no physical page mapped, return an error */
3286 if (phys_addr == -1)
3287 return -1;
3288 l = (page + TARGET_PAGE_SIZE) - addr;
3289 if (l > len)
3290 l = len;
5e2972fd 3291 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
3292 if (is_write) {
3293 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3294 } else {
5c9eb028
PM
3295 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3296 buf, l, 0);
2e38847b 3297 }
13eb76e0
FB
3298 len -= l;
3299 buf += l;
3300 addr += l;
3301 }
3302 return 0;
3303}
a68fe89c 3304#endif
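/* Usage sketch for cpu_memory_rw_debug(): fetch a 32-bit value from guest
 * virtual memory on behalf of a debugger (the gdbstub uses this path).
 * example_debug_read_u32() is an illustrative wrapper, not an existing
 * helper.
 */
static bool example_debug_read_u32(CPUState *cpu, target_ulong vaddr,
                                   uint32_t *value)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return false;   /* some page in the range was not mapped */
    }
    *value = ldl_p(buf);    /* interpret the bytes in target byte order */
    return true;
}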
13eb76e0 3305
8e4a424b
BS
3306/*
3307 * A helper function for the _utterly broken_ virtio device model to find out if
3308 * it's running on a big endian machine. Don't do this at home kids!
3309 */
98ed8ecf
GK
3310bool target_words_bigendian(void);
3311bool target_words_bigendian(void)
8e4a424b
BS
3312{
3313#if defined(TARGET_WORDS_BIGENDIAN)
3314 return true;
3315#else
3316 return false;
3317#endif
3318}
3319
76f35538 3320#ifndef CONFIG_USER_ONLY
a8170e5e 3321bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 3322{
5c8a00ce 3323 MemoryRegion*mr;
149f54b5 3324 hwaddr l = 1;
76f35538 3325
5c8a00ce
PB
3326 mr = address_space_translate(&address_space_memory,
3327 phys_addr, &phys_addr, &l, false);
76f35538 3328
5c8a00ce
PB
3329 return !(memory_region_is_ram(mr) ||
3330 memory_region_is_romd(mr));
76f35538 3331}
bd2fa51f
MH
3332
3333void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3334{
3335 RAMBlock *block;
3336
0dc3f44a
MD
3337 rcu_read_lock();
3338 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5 3339 func(block->host, block->offset, block->used_length, opaque);
bd2fa51f 3340 }
0dc3f44a 3341 rcu_read_unlock();
bd2fa51f 3342}
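/* Sketch of a RAMBlockIterFunc callback matching the invocation above; the
 * iterator passes each block's host address, guest ram_addr_t offset and
 * used length.  The void return type and the example_count_ram()/total
 * names are assumptions for illustration, e.g.:
 *     ram_addr_t total = 0;
 *     qemu_ram_foreach_block(example_count_ram, &total);
 */
static void example_count_ram(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    ram_addr_t *total = opaque;

    *total += length;
}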
ec3f8c99 3343#endif