/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
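/*
 * Worked example (illustrative note, not part of the original source): for a
 * target with TARGET_PAGE_BITS == 12 (4 KiB pages), the radix tree needs
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6 levels of P_L2_SIZE (512) entry
 * nodes to cover the full 64-bit ADDR_SPACE_BITS physical address space.
 * Targets with larger pages need fewer levels; the formula adapts at
 * compile time.
 */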

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}
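/*
 * Illustrative example (not from the original source): a section that starts
 * at offset_within_address_space == 0x1000 with size.lo == 0x1000 covers
 * exactly the addresses 0x1000..0x1fff, which is what range_covers_byte()
 * checks; a section whose size.hi is non-zero spans the whole 64-bit space
 * and therefore covers any address.
 */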

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
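/*
 * Sketch of the walk above (added for explanation, assuming 4 KiB target
 * pages): the page index addr >> 12 is consumed 9 bits (P_L2_BITS) at a
 * time, most-significant chunk first.  A node's skip field says how many
 * levels the stored ptr jumps over, so after compaction a lookup can reach
 * its leaf MemoryRegionSection in fewer than P_L2_LEVELS iterations.
 */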

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
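/*
 * Usage sketch (added for illustration; see e.g. address_space_rw() and its
 * helpers for the real callers): a caller typically loops, translating the
 * current guest physical address, clamping the access to the returned *plen,
 * performing the access on the returned MemoryRegion at offset *xlat, and
 * then advancing addr by the number of bytes handled.  The whole loop must
 * run inside rcu_read_lock()/rcu_read_unlock().
 */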

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

b170fce3 468#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
469
470static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 471{
259186a7 472 CPUState *cpu = opaque;
a513fe19 473
5b6dd868
BS
474 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
475 version_id is increased. */
259186a7 476 cpu->interrupt_request &= ~0x01;
c01a71c1 477 tlb_flush(cpu, 1);
5b6dd868
BS
478
479 return 0;
a513fe19 480}
7501267e 481
6c3bff0e
PD
482static int cpu_common_pre_load(void *opaque)
483{
484 CPUState *cpu = opaque;
485
adee6424 486 cpu->exception_index = -1;
6c3bff0e
PD
487
488 return 0;
489}
490
491static bool cpu_common_exception_index_needed(void *opaque)
492{
493 CPUState *cpu = opaque;
494
adee6424 495 return tcg_enabled() && cpu->exception_index != -1;
6c3bff0e
PD
496}
497
498static const VMStateDescription vmstate_cpu_common_exception_index = {
499 .name = "cpu_common/exception_index",
500 .version_id = 1,
501 .minimum_version_id = 1,
5cd8cada 502 .needed = cpu_common_exception_index_needed,
6c3bff0e
PD
503 .fields = (VMStateField[]) {
504 VMSTATE_INT32(exception_index, CPUState),
505 VMSTATE_END_OF_LIST()
506 }
507};
508
bac05aa9
AS
509static bool cpu_common_crash_occurred_needed(void *opaque)
510{
511 CPUState *cpu = opaque;
512
513 return cpu->crash_occurred;
514}
515
516static const VMStateDescription vmstate_cpu_common_crash_occurred = {
517 .name = "cpu_common/crash_occurred",
518 .version_id = 1,
519 .minimum_version_id = 1,
520 .needed = cpu_common_crash_occurred_needed,
521 .fields = (VMStateField[]) {
522 VMSTATE_BOOL(crash_occurred, CPUState),
523 VMSTATE_END_OF_LIST()
524 }
525};
526
1a1562f5 527const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
528 .name = "cpu_common",
529 .version_id = 1,
530 .minimum_version_id = 1,
6c3bff0e 531 .pre_load = cpu_common_pre_load,
5b6dd868 532 .post_load = cpu_common_post_load,
35d08458 533 .fields = (VMStateField[]) {
259186a7
AF
534 VMSTATE_UINT32(halted, CPUState),
535 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 536 VMSTATE_END_OF_LIST()
6c3bff0e 537 },
5cd8cada
JQ
538 .subsections = (const VMStateDescription*[]) {
539 &vmstate_cpu_common_exception_index,
bac05aa9 540 &vmstate_cpu_common_crash_occurred,
5cd8cada 541 NULL
5b6dd868
BS
542 }
543};
1a1562f5 544
5b6dd868 545#endif
ea041c0e 546
38d8f5c8 547CPUState *qemu_get_cpu(int index)
ea041c0e 548{
bdc44640 549 CPUState *cpu;
ea041c0e 550
bdc44640 551 CPU_FOREACH(cpu) {
55e5c285 552 if (cpu->cpu_index == index) {
bdc44640 553 return cpu;
55e5c285 554 }
ea041c0e 555 }
5b6dd868 556
bdc44640 557 return NULL;
ea041c0e
FB
558}
559
09daed84 560#if !defined(CONFIG_USER_ONLY)
56943e8c 561void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
09daed84 562{
12ebc9a7
PM
563 CPUAddressSpace *newas;
564
565 /* Target code should have set num_ases before calling us */
566 assert(asidx < cpu->num_ases);
567
56943e8c
PM
568 if (asidx == 0) {
569 /* address space 0 gets the convenience alias */
570 cpu->as = as;
571 }
572
12ebc9a7
PM
573 /* KVM cannot currently support multiple address spaces. */
574 assert(asidx == 0 || !kvm_enabled());
09daed84 575
12ebc9a7
PM
576 if (!cpu->cpu_ases) {
577 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
09daed84 578 }
32857f4d 579
12ebc9a7
PM
580 newas = &cpu->cpu_ases[asidx];
581 newas->cpu = cpu;
582 newas->as = as;
56943e8c 583 if (tcg_enabled()) {
12ebc9a7
PM
584 newas->tcg_as_listener.commit = tcg_commit;
585 memory_listener_register(&newas->tcg_as_listener, as);
56943e8c 586 }
09daed84 587}
651a5bc0
PM
588
589AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
590{
591 /* Return the AddressSpace corresponding to the specified index */
592 return cpu->cpu_ases[asidx].as;
593}
09daed84
EI
594#endif
595
b7bca733
BR
596#ifndef CONFIG_USER_ONLY
597static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
598
599static int cpu_get_free_index(Error **errp)
600{
601 int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
602
603 if (cpu >= MAX_CPUMASK_BITS) {
604 error_setg(errp, "Trying to use more CPUs than max of %d",
605 MAX_CPUMASK_BITS);
606 return -1;
607 }
608
609 bitmap_set(cpu_index_map, cpu, 1);
610 return cpu;
611}
612
613void cpu_exec_exit(CPUState *cpu)
614{
615 if (cpu->cpu_index == -1) {
616 /* cpu_index was never allocated by this @cpu or was already freed. */
617 return;
618 }
619
620 bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
621 cpu->cpu_index = -1;
622}
623#else
624
625static int cpu_get_free_index(Error **errp)
626{
627 CPUState *some_cpu;
628 int cpu_index = 0;
629
630 CPU_FOREACH(some_cpu) {
631 cpu_index++;
632 }
633 return cpu_index;
634}
635
636void cpu_exec_exit(CPUState *cpu)
637{
638}
639#endif
640
4bad9e39 641void cpu_exec_init(CPUState *cpu, Error **errp)
ea041c0e 642{
b170fce3 643 CPUClass *cc = CPU_GET_CLASS(cpu);
5b6dd868 644 int cpu_index;
b7bca733 645 Error *local_err = NULL;
5b6dd868 646
56943e8c 647 cpu->as = NULL;
12ebc9a7 648 cpu->num_ases = 0;
56943e8c 649
291135b5 650#ifndef CONFIG_USER_ONLY
291135b5 651 cpu->thread_id = qemu_get_thread_id();
6731d864
PC
652
653 /* This is a softmmu CPU object, so create a property for it
654 * so users can wire up its memory. (This can't go in qom/cpu.c
655 * because that file is compiled only once for both user-mode
656 * and system builds.) The default if no link is set up is to use
657 * the system address space.
658 */
659 object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
660 (Object **)&cpu->memory,
661 qdev_prop_allow_set_link_before_realize,
662 OBJ_PROP_LINK_UNREF_ON_RELEASE,
663 &error_abort);
664 cpu->memory = system_memory;
665 object_ref(OBJECT(cpu->memory));
291135b5
EH
666#endif
667
5b6dd868
BS
668#if defined(CONFIG_USER_ONLY)
669 cpu_list_lock();
670#endif
b7bca733
BR
671 cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
672 if (local_err) {
673 error_propagate(errp, local_err);
674#if defined(CONFIG_USER_ONLY)
675 cpu_list_unlock();
676#endif
677 return;
5b6dd868 678 }
bdc44640 679 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
680#if defined(CONFIG_USER_ONLY)
681 cpu_list_unlock();
682#endif
e0d47944
AF
683 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
684 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
685 }
b170fce3
AF
686 if (cc->vmsd != NULL) {
687 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
688 }
ea041c0e
FB
689}
690
94df27fd 691#if defined(CONFIG_USER_ONLY)
00b941e5 692static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
693{
694 tb_invalidate_phys_page_range(pc, pc + 1, 0);
695}
696#else
00b941e5 697static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 698{
5232e4c7
PM
699 MemTxAttrs attrs;
700 hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
701 int asidx = cpu_asidx_from_attrs(cpu, attrs);
e8262a1b 702 if (phys != -1) {
5232e4c7 703 tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
29d8ec7b 704 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 705 }
1e7855a5 706}
c27004ec 707#endif
d720b93d 708
c527ee8f 709#if defined(CONFIG_USER_ONLY)
75a34036 710void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
711
712{
713}
714
3ee887e8
PM
715int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
716 int flags)
717{
718 return -ENOSYS;
719}
720
721void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
722{
723}
724
75a34036 725int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
726 int flags, CPUWatchpoint **watchpoint)
727{
728 return -ENOSYS;
729}
730#else
6658ffb8 731/* Add a watchpoint. */
75a34036 732int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 733 int flags, CPUWatchpoint **watchpoint)
6658ffb8 734{
c0ce998e 735 CPUWatchpoint *wp;
6658ffb8 736
05068c0d 737 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 738 if (len == 0 || (addr + len - 1) < addr) {
75a34036
AF
739 error_report("tried to set invalid watchpoint at %"
740 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
741 return -EINVAL;
742 }
7267c094 743 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
744
745 wp->vaddr = addr;
05068c0d 746 wp->len = len;
a1d1bb31
AL
747 wp->flags = flags;
748
2dc9f411 749 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
750 if (flags & BP_GDB) {
751 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
752 } else {
753 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
754 }
6658ffb8 755
31b030d4 756 tlb_flush_page(cpu, addr);
a1d1bb31
AL
757
758 if (watchpoint)
759 *watchpoint = wp;
760 return 0;
6658ffb8
PB
761}
762
a1d1bb31 763/* Remove a specific watchpoint. */
75a34036 764int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 765 int flags)
6658ffb8 766{
a1d1bb31 767 CPUWatchpoint *wp;
6658ffb8 768
ff4700b0 769 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 770 if (addr == wp->vaddr && len == wp->len
6e140f28 771 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 772 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
773 return 0;
774 }
775 }
a1d1bb31 776 return -ENOENT;
6658ffb8
PB
777}
778
a1d1bb31 779/* Remove a specific watchpoint by reference. */
75a34036 780void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 781{
ff4700b0 782 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 783
31b030d4 784 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 785
7267c094 786 g_free(watchpoint);
a1d1bb31
AL
787}
788
789/* Remove all matching watchpoints. */
75a34036 790void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 791{
c0ce998e 792 CPUWatchpoint *wp, *next;
a1d1bb31 793
ff4700b0 794 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
795 if (wp->flags & mask) {
796 cpu_watchpoint_remove_by_ref(cpu, wp);
797 }
c0ce998e 798 }
7d03f82f 799}
05068c0d
PM
800
801/* Return true if this watchpoint address matches the specified
802 * access (ie the address range covered by the watchpoint overlaps
803 * partially or completely with the address range covered by the
804 * access).
805 */
806static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
807 vaddr addr,
808 vaddr len)
809{
810 /* We know the lengths are non-zero, but a little caution is
811 * required to avoid errors in the case where the range ends
812 * exactly at the top of the address space and so addr + len
813 * wraps round to zero.
814 */
815 vaddr wpend = wp->vaddr + wp->len - 1;
816 vaddr addrend = addr + len - 1;
817
818 return !(addr > wpend || wp->vaddr > addrend);
819}
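/* Example of the overlap test above (added for illustration): a watchpoint
 * with vaddr == 0x1000 and len == 4 has wpend == 0x1003, so an access at
 * addr == 0x1002 with len == 2 (addrend == 0x1003) matches, while an access
 * at addr == 0x1004 does not.  Computing the inclusive end addresses first
 * keeps the comparison safe even at the very top of the address space.
 */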
820
c527ee8f 821#endif
7d03f82f 822
a1d1bb31 823/* Add a breakpoint. */
b3310ab3 824int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 825 CPUBreakpoint **breakpoint)
4c3a88a2 826{
c0ce998e 827 CPUBreakpoint *bp;
3b46e624 828
7267c094 829 bp = g_malloc(sizeof(*bp));
4c3a88a2 830
a1d1bb31
AL
831 bp->pc = pc;
832 bp->flags = flags;
833
2dc9f411 834 /* keep all GDB-injected breakpoints in front */
00b941e5 835 if (flags & BP_GDB) {
f0c3c505 836 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 837 } else {
f0c3c505 838 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 839 }
3b46e624 840
f0c3c505 841 breakpoint_invalidate(cpu, pc);
a1d1bb31 842
00b941e5 843 if (breakpoint) {
a1d1bb31 844 *breakpoint = bp;
00b941e5 845 }
4c3a88a2 846 return 0;
4c3a88a2
FB
847}
848
a1d1bb31 849/* Remove a specific breakpoint. */
b3310ab3 850int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 851{
a1d1bb31
AL
852 CPUBreakpoint *bp;
853
f0c3c505 854 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 855 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 856 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
857 return 0;
858 }
7d03f82f 859 }
a1d1bb31 860 return -ENOENT;
7d03f82f
EI
861}
862
a1d1bb31 863/* Remove a specific breakpoint by reference. */
b3310ab3 864void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 865{
f0c3c505
AF
866 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
867
868 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 869
7267c094 870 g_free(breakpoint);
a1d1bb31
AL
871}
872
873/* Remove all matching breakpoints. */
b3310ab3 874void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 875{
c0ce998e 876 CPUBreakpoint *bp, *next;
a1d1bb31 877
f0c3c505 878 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
879 if (bp->flags & mask) {
880 cpu_breakpoint_remove_by_ref(cpu, bp);
881 }
c0ce998e 882 }
4c3a88a2
FB
883}
884
c33a346e
FB
885/* enable or disable single step mode. EXCP_DEBUG is returned by the
886 CPU loop after each instruction */
3825b28f 887void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 888{
ed2803da
AF
889 if (cpu->singlestep_enabled != enabled) {
890 cpu->singlestep_enabled = enabled;
891 if (kvm_enabled()) {
38e478ec 892 kvm_update_guest_debug(cpu, 0);
ed2803da 893 } else {
ccbb4d44 894 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 895 /* XXX: only flush what is necessary */
bbd77c18 896 tb_flush(cpu);
e22a25c9 897 }
c33a346e 898 }
c33a346e
FB
899}
900
a47dddd7 901void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
902{
903 va_list ap;
493ae1f0 904 va_list ap2;
7501267e
FB
905
906 va_start(ap, fmt);
493ae1f0 907 va_copy(ap2, ap);
7501267e
FB
908 fprintf(stderr, "qemu: fatal: ");
909 vfprintf(stderr, fmt, ap);
910 fprintf(stderr, "\n");
878096ee 911 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
013a2942 912 if (qemu_log_separate()) {
93fcfe39
AL
913 qemu_log("qemu: fatal: ");
914 qemu_log_vprintf(fmt, ap2);
915 qemu_log("\n");
a0762859 916 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 917 qemu_log_flush();
93fcfe39 918 qemu_log_close();
924edcae 919 }
493ae1f0 920 va_end(ap2);
f9373291 921 va_end(ap);
7615936e 922 replay_finish();
fd052bf6
RV
923#if defined(CONFIG_USER_ONLY)
924 {
925 struct sigaction act;
926 sigfillset(&act.sa_mask);
927 act.sa_handler = SIG_DFL;
928 sigaction(SIGABRT, &act, NULL);
929 }
930#endif
7501267e
FB
931 abort();
932}
933
0124311e 934#if !defined(CONFIG_USER_ONLY)
0dc3f44a 935/* Called from RCU critical section */
041603fe
PB
936static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
937{
938 RAMBlock *block;
939
43771539 940 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 941 if (block && addr - block->offset < block->max_length) {
68851b98 942 return block;
041603fe 943 }
0dc3f44a 944 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5 945 if (addr - block->offset < block->max_length) {
041603fe
PB
946 goto found;
947 }
948 }
949
950 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
951 abort();
952
953found:
43771539
PB
954 /* It is safe to write mru_block outside the iothread lock. This
955 * is what happens:
956 *
957 * mru_block = xxx
958 * rcu_read_unlock()
959 * xxx removed from list
960 * rcu_read_lock()
961 * read mru_block
962 * mru_block = NULL;
963 * call_rcu(reclaim_ramblock, xxx);
964 * rcu_read_unlock()
965 *
966 * atomic_rcu_set is not needed here. The block was already published
967 * when it was placed into the list. Here we're just making an extra
968 * copy of the pointer.
969 */
041603fe
PB
970 ram_list.mru_block = block;
971 return block;
972}
973
a2f4d5be 974static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 975{
9a13565d 976 CPUState *cpu;
041603fe 977 ram_addr_t start1;
a2f4d5be
JQ
978 RAMBlock *block;
979 ram_addr_t end;
980
981 end = TARGET_PAGE_ALIGN(start + length);
982 start &= TARGET_PAGE_MASK;
d24981d3 983
0dc3f44a 984 rcu_read_lock();
041603fe
PB
985 block = qemu_get_ram_block(start);
986 assert(block == qemu_get_ram_block(end - 1));
1240be24 987 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
9a13565d
PC
988 CPU_FOREACH(cpu) {
989 tlb_reset_dirty(cpu, start1, length);
990 }
0dc3f44a 991 rcu_read_unlock();
d24981d3
JQ
992}
993
5579c7f3 994/* Note: start and end must be within the same ram block. */
03eebc9e
SH
995bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
996 ram_addr_t length,
997 unsigned client)
1ccde1cb 998{
5b82b703 999 DirtyMemoryBlocks *blocks;
03eebc9e 1000 unsigned long end, page;
5b82b703 1001 bool dirty = false;
03eebc9e
SH
1002
1003 if (length == 0) {
1004 return false;
1005 }
f23db169 1006
03eebc9e
SH
1007 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
1008 page = start >> TARGET_PAGE_BITS;
5b82b703
SH
1009
1010 rcu_read_lock();
1011
1012 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1013
1014 while (page < end) {
1015 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1016 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1017 unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
1018
1019 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
1020 offset, num);
1021 page += num;
1022 }
1023
1024 rcu_read_unlock();
03eebc9e
SH
1025
1026 if (dirty && tcg_enabled()) {
a2f4d5be 1027 tlb_reset_dirty_range_all(start, length);
5579c7f3 1028 }
03eebc9e
SH
1029
1030 return dirty;
1ccde1cb
FB
1031}
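/* Note added for explanation: the loop above walks the dirty bitmap in
 * chunks.  Each page number is split into a block index
 * (page / DIRTY_MEMORY_BLOCK_SIZE) and an offset within that block, and
 * bitmap_test_and_clear_atomic() handles at most the remainder of one block
 * per iteration, so a range that straddles block boundaries simply takes
 * several iterations.
 */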
1032
79e2b9ae 1033/* Called from RCU critical section */
bb0e627a 1034hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
1035 MemoryRegionSection *section,
1036 target_ulong vaddr,
1037 hwaddr paddr, hwaddr xlat,
1038 int prot,
1039 target_ulong *address)
e5548617 1040{
a8170e5e 1041 hwaddr iotlb;
e5548617
BS
1042 CPUWatchpoint *wp;
1043
cc5bea60 1044 if (memory_region_is_ram(section->mr)) {
e5548617
BS
1045 /* Normal RAM. */
1046 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 1047 + xlat;
e5548617 1048 if (!section->readonly) {
b41aac4f 1049 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 1050 } else {
b41aac4f 1051 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
1052 }
1053 } else {
0b8e2c10
PM
1054 AddressSpaceDispatch *d;
1055
1056 d = atomic_rcu_read(&section->address_space->dispatch);
1057 iotlb = section - d->map.sections;
149f54b5 1058 iotlb += xlat;
e5548617
BS
1059 }
1060
1061 /* Make accesses to pages with watchpoints go via the
1062 watchpoint trap routines. */
ff4700b0 1063 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 1064 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
1065 /* Avoid trapping reads of pages with a write breakpoint. */
1066 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 1067 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
1068 *address |= TLB_MMIO;
1069 break;
1070 }
1071 }
1072 }
1073
1074 return iotlb;
1075}
9fa3e853
FB
1076#endif /* defined(CONFIG_USER_ONLY) */
1077
e2eef170 1078#if !defined(CONFIG_USER_ONLY)
8da3ff18 1079
c227f099 1080static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1081 uint16_t section);
acc9d80b 1082static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 1083
a2b257d6
IM
1084static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1085 qemu_anon_ram_alloc;
91138037
MA
1086
1087/*
1088 * Set a custom physical guest memory allocator.
1089 * Accelerators with unusual needs may need this. Hopefully, we can
1090 * get rid of it eventually.
1091 */
a2b257d6 1092void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
91138037
MA
1093{
1094 phys_mem_alloc = alloc;
1095}
1096
53cb28cb
MA
1097static uint16_t phys_section_add(PhysPageMap *map,
1098 MemoryRegionSection *section)
5312bd8b 1099{
68f3f65b
PB
1100 /* The physical section number is ORed with a page-aligned
1101 * pointer to produce the iotlb entries. Thus it should
1102 * never overflow into the page-aligned value.
1103 */
53cb28cb 1104 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 1105
53cb28cb
MA
1106 if (map->sections_nb == map->sections_nb_alloc) {
1107 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1108 map->sections = g_renew(MemoryRegionSection, map->sections,
1109 map->sections_nb_alloc);
5312bd8b 1110 }
53cb28cb 1111 map->sections[map->sections_nb] = *section;
dfde4e6e 1112 memory_region_ref(section->mr);
53cb28cb 1113 return map->sections_nb++;
5312bd8b
AK
1114}
1115
058bc4b5
PB
1116static void phys_section_destroy(MemoryRegion *mr)
1117{
55b4e80b
DS
1118 bool have_sub_page = mr->subpage;
1119
dfde4e6e
PB
1120 memory_region_unref(mr);
1121
55b4e80b 1122 if (have_sub_page) {
058bc4b5 1123 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 1124 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
1125 g_free(subpage);
1126 }
1127}
1128
6092666e 1129static void phys_sections_free(PhysPageMap *map)
5312bd8b 1130{
9affd6fc
PB
1131 while (map->sections_nb > 0) {
1132 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
1133 phys_section_destroy(section->mr);
1134 }
9affd6fc
PB
1135 g_free(map->sections);
1136 g_free(map->nodes);
5312bd8b
AK
1137}
1138
ac1970fb 1139static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
1140{
1141 subpage_t *subpage;
a8170e5e 1142 hwaddr base = section->offset_within_address_space
0f0cb164 1143 & TARGET_PAGE_MASK;
97115a8d 1144 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 1145 d->map.nodes, d->map.sections);
0f0cb164
AK
1146 MemoryRegionSection subsection = {
1147 .offset_within_address_space = base,
052e87b0 1148 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 1149 };
a8170e5e 1150 hwaddr start, end;
0f0cb164 1151
f3705d53 1152 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 1153
f3705d53 1154 if (!(existing->mr->subpage)) {
acc9d80b 1155 subpage = subpage_init(d->as, base);
3be91e86 1156 subsection.address_space = d->as;
0f0cb164 1157 subsection.mr = &subpage->iomem;
ac1970fb 1158 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 1159 phys_section_add(&d->map, &subsection));
0f0cb164 1160 } else {
f3705d53 1161 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
1162 }
1163 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 1164 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
1165 subpage_register(subpage, start, end,
1166 phys_section_add(&d->map, section));
0f0cb164
AK
1167}
1168
1169
052e87b0
PB
1170static void register_multipage(AddressSpaceDispatch *d,
1171 MemoryRegionSection *section)
33417e70 1172{
a8170e5e 1173 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 1174 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
1175 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1176 TARGET_PAGE_BITS));
dd81124b 1177
733d5ef5
PB
1178 assert(num_pages);
1179 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
1180}
1181
ac1970fb 1182static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 1183{
89ae337a 1184 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1185 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1186 MemoryRegionSection now = *section, remain = *section;
052e87b0 1187 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1188
733d5ef5
PB
1189 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1190 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1191 - now.offset_within_address_space;
1192
052e87b0 1193 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1194 register_subpage(d, &now);
733d5ef5 1195 } else {
052e87b0 1196 now.size = int128_zero();
733d5ef5 1197 }
052e87b0
PB
1198 while (int128_ne(remain.size, now.size)) {
1199 remain.size = int128_sub(remain.size, now.size);
1200 remain.offset_within_address_space += int128_get64(now.size);
1201 remain.offset_within_region += int128_get64(now.size);
69b67646 1202 now = remain;
052e87b0 1203 if (int128_lt(remain.size, page_size)) {
733d5ef5 1204 register_subpage(d, &now);
88266249 1205 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1206 now.size = page_size;
ac1970fb 1207 register_subpage(d, &now);
69b67646 1208 } else {
052e87b0 1209 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1210 register_multipage(d, &now);
69b67646 1211 }
0f0cb164
AK
1212 }
1213}
1214
62a2744c
SY
1215void qemu_flush_coalesced_mmio_buffer(void)
1216{
1217 if (kvm_enabled())
1218 kvm_flush_coalesced_mmio_buffer();
1219}
1220
b2a8658e
UD
1221void qemu_mutex_lock_ramlist(void)
1222{
1223 qemu_mutex_lock(&ram_list.mutex);
1224}
1225
1226void qemu_mutex_unlock_ramlist(void)
1227{
1228 qemu_mutex_unlock(&ram_list.mutex);
1229}
1230
e1e84ba0 1231#ifdef __linux__
04b16653
AW
1232static void *file_ram_alloc(RAMBlock *block,
1233 ram_addr_t memory,
7f56e740
PB
1234 const char *path,
1235 Error **errp)
c902760f 1236{
fd97fd44 1237 bool unlink_on_error = false;
c902760f 1238 char *filename;
8ca761f6
PF
1239 char *sanitized_name;
1240 char *c;
794e8f30 1241 void *area;
5c3ece79 1242 int fd = -1;
e1fb6471 1243 int64_t page_size;
c902760f
MT
1244
1245 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1246 error_setg(errp,
1247 "host lacks kvm mmu notifiers, -mem-path unsupported");
fd97fd44 1248 return NULL;
c902760f
MT
1249 }
1250
fd97fd44
MA
1251 for (;;) {
1252 fd = open(path, O_RDWR);
1253 if (fd >= 0) {
1254 /* @path names an existing file, use it */
1255 break;
8d31d6b6 1256 }
fd97fd44
MA
1257 if (errno == ENOENT) {
1258 /* @path names a file that doesn't exist, create it */
1259 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1260 if (fd >= 0) {
1261 unlink_on_error = true;
1262 break;
1263 }
1264 } else if (errno == EISDIR) {
1265 /* @path names a directory, create a file there */
1266 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1267 sanitized_name = g_strdup(memory_region_name(block->mr));
1268 for (c = sanitized_name; *c != '\0'; c++) {
1269 if (*c == '/') {
1270 *c = '_';
1271 }
1272 }
8ca761f6 1273
fd97fd44
MA
1274 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1275 sanitized_name);
1276 g_free(sanitized_name);
8d31d6b6 1277
fd97fd44
MA
1278 fd = mkstemp(filename);
1279 if (fd >= 0) {
1280 unlink(filename);
1281 g_free(filename);
1282 break;
1283 }
1284 g_free(filename);
8d31d6b6 1285 }
fd97fd44
MA
1286 if (errno != EEXIST && errno != EINTR) {
1287 error_setg_errno(errp, errno,
1288 "can't open backing store %s for guest RAM",
1289 path);
1290 goto error;
1291 }
1292 /*
1293 * Try again on EINTR and EEXIST. The latter happens when
1294 * something else creates the file between our two open().
1295 */
8d31d6b6 1296 }
c902760f 1297
e1fb6471
MA
1298 page_size = qemu_fd_getpagesize(fd);
1299 block->mr->align = page_size;
fd97fd44 1300
e1fb6471 1301 if (memory < page_size) {
fd97fd44
MA
1302 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1303 "or larger than page size 0x%" PRIx64,
e1fb6471 1304 memory, page_size);
f9a49dfa 1305 goto error;
c902760f 1306 }
c902760f 1307
e1fb6471 1308 memory = ROUND_UP(memory, page_size);
c902760f
MT
1309
1310 /*
1311 * ftruncate is not supported by hugetlbfs in older
1312 * hosts, so don't bother bailing out on errors.
1313 * If anything goes wrong with it under other filesystems,
1314 * mmap will fail.
1315 */
7f56e740 1316 if (ftruncate(fd, memory)) {
9742bf26 1317 perror("ftruncate");
7f56e740 1318 }
c902760f 1319
e1fb6471 1320 area = qemu_ram_mmap(fd, memory, page_size, block->flags & RAM_SHARED);
c902760f 1321 if (area == MAP_FAILED) {
7f56e740 1322 error_setg_errno(errp, errno,
fd97fd44 1323 "unable to map backing store for guest RAM");
f9a49dfa 1324 goto error;
c902760f 1325 }
ef36fa14
MT
1326
1327 if (mem_prealloc) {
38183310 1328 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1329 }
1330
04b16653 1331 block->fd = fd;
c902760f 1332 return area;
f9a49dfa
MT
1333
1334error:
fd97fd44
MA
1335 if (unlink_on_error) {
1336 unlink(path);
1337 }
5c3ece79
PB
1338 if (fd != -1) {
1339 close(fd);
1340 }
f9a49dfa 1341 return NULL;
c902760f
MT
1342}
1343#endif
1344
0dc3f44a 1345/* Called with the ramlist lock held. */
d17b5288 1346static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1347{
1348 RAMBlock *block, *next_block;
3e837b2c 1349 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1350
49cd9ac6
SH
1351 assert(size != 0); /* it would hand out same offset multiple times */
1352
0dc3f44a 1353 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
04b16653 1354 return 0;
0d53d9fe 1355 }
04b16653 1356
0dc3f44a 1357 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
f15fbc4b 1358 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653 1359
62be4e3a 1360 end = block->offset + block->max_length;
04b16653 1361
0dc3f44a 1362 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
04b16653
AW
1363 if (next_block->offset >= end) {
1364 next = MIN(next, next_block->offset);
1365 }
1366 }
1367 if (next - end >= size && next - end < mingap) {
3e837b2c 1368 offset = end;
04b16653
AW
1369 mingap = next - end;
1370 }
1371 }
3e837b2c
AW
1372
1373 if (offset == RAM_ADDR_MAX) {
1374 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1375 (uint64_t)size);
1376 abort();
1377 }
1378
04b16653
AW
1379 return offset;
1380}
1381
652d7ec2 1382ram_addr_t last_ram_offset(void)
d17b5288
AW
1383{
1384 RAMBlock *block;
1385 ram_addr_t last = 0;
1386
0dc3f44a
MD
1387 rcu_read_lock();
1388 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
62be4e3a 1389 last = MAX(last, block->offset + block->max_length);
0d53d9fe 1390 }
0dc3f44a 1391 rcu_read_unlock();
d17b5288
AW
1392 return last;
1393}
1394
ddb97f1d
JB
1395static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1396{
1397 int ret;
ddb97f1d
JB
1398
1399 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
47c8ca53 1400 if (!machine_dump_guest_core(current_machine)) {
ddb97f1d
JB
1401 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1402 if (ret) {
1403 perror("qemu_madvise");
1404 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1405 "but dump_guest_core=off specified\n");
1406 }
1407 }
1408}
1409
0dc3f44a
MD
1410/* Called within an RCU critical section, or while the ramlist lock
1411 * is held.
1412 */
20cfe881 1413static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1414{
20cfe881 1415 RAMBlock *block;
84b89d78 1416
0dc3f44a 1417 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1418 if (block->offset == addr) {
20cfe881 1419 return block;
c5705a77
AK
1420 }
1421 }
20cfe881
HT
1422
1423 return NULL;
1424}
1425
422148d3
DDAG
1426const char *qemu_ram_get_idstr(RAMBlock *rb)
1427{
1428 return rb->idstr;
1429}
1430
ae3a7047 1431/* Called with iothread lock held. */
20cfe881
HT
1432void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1433{
ae3a7047 1434 RAMBlock *new_block, *block;
20cfe881 1435
0dc3f44a 1436 rcu_read_lock();
ae3a7047 1437 new_block = find_ram_block(addr);
c5705a77
AK
1438 assert(new_block);
1439 assert(!new_block->idstr[0]);
84b89d78 1440
09e5ab63
AL
1441 if (dev) {
1442 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1443 if (id) {
1444 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1445 g_free(id);
84b89d78
CM
1446 }
1447 }
1448 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1449
0dc3f44a 1450 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1451 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1452 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1453 new_block->idstr);
1454 abort();
1455 }
1456 }
0dc3f44a 1457 rcu_read_unlock();
c5705a77
AK
1458}
1459
ae3a7047 1460/* Called with iothread lock held. */
20cfe881
HT
1461void qemu_ram_unset_idstr(ram_addr_t addr)
1462{
ae3a7047 1463 RAMBlock *block;
20cfe881 1464
ae3a7047
MD
1465 /* FIXME: arch_init.c assumes that this is not called throughout
1466 * migration. Ignore the problem since hot-unplug during migration
1467 * does not work anyway.
1468 */
1469
0dc3f44a 1470 rcu_read_lock();
ae3a7047 1471 block = find_ram_block(addr);
20cfe881
HT
1472 if (block) {
1473 memset(block->idstr, 0, sizeof(block->idstr));
1474 }
0dc3f44a 1475 rcu_read_unlock();
20cfe881
HT
1476}
1477
8490fc78
LC
1478static int memory_try_enable_merging(void *addr, size_t len)
1479{
75cc7f01 1480 if (!machine_mem_merge(current_machine)) {
8490fc78
LC
1481 /* disabled by the user */
1482 return 0;
1483 }
1484
1485 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1486}
1487
62be4e3a
MT
1488/* Only legal before guest might have detected the memory size: e.g. on
1489 * incoming migration, or right after reset.
1490 *
1491 * As the memory core doesn't know how the memory is accessed, it is up to
1492 * the resize callback to update device state and/or add assertions to detect
1493 * misuse, if necessary.
1494 */
1495int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1496{
1497 RAMBlock *block = find_ram_block(base);
1498
1499 assert(block);
1500
4ed023ce 1501 newsize = HOST_PAGE_ALIGN(newsize);
129ddaf3 1502
62be4e3a
MT
1503 if (block->used_length == newsize) {
1504 return 0;
1505 }
1506
1507 if (!(block->flags & RAM_RESIZEABLE)) {
1508 error_setg_errno(errp, EINVAL,
1509 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1510 " in != 0x" RAM_ADDR_FMT, block->idstr,
1511 newsize, block->used_length);
1512 return -EINVAL;
1513 }
1514
1515 if (block->max_length < newsize) {
1516 error_setg_errno(errp, EINVAL,
1517 "Length too large: %s: 0x" RAM_ADDR_FMT
1518 " > 0x" RAM_ADDR_FMT, block->idstr,
1519 newsize, block->max_length);
1520 return -EINVAL;
1521 }
1522
1523 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1524 block->used_length = newsize;
58d2707e
PB
1525 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1526 DIRTY_CLIENTS_ALL);
62be4e3a
MT
1527 memory_region_set_size(block->mr, newsize);
1528 if (block->resized) {
1529 block->resized(block->idstr, newsize, block->host);
1530 }
1531 return 0;
1532}
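/* Usage sketch (illustrative only, not from the original source): a device
 * that wants a grow-able region allocates it with
 *
 *     qemu_ram_alloc_resizeable(initial_size, max_size, resized_cb, mr, &err);
 *
 * and may later call qemu_ram_resize(block->offset, new_size, &err), e.g.
 * on incoming migration, as long as new_size stays within max_size; the
 * RAM_RESIZEABLE flag set at allocation time is what makes the call legal.
 */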
1533
5b82b703
SH
1534/* Called with ram_list.mutex held */
1535static void dirty_memory_extend(ram_addr_t old_ram_size,
1536 ram_addr_t new_ram_size)
1537{
1538 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1539 DIRTY_MEMORY_BLOCK_SIZE);
1540 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1541 DIRTY_MEMORY_BLOCK_SIZE);
1542 int i;
1543
1544 /* Only need to extend if block count increased */
1545 if (new_num_blocks <= old_num_blocks) {
1546 return;
1547 }
1548
1549 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1550 DirtyMemoryBlocks *old_blocks;
1551 DirtyMemoryBlocks *new_blocks;
1552 int j;
1553
1554 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1555 new_blocks = g_malloc(sizeof(*new_blocks) +
1556 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1557
1558 if (old_num_blocks) {
1559 memcpy(new_blocks->blocks, old_blocks->blocks,
1560 old_num_blocks * sizeof(old_blocks->blocks[0]));
1561 }
1562
1563 for (j = old_num_blocks; j < new_num_blocks; j++) {
1564 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1565 }
1566
1567 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1568
1569 if (old_blocks) {
1570 g_free_rcu(old_blocks, rcu);
1571 }
1572 }
1573}
1574
528f46af 1575static void ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1576{
e1c57ab8 1577 RAMBlock *block;
0d53d9fe 1578 RAMBlock *last_block = NULL;
2152f5ca 1579 ram_addr_t old_ram_size, new_ram_size;
37aa7a0e 1580 Error *err = NULL;
2152f5ca
JQ
1581
1582 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1583
b2a8658e 1584 qemu_mutex_lock_ramlist();
9b8424d5 1585 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1586
1587 if (!new_block->host) {
1588 if (xen_enabled()) {
9b8424d5 1589 xen_ram_alloc(new_block->offset, new_block->max_length,
37aa7a0e
MA
1590 new_block->mr, &err);
1591 if (err) {
1592 error_propagate(errp, err);
1593 qemu_mutex_unlock_ramlist();
39c350ee 1594 return;
37aa7a0e 1595 }
e1c57ab8 1596 } else {
9b8424d5 1597 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1598 &new_block->mr->align);
39228250 1599 if (!new_block->host) {
ef701d7b
HT
1600 error_setg_errno(errp, errno,
1601 "cannot set up guest memory '%s'",
1602 memory_region_name(new_block->mr));
1603 qemu_mutex_unlock_ramlist();
39c350ee 1604 return;
39228250 1605 }
9b8424d5 1606 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1607 }
c902760f 1608 }
94a6b54f 1609
dd631697
LZ
1610 new_ram_size = MAX(old_ram_size,
1611 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1612 if (new_ram_size > old_ram_size) {
1613 migration_bitmap_extend(old_ram_size, new_ram_size);
5b82b703 1614 dirty_memory_extend(old_ram_size, new_ram_size);
dd631697 1615 }
0d53d9fe
MD
1616 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1617 * QLIST (which has an RCU-friendly variant) does not have insertion at
1618 * tail, so save the last element in last_block.
1619 */
0dc3f44a 1620 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
0d53d9fe 1621 last_block = block;
9b8424d5 1622 if (block->max_length < new_block->max_length) {
abb26d63
PB
1623 break;
1624 }
1625 }
1626 if (block) {
0dc3f44a 1627 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
0d53d9fe 1628 } else if (last_block) {
0dc3f44a 1629 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
0d53d9fe 1630 } else { /* list is empty */
0dc3f44a 1631 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
abb26d63 1632 }
0d6d3c87 1633 ram_list.mru_block = NULL;
94a6b54f 1634
0dc3f44a
MD
1635 /* Write list before version */
1636 smp_wmb();
f798b07f 1637 ram_list.version++;
b2a8658e 1638 qemu_mutex_unlock_ramlist();
f798b07f 1639
9b8424d5 1640 cpu_physical_memory_set_dirty_range(new_block->offset,
58d2707e
PB
1641 new_block->used_length,
1642 DIRTY_CLIENTS_ALL);
94a6b54f 1643
a904c911
PB
1644 if (new_block->host) {
1645 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1646 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1647 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1648 if (kvm_enabled()) {
1649 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1650 }
e1c57ab8 1651 }
94a6b54f 1652}
e9a1ab19 1653
0b183fc8 1654#ifdef __linux__
528f46af
FZ
1655RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1656 bool share, const char *mem_path,
1657 Error **errp)
e1c57ab8
PB
1658{
1659 RAMBlock *new_block;
ef701d7b 1660 Error *local_err = NULL;
e1c57ab8
PB
1661
1662 if (xen_enabled()) {
7f56e740 1663 error_setg(errp, "-mem-path not supported with Xen");
528f46af 1664 return NULL;
e1c57ab8
PB
1665 }
1666
1667 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1668 /*
1669 * file_ram_alloc() needs to allocate just like
1670 * phys_mem_alloc, but we haven't bothered to provide
1671 * a hook there.
1672 */
7f56e740
PB
1673 error_setg(errp,
1674 "-mem-path not supported with this accelerator");
528f46af 1675 return NULL;
e1c57ab8
PB
1676 }
1677
4ed023ce 1678 size = HOST_PAGE_ALIGN(size);
e1c57ab8
PB
1679 new_block = g_malloc0(sizeof(*new_block));
1680 new_block->mr = mr;
9b8424d5
MT
1681 new_block->used_length = size;
1682 new_block->max_length = size;
dbcb8981 1683 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1684 new_block->host = file_ram_alloc(new_block, size,
1685 mem_path, errp);
1686 if (!new_block->host) {
1687 g_free(new_block);
528f46af 1688 return NULL;
7f56e740
PB
1689 }
1690
528f46af 1691 ram_block_add(new_block, &local_err);
ef701d7b
HT
1692 if (local_err) {
1693 g_free(new_block);
1694 error_propagate(errp, local_err);
528f46af 1695 return NULL;
ef701d7b 1696 }
528f46af 1697 return new_block;
e1c57ab8 1698}
0b183fc8 1699#endif
e1c57ab8 1700
62be4e3a 1701static
528f46af
FZ
1702RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1703 void (*resized)(const char*,
1704 uint64_t length,
1705 void *host),
1706 void *host, bool resizeable,
1707 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1708{
1709 RAMBlock *new_block;
ef701d7b 1710 Error *local_err = NULL;
e1c57ab8 1711
4ed023ce
DDAG
1712 size = HOST_PAGE_ALIGN(size);
1713 max_size = HOST_PAGE_ALIGN(max_size);
e1c57ab8
PB
1714 new_block = g_malloc0(sizeof(*new_block));
1715 new_block->mr = mr;
62be4e3a 1716 new_block->resized = resized;
9b8424d5
MT
1717 new_block->used_length = size;
1718 new_block->max_length = max_size;
62be4e3a 1719 assert(max_size >= size);
e1c57ab8
PB
1720 new_block->fd = -1;
1721 new_block->host = host;
1722 if (host) {
7bd4f430 1723 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1724 }
62be4e3a
MT
1725 if (resizeable) {
1726 new_block->flags |= RAM_RESIZEABLE;
1727 }
528f46af 1728 ram_block_add(new_block, &local_err);
ef701d7b
HT
1729 if (local_err) {
1730 g_free(new_block);
1731 error_propagate(errp, local_err);
528f46af 1732 return NULL;
ef701d7b 1733 }
528f46af 1734 return new_block;
e1c57ab8
PB
1735}
1736
528f46af 1737RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
62be4e3a
MT
1738 MemoryRegion *mr, Error **errp)
1739{
1740 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1741}
1742
528f46af 1743RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1744{
62be4e3a
MT
1745 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1746}
1747
528f46af 1748RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
62be4e3a
MT
1749 void (*resized)(const char*,
1750 uint64_t length,
1751 void *host),
1752 MemoryRegion *mr, Error **errp)
1753{
1754 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
1755}
1756
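/*
 * Illustrative sketch (not part of exec.c): driving the allocators above
 * directly.  Most callers go through the MemoryRegion layer instead; the
 * caller name and the use of error_report_err() are assumptions of this
 * example.
 */
static RAMBlock *example_alloc_guest_ram(ram_addr_t size, MemoryRegion *mr)
{
    Error *err = NULL;
    RAMBlock *rb = qemu_ram_alloc(size, mr, &err);   /* fixed-size block */

    if (!rb) {
        error_report_err(err);          /* assumed error-reporting helper */
        return NULL;
    }
    /* ... the block is eventually released with qemu_ram_free(rb) ... */
    return rb;
}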
43771539
PB
1757static void reclaim_ramblock(RAMBlock *block)
1758{
1759 if (block->flags & RAM_PREALLOC) {
1760 ;
1761 } else if (xen_enabled()) {
1762 xen_invalidate_map_cache_entry(block->host);
1763#ifndef _WIN32
1764 } else if (block->fd >= 0) {
2f3a2bb1 1765 qemu_ram_munmap(block->host, block->max_length);
43771539
PB
1766 close(block->fd);
1767#endif
1768 } else {
1769 qemu_anon_ram_free(block->host, block->max_length);
1770 }
1771 g_free(block);
1772}
1773
f1060c55 1774void qemu_ram_free(RAMBlock *block)
e9a1ab19 1775{
85bc2a15
MAL
1776 if (!block) {
1777 return;
1778 }
1779
b2a8658e 1780 qemu_mutex_lock_ramlist();
f1060c55
FZ
1781 QLIST_REMOVE_RCU(block, next);
1782 ram_list.mru_block = NULL;
1783 /* Write list before version */
1784 smp_wmb();
1785 ram_list.version++;
1786 call_rcu(block, reclaim_ramblock, rcu);
b2a8658e 1787 qemu_mutex_unlock_ramlist();
e9a1ab19
FB
1788}
1789
cd19cfa2
HY
1790#ifndef _WIN32
1791void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1792{
1793 RAMBlock *block;
1794 ram_addr_t offset;
1795 int flags;
1796 void *area, *vaddr;
1797
0dc3f44a 1798 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
cd19cfa2 1799 offset = addr - block->offset;
9b8424d5 1800 if (offset < block->max_length) {
1240be24 1801 vaddr = ramblock_ptr(block, offset);
7bd4f430 1802 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1803 ;
dfeaf2ab
MA
1804 } else if (xen_enabled()) {
1805 abort();
cd19cfa2
HY
1806 } else {
1807 flags = MAP_FIXED;
3435f395 1808 if (block->fd >= 0) {
dbcb8981
PB
1809 flags |= (block->flags & RAM_SHARED ?
1810 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1811 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1812 flags, block->fd, offset);
cd19cfa2 1813 } else {
2eb9fbaa
MA
1814 /*
1815 * Remap needs to match alloc. Accelerators that
1816 * set phys_mem_alloc never remap. If they did,
1817 * we'd need a remap hook here.
1818 */
1819 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1820
cd19cfa2
HY
1821 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1822 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1823 flags, -1, 0);
cd19cfa2
HY
1824 }
1825 if (area != vaddr) {
f15fbc4b
AP
1826 fprintf(stderr, "Could not remap addr: "
1827 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1828 length, addr);
1829 exit(1);
1830 }
8490fc78 1831 memory_try_enable_merging(vaddr, length);
ddb97f1d 1832 qemu_ram_setup_dump(vaddr, length);
cd19cfa2 1833 }
cd19cfa2
HY
1834 }
1835 }
1836}
1837#endif /* !_WIN32 */
1838
a35ba7be
PB
1839int qemu_get_ram_fd(ram_addr_t addr)
1840{
ae3a7047
MD
1841 RAMBlock *block;
1842 int fd;
a35ba7be 1843
0dc3f44a 1844 rcu_read_lock();
ae3a7047
MD
1845 block = qemu_get_ram_block(addr);
1846 fd = block->fd;
0dc3f44a 1847 rcu_read_unlock();
ae3a7047 1848 return fd;
a35ba7be
PB
1849}
1850
56a571d9
TM
1851void qemu_set_ram_fd(ram_addr_t addr, int fd)
1852{
1853 RAMBlock *block;
1854
1855 rcu_read_lock();
1856 block = qemu_get_ram_block(addr);
1857 block->fd = fd;
1858 rcu_read_unlock();
1859}
1860
3fd74b84
DM
1861void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1862{
ae3a7047
MD
1863 RAMBlock *block;
1864 void *ptr;
3fd74b84 1865
0dc3f44a 1866 rcu_read_lock();
ae3a7047
MD
1867 block = qemu_get_ram_block(addr);
1868 ptr = ramblock_ptr(block, 0);
0dc3f44a 1869 rcu_read_unlock();
ae3a7047 1870 return ptr;
3fd74b84
DM
1871}
1872
1b5ec234 1873/* Return a host pointer to ram allocated with qemu_ram_alloc.
ae3a7047
MD
1874 * This should not be used for general purpose DMA. Use address_space_map
1875 * or address_space_rw instead. For local memory (e.g. video ram) that the
1876 * device owns, use memory_region_get_ram_ptr.
0dc3f44a 1877 *
49b24afc 1878 * Called within RCU critical section.
1b5ec234 1879 */
3655cb9c 1880void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
1b5ec234 1881{
3655cb9c
GA
1882 RAMBlock *block = ram_block;
1883
1884 if (block == NULL) {
1885 block = qemu_get_ram_block(addr);
1886 }
ae3a7047
MD
1887
1888 if (xen_enabled() && block->host == NULL) {
0d6d3c87
PB
1889 /* We need to check if the requested address is in the RAM
1890 * because we don't want to map the entire memory in QEMU.
1891 * In that case just map until the end of the page.
1892 */
1893 if (block->offset == 0) {
49b24afc 1894 return xen_map_cache(addr, 0, 0);
0d6d3c87 1895 }
ae3a7047
MD
1896
1897 block->host = xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87 1898 }
49b24afc 1899 return ramblock_ptr(block, addr - block->offset);
dc828ca1
PB
1900}
1901
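/*
 * Illustrative usage note (not part of exec.c): qemu_get_ram_ptr() must be
 * called from within an RCU critical section, e.g.
 *
 *     rcu_read_lock();
 *     ptr = qemu_get_ram_ptr(NULL, ram_addr);
 *     ... access the bytes behind ptr ...
 *     rcu_read_unlock();
 *
 * Passing NULL for the RAMBlock makes the helper look the block up from
 * ram_addr, as the notdirty write path below does.
 */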
38bee5dc 1902/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
ae3a7047 1903 * but takes a size argument.
0dc3f44a 1904 *
e81bcda5 1905 * Called within RCU critical section.
ae3a7047 1906 */
3655cb9c
GA
1907static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1908 hwaddr *size)
38bee5dc 1909{
3655cb9c 1910 RAMBlock *block = ram_block;
e81bcda5 1911 ram_addr_t offset_inside_block;
8ab934f9
SS
1912 if (*size == 0) {
1913 return NULL;
1914 }
e81bcda5 1915
3655cb9c
GA
1916 if (block == NULL) {
1917 block = qemu_get_ram_block(addr);
1918 }
e81bcda5
PB
1919 offset_inside_block = addr - block->offset;
1920 *size = MIN(*size, block->max_length - offset_inside_block);
1921
1922 if (xen_enabled() && block->host == NULL) {
1923 /* We need to check if the requested address is in the RAM
1924 * because we don't want to map the entire memory in QEMU.
1925 * In that case just map the requested area.
1926 */
1927 if (block->offset == 0) {
1928 return xen_map_cache(addr, *size, 1);
38bee5dc
SS
1929 }
1930
e81bcda5 1931 block->host = xen_map_cache(block->offset, block->max_length, 1);
38bee5dc 1932 }
e81bcda5
PB
1933
1934 return ramblock_ptr(block, offset_inside_block);
38bee5dc
SS
1935}
1936
422148d3
DDAG
1937/*
1938 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1939 * in that RAMBlock.
1940 *
1941 * ptr: Host pointer to look up
1942 * round_offset: If true round the result offset down to a page boundary
1943 * *ram_addr: set to result ram_addr
1944 * *offset: set to result offset within the RAMBlock
1945 *
1946 * Returns: RAMBlock (or NULL if not found)
ae3a7047
MD
1947 *
1948 * By the time this function returns, the returned pointer is not protected
1949 * by RCU anymore. If the caller is not within an RCU critical section and
1950 * does not hold the iothread lock, it must have other means of protecting the
1951 * pointer, such as a reference to the region that includes the incoming
1952 * ram_addr_t.
1953 */
422148d3
DDAG
1954RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1955 ram_addr_t *ram_addr,
1956 ram_addr_t *offset)
5579c7f3 1957{
94a6b54f
PB
1958 RAMBlock *block;
1959 uint8_t *host = ptr;
1960
868bb33f 1961 if (xen_enabled()) {
0dc3f44a 1962 rcu_read_lock();
e41d7c69 1963 *ram_addr = xen_ram_addr_from_mapcache(ptr);
422148d3
DDAG
1964 block = qemu_get_ram_block(*ram_addr);
1965 if (block) {
1966 *offset = (host - block->host);
1967 }
0dc3f44a 1968 rcu_read_unlock();
422148d3 1969 return block;
712c2b41
SS
1970 }
1971
0dc3f44a
MD
1972 rcu_read_lock();
1973 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 1974 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1975 goto found;
1976 }
1977
0dc3f44a 1978 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
432d268c
JN
 1979 /* This case happens when the block is not mapped. */
1980 if (block->host == NULL) {
1981 continue;
1982 }
9b8424d5 1983 if (host - block->host < block->max_length) {
23887b79 1984 goto found;
f471a17e 1985 }
94a6b54f 1986 }
432d268c 1987
0dc3f44a 1988 rcu_read_unlock();
1b5ec234 1989 return NULL;
23887b79
PB
1990
1991found:
422148d3
DDAG
1992 *offset = (host - block->host);
1993 if (round_offset) {
1994 *offset &= TARGET_PAGE_MASK;
1995 }
1996 *ram_addr = block->offset + *offset;
0dc3f44a 1997 rcu_read_unlock();
422148d3
DDAG
1998 return block;
1999}
2000
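/*
 * Illustrative sketch (not part of exec.c): mapping a host pointer that is
 * known to point into guest RAM back to its ram_addr_t.  The wrapper name
 * is an assumption of the example.
 */
static bool example_host_ptr_to_ram_addr(void *host_ptr, ram_addr_t *ram_addr)
{
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host_ptr, false, ram_addr, &offset);

    /* On success *ram_addr is valid and offset is the offset inside rb. */
    return rb != NULL;
}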
e3dd7493
DDAG
2001/*
2002 * Finds the named RAMBlock
2003 *
2004 * name: The name of RAMBlock to find
2005 *
2006 * Returns: RAMBlock (or NULL if not found)
2007 */
2008RAMBlock *qemu_ram_block_by_name(const char *name)
2009{
2010 RAMBlock *block;
2011
2012 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2013 if (!strcmp(name, block->idstr)) {
2014 return block;
2015 }
2016 }
2017
2018 return NULL;
2019}
2020
422148d3
DDAG
2021/* Some of the softmmu routines need to translate from a host pointer
2022 (typically a TLB entry) back to a ram offset. */
2023MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2024{
2025 RAMBlock *block;
2026 ram_addr_t offset; /* Not used */
2027
2028 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
2029
2030 if (!block) {
2031 return NULL;
2032 }
2033
2034 return block->mr;
e890261f 2035}
f471a17e 2036
49b24afc 2037/* Called within RCU critical section. */
a8170e5e 2038static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 2039 uint64_t val, unsigned size)
9fa3e853 2040{
52159192 2041 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 2042 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 2043 }
0e0df1e2
AK
2044 switch (size) {
2045 case 1:
3655cb9c 2046 stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
0e0df1e2
AK
2047 break;
2048 case 2:
3655cb9c 2049 stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
0e0df1e2
AK
2050 break;
2051 case 4:
3655cb9c 2052 stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
0e0df1e2
AK
2053 break;
2054 default:
2055 abort();
3a7d929e 2056 }
58d2707e
PB
2057 /* Set both VGA and migration bits for simplicity and to remove
2058 * the notdirty callback faster.
2059 */
2060 cpu_physical_memory_set_dirty_range(ram_addr, size,
2061 DIRTY_CLIENTS_NOCODE);
f23db169
FB
2062 /* we remove the notdirty callback only if the code has been
2063 flushed */
a2cd8c85 2064 if (!cpu_physical_memory_is_clean(ram_addr)) {
bcae01e4 2065 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
4917cf44 2066 }
9fa3e853
FB
2067}
2068
b018ddf6
PB
2069static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2070 unsigned size, bool is_write)
2071{
2072 return is_write;
2073}
2074
0e0df1e2 2075static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 2076 .write = notdirty_mem_write,
b018ddf6 2077 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 2078 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
2079};
2080
0f459d16 2081/* Generate a debug exception if a watchpoint has been hit. */
66b9b43c 2082static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
0f459d16 2083{
93afeade 2084 CPUState *cpu = current_cpu;
568496c0 2085 CPUClass *cc = CPU_GET_CLASS(cpu);
93afeade 2086 CPUArchState *env = cpu->env_ptr;
06d55cc1 2087 target_ulong pc, cs_base;
0f459d16 2088 target_ulong vaddr;
a1d1bb31 2089 CPUWatchpoint *wp;
06d55cc1 2090 int cpu_flags;
0f459d16 2091
ff4700b0 2092 if (cpu->watchpoint_hit) {
06d55cc1
AL
2093 /* We re-entered the check after replacing the TB. Now raise
2094 * the debug interrupt so that is will trigger after the
2095 * current instruction. */
93afeade 2096 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
2097 return;
2098 }
93afeade 2099 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 2100 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
2101 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2102 && (wp->flags & flags)) {
08225676
PM
2103 if (flags == BP_MEM_READ) {
2104 wp->flags |= BP_WATCHPOINT_HIT_READ;
2105 } else {
2106 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2107 }
2108 wp->hitaddr = vaddr;
66b9b43c 2109 wp->hitattrs = attrs;
ff4700b0 2110 if (!cpu->watchpoint_hit) {
568496c0
SF
2111 if (wp->flags & BP_CPU &&
2112 !cc->debug_check_watchpoint(cpu, wp)) {
2113 wp->flags &= ~BP_WATCHPOINT_HIT;
2114 continue;
2115 }
ff4700b0 2116 cpu->watchpoint_hit = wp;
239c51a5 2117 tb_check_watchpoint(cpu);
6e140f28 2118 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 2119 cpu->exception_index = EXCP_DEBUG;
5638d180 2120 cpu_loop_exit(cpu);
6e140f28
AL
2121 } else {
2122 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 2123 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 2124 cpu_resume_from_signal(cpu, NULL);
6e140f28 2125 }
06d55cc1 2126 }
6e140f28
AL
2127 } else {
2128 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2129 }
2130 }
2131}
2132
6658ffb8
PB
2133/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2134 so these check for a hit then pass through to the normal out-of-line
2135 phys routines. */
66b9b43c
PM
2136static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2137 unsigned size, MemTxAttrs attrs)
6658ffb8 2138{
66b9b43c
PM
2139 MemTxResult res;
2140 uint64_t data;
79ed0416
PM
2141 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2142 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
66b9b43c
PM
2143
2144 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
1ec9b909 2145 switch (size) {
66b9b43c 2146 case 1:
79ed0416 2147 data = address_space_ldub(as, addr, attrs, &res);
66b9b43c
PM
2148 break;
2149 case 2:
79ed0416 2150 data = address_space_lduw(as, addr, attrs, &res);
66b9b43c
PM
2151 break;
2152 case 4:
79ed0416 2153 data = address_space_ldl(as, addr, attrs, &res);
66b9b43c 2154 break;
1ec9b909
AK
2155 default: abort();
2156 }
66b9b43c
PM
2157 *pdata = data;
2158 return res;
6658ffb8
PB
2159}
2160
66b9b43c
PM
2161static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2162 uint64_t val, unsigned size,
2163 MemTxAttrs attrs)
6658ffb8 2164{
66b9b43c 2165 MemTxResult res;
79ed0416
PM
2166 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2167 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
66b9b43c
PM
2168
2169 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1ec9b909 2170 switch (size) {
67364150 2171 case 1:
79ed0416 2172 address_space_stb(as, addr, val, attrs, &res);
67364150
MF
2173 break;
2174 case 2:
79ed0416 2175 address_space_stw(as, addr, val, attrs, &res);
67364150
MF
2176 break;
2177 case 4:
79ed0416 2178 address_space_stl(as, addr, val, attrs, &res);
67364150 2179 break;
1ec9b909
AK
2180 default: abort();
2181 }
66b9b43c 2182 return res;
6658ffb8
PB
2183}
2184
1ec9b909 2185static const MemoryRegionOps watch_mem_ops = {
66b9b43c
PM
2186 .read_with_attrs = watch_mem_read,
2187 .write_with_attrs = watch_mem_write,
1ec9b909 2188 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 2189};
6658ffb8 2190
f25a49e0
PM
2191static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2192 unsigned len, MemTxAttrs attrs)
db7b5426 2193{
acc9d80b 2194 subpage_t *subpage = opaque;
ff6cff75 2195 uint8_t buf[8];
5c9eb028 2196 MemTxResult res;
791af8c8 2197
db7b5426 2198#if defined(DEBUG_SUBPAGE)
016e9d62 2199 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 2200 subpage, len, addr);
db7b5426 2201#endif
5c9eb028
PM
2202 res = address_space_read(subpage->as, addr + subpage->base,
2203 attrs, buf, len);
2204 if (res) {
2205 return res;
f25a49e0 2206 }
acc9d80b
JK
2207 switch (len) {
2208 case 1:
f25a49e0
PM
2209 *data = ldub_p(buf);
2210 return MEMTX_OK;
acc9d80b 2211 case 2:
f25a49e0
PM
2212 *data = lduw_p(buf);
2213 return MEMTX_OK;
acc9d80b 2214 case 4:
f25a49e0
PM
2215 *data = ldl_p(buf);
2216 return MEMTX_OK;
ff6cff75 2217 case 8:
f25a49e0
PM
2218 *data = ldq_p(buf);
2219 return MEMTX_OK;
acc9d80b
JK
2220 default:
2221 abort();
2222 }
db7b5426
BS
2223}
2224
f25a49e0
PM
2225static MemTxResult subpage_write(void *opaque, hwaddr addr,
2226 uint64_t value, unsigned len, MemTxAttrs attrs)
db7b5426 2227{
acc9d80b 2228 subpage_t *subpage = opaque;
ff6cff75 2229 uint8_t buf[8];
acc9d80b 2230
db7b5426 2231#if defined(DEBUG_SUBPAGE)
016e9d62 2232 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
2233 " value %"PRIx64"\n",
2234 __func__, subpage, len, addr, value);
db7b5426 2235#endif
acc9d80b
JK
2236 switch (len) {
2237 case 1:
2238 stb_p(buf, value);
2239 break;
2240 case 2:
2241 stw_p(buf, value);
2242 break;
2243 case 4:
2244 stl_p(buf, value);
2245 break;
ff6cff75
PB
2246 case 8:
2247 stq_p(buf, value);
2248 break;
acc9d80b
JK
2249 default:
2250 abort();
2251 }
5c9eb028
PM
2252 return address_space_write(subpage->as, addr + subpage->base,
2253 attrs, buf, len);
db7b5426
BS
2254}
2255
c353e4cc 2256static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 2257 unsigned len, bool is_write)
c353e4cc 2258{
acc9d80b 2259 subpage_t *subpage = opaque;
c353e4cc 2260#if defined(DEBUG_SUBPAGE)
016e9d62 2261 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 2262 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
2263#endif
2264
acc9d80b 2265 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 2266 len, is_write);
c353e4cc
PB
2267}
2268
70c68e44 2269static const MemoryRegionOps subpage_ops = {
f25a49e0
PM
2270 .read_with_attrs = subpage_read,
2271 .write_with_attrs = subpage_write,
ff6cff75
PB
2272 .impl.min_access_size = 1,
2273 .impl.max_access_size = 8,
2274 .valid.min_access_size = 1,
2275 .valid.max_access_size = 8,
c353e4cc 2276 .valid.accepts = subpage_accepts,
70c68e44 2277 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
2278};
2279
c227f099 2280static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2281 uint16_t section)
db7b5426
BS
2282{
2283 int idx, eidx;
2284
2285 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2286 return -1;
2287 idx = SUBPAGE_IDX(start);
2288 eidx = SUBPAGE_IDX(end);
2289#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2290 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2291 __func__, mmio, start, end, idx, eidx, section);
db7b5426 2292#endif
db7b5426 2293 for (; idx <= eidx; idx++) {
5312bd8b 2294 mmio->sub_section[idx] = section;
db7b5426
BS
2295 }
2296
2297 return 0;
2298}
2299
acc9d80b 2300static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 2301{
c227f099 2302 subpage_t *mmio;
db7b5426 2303
7267c094 2304 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 2305
acc9d80b 2306 mmio->as = as;
1eec614b 2307 mmio->base = base;
2c9b15ca 2308 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 2309 NULL, TARGET_PAGE_SIZE);
b3b00c78 2310 mmio->iomem.subpage = true;
db7b5426 2311#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2312 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2313 mmio, base, TARGET_PAGE_SIZE);
db7b5426 2314#endif
b41aac4f 2315 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
2316
2317 return mmio;
2318}
2319
a656e22f
PC
2320static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2321 MemoryRegion *mr)
5312bd8b 2322{
a656e22f 2323 assert(as);
5312bd8b 2324 MemoryRegionSection section = {
a656e22f 2325 .address_space = as,
5312bd8b
AK
2326 .mr = mr,
2327 .offset_within_address_space = 0,
2328 .offset_within_region = 0,
052e87b0 2329 .size = int128_2_64(),
5312bd8b
AK
2330 };
2331
53cb28cb 2332 return phys_section_add(map, &section);
5312bd8b
AK
2333}
2334
a54c87b6 2335MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
aa102231 2336{
a54c87b6
PM
2337 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2338 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
32857f4d 2339 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
79e2b9ae 2340 MemoryRegionSection *sections = d->map.sections;
9d82b5a7
PB
2341
2342 return sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
2343}
2344
e9179ce1
AK
2345static void io_mem_init(void)
2346{
1f6245e5 2347 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 2348 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 2349 NULL, UINT64_MAX);
2c9b15ca 2350 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 2351 NULL, UINT64_MAX);
2c9b15ca 2352 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 2353 NULL, UINT64_MAX);
e9179ce1
AK
2354}
2355
ac1970fb 2356static void mem_begin(MemoryListener *listener)
00752703
PB
2357{
2358 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
2359 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2360 uint16_t n;
2361
a656e22f 2362 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 2363 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 2364 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 2365 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 2366 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 2367 assert(n == PHYS_SECTION_ROM);
a656e22f 2368 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 2369 assert(n == PHYS_SECTION_WATCH);
00752703 2370
9736e55b 2371 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
2372 d->as = as;
2373 as->next_dispatch = d;
2374}
2375
79e2b9ae
PB
2376static void address_space_dispatch_free(AddressSpaceDispatch *d)
2377{
2378 phys_sections_free(&d->map);
2379 g_free(d);
2380}
2381
00752703 2382static void mem_commit(MemoryListener *listener)
ac1970fb 2383{
89ae337a 2384 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2385 AddressSpaceDispatch *cur = as->dispatch;
2386 AddressSpaceDispatch *next = as->next_dispatch;
2387
53cb28cb 2388 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2389
79e2b9ae 2390 atomic_rcu_set(&as->dispatch, next);
53cb28cb 2391 if (cur) {
79e2b9ae 2392 call_rcu(cur, address_space_dispatch_free, rcu);
53cb28cb 2393 }
9affd6fc
PB
2394}
2395
1d71148e 2396static void tcg_commit(MemoryListener *listener)
50c1e149 2397{
32857f4d
PM
2398 CPUAddressSpace *cpuas;
2399 AddressSpaceDispatch *d;
117712c3
AK
2400
2401 /* since each CPU stores ram addresses in its TLB cache, we must
2402 reset the modified entries */
32857f4d
PM
2403 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2404 cpu_reloading_memory_map();
2405 /* The CPU and TLB are protected by the iothread lock.
2406 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2407 * may have split the RCU critical section.
2408 */
2409 d = atomic_rcu_read(&cpuas->as->dispatch);
2410 cpuas->memory_dispatch = d;
2411 tlb_flush(cpuas->cpu, 1);
50c1e149
AK
2412}
2413
ac1970fb
AK
2414void address_space_init_dispatch(AddressSpace *as)
2415{
00752703 2416 as->dispatch = NULL;
89ae337a 2417 as->dispatch_listener = (MemoryListener) {
ac1970fb 2418 .begin = mem_begin,
00752703 2419 .commit = mem_commit,
ac1970fb
AK
2420 .region_add = mem_add,
2421 .region_nop = mem_add,
2422 .priority = 0,
2423 };
89ae337a 2424 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2425}
2426
6e48e8f9
PB
2427void address_space_unregister(AddressSpace *as)
2428{
2429 memory_listener_unregister(&as->dispatch_listener);
2430}
2431
83f3c251
AK
2432void address_space_destroy_dispatch(AddressSpace *as)
2433{
2434 AddressSpaceDispatch *d = as->dispatch;
2435
79e2b9ae
PB
2436 atomic_rcu_set(&as->dispatch, NULL);
2437 if (d) {
2438 call_rcu(d, address_space_dispatch_free, rcu);
2439 }
83f3c251
AK
2440}
2441
62152b8a
AK
2442static void memory_map_init(void)
2443{
7267c094 2444 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2445
57271d63 2446 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2447 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2448
7267c094 2449 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2450 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2451 65536);
7dca8043 2452 address_space_init(&address_space_io, system_io, "I/O");
62152b8a
AK
2453}
2454
2455MemoryRegion *get_system_memory(void)
2456{
2457 return system_memory;
2458}
2459
309cb471
AK
2460MemoryRegion *get_system_io(void)
2461{
2462 return system_io;
2463}
2464
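/*
 * Illustrative sketch (not part of exec.c): board code usually fetches the
 * root region with get_system_memory() and maps its own regions into it.
 * memory_region_add_subregion() and "board_ram" are assumptions of this
 * example, not something defined in this file.
 */
static void example_map_board_ram(MemoryRegion *board_ram, hwaddr base)
{
    /* assumed core memory API: attach board_ram at guest-physical "base" */
    memory_region_add_subregion(get_system_memory(), base, board_ram);
}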
e2eef170
PB
2465#endif /* !defined(CONFIG_USER_ONLY) */
2466
13eb76e0
FB
2467/* physical memory access (slow version, mainly for debug) */
2468#if defined(CONFIG_USER_ONLY)
f17ec444 2469int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2470 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2471{
2472 int l, flags;
2473 target_ulong page;
53a5960a 2474 void * p;
13eb76e0
FB
2475
2476 while (len > 0) {
2477 page = addr & TARGET_PAGE_MASK;
2478 l = (page + TARGET_PAGE_SIZE) - addr;
2479 if (l > len)
2480 l = len;
2481 flags = page_get_flags(page);
2482 if (!(flags & PAGE_VALID))
a68fe89c 2483 return -1;
13eb76e0
FB
2484 if (is_write) {
2485 if (!(flags & PAGE_WRITE))
a68fe89c 2486 return -1;
579a97f7 2487 /* XXX: this code should not depend on lock_user */
72fb7daa 2488 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2489 return -1;
72fb7daa
AJ
2490 memcpy(p, buf, l);
2491 unlock_user(p, addr, l);
13eb76e0
FB
2492 } else {
2493 if (!(flags & PAGE_READ))
a68fe89c 2494 return -1;
579a97f7 2495 /* XXX: this code should not depend on lock_user */
72fb7daa 2496 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2497 return -1;
72fb7daa 2498 memcpy(buf, p, l);
5b257578 2499 unlock_user(p, addr, 0);
13eb76e0
FB
2500 }
2501 len -= l;
2502 buf += l;
2503 addr += l;
2504 }
a68fe89c 2505 return 0;
13eb76e0 2506}
8df1cd07 2507
13eb76e0 2508#else
51d7a9eb 2509
845b6214 2510static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
a8170e5e 2511 hwaddr length)
51d7a9eb 2512{
e87f7778
PB
2513 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2514 /* No early return if dirty_log_mask is or becomes 0, because
2515 * cpu_physical_memory_set_dirty_range will still call
2516 * xen_modified_memory.
2517 */
2518 if (dirty_log_mask) {
2519 dirty_log_mask =
2520 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2521 }
2522 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2523 tb_invalidate_phys_range(addr, addr + length);
2524 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
51d7a9eb 2525 }
e87f7778 2526 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
51d7a9eb
AP
2527}
2528
23326164 2529static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2530{
e1622f4b 2531 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2532
2533 /* Regions are assumed to support 1-4 byte accesses unless
2534 otherwise specified. */
23326164
RH
2535 if (access_size_max == 0) {
2536 access_size_max = 4;
2537 }
2538
2539 /* Bound the maximum access by the alignment of the address. */
2540 if (!mr->ops->impl.unaligned) {
2541 unsigned align_size_max = addr & -addr;
2542 if (align_size_max != 0 && align_size_max < access_size_max) {
2543 access_size_max = align_size_max;
2544 }
82f2563f 2545 }
23326164
RH
2546
2547 /* Don't attempt accesses larger than the maximum. */
2548 if (l > access_size_max) {
2549 l = access_size_max;
82f2563f 2550 }
6554f5c0 2551 l = pow2floor(l);
23326164
RH
2552
2553 return l;
82f2563f
PB
2554}
2555
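/*
 * Worked example for memory_access_size() above (illustrative only),
 * assuming valid.max_access_size == 8 and no unaligned support:
 *
 *   addr = 0x1006, l = 8  ->  align_size_max = addr & -addr = 2,
 *                             so the access is clamped to 2 bytes;
 *   addr = 0x1000, l = 6  ->  no alignment clamp, but pow2floor(6) = 4,
 *                             so a 4-byte access is issued first.
 */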
4840f10e 2556static bool prepare_mmio_access(MemoryRegion *mr)
125b3806 2557{
4840f10e
JK
2558 bool unlocked = !qemu_mutex_iothread_locked();
2559 bool release_lock = false;
2560
2561 if (unlocked && mr->global_locking) {
2562 qemu_mutex_lock_iothread();
2563 unlocked = false;
2564 release_lock = true;
2565 }
125b3806 2566 if (mr->flush_coalesced_mmio) {
4840f10e
JK
2567 if (unlocked) {
2568 qemu_mutex_lock_iothread();
2569 }
125b3806 2570 qemu_flush_coalesced_mmio_buffer();
4840f10e
JK
2571 if (unlocked) {
2572 qemu_mutex_unlock_iothread();
2573 }
125b3806 2574 }
4840f10e
JK
2575
2576 return release_lock;
125b3806
PB
2577}
2578
a203ac70
PB
2579/* Called within RCU critical section. */
2580static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2581 MemTxAttrs attrs,
2582 const uint8_t *buf,
2583 int len, hwaddr addr1,
2584 hwaddr l, MemoryRegion *mr)
13eb76e0 2585{
13eb76e0 2586 uint8_t *ptr;
791af8c8 2587 uint64_t val;
3b643495 2588 MemTxResult result = MEMTX_OK;
4840f10e 2589 bool release_lock = false;
3b46e624 2590
a203ac70 2591 for (;;) {
eb7eeb88
PB
2592 if (!memory_access_is_direct(mr, true)) {
2593 release_lock |= prepare_mmio_access(mr);
2594 l = memory_access_size(mr, l, addr1);
2595 /* XXX: could force current_cpu to NULL to avoid
2596 potential bugs */
2597 switch (l) {
2598 case 8:
2599 /* 64 bit write access */
2600 val = ldq_p(buf);
2601 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2602 attrs);
2603 break;
2604 case 4:
2605 /* 32 bit write access */
2606 val = ldl_p(buf);
2607 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2608 attrs);
2609 break;
2610 case 2:
2611 /* 16 bit write access */
2612 val = lduw_p(buf);
2613 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2614 attrs);
2615 break;
2616 case 1:
2617 /* 8 bit write access */
2618 val = ldub_p(buf);
2619 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2620 attrs);
2621 break;
2622 default:
2623 abort();
13eb76e0
FB
2624 }
2625 } else {
eb7eeb88
PB
2626 addr1 += memory_region_get_ram_addr(mr);
2627 /* RAM case */
3655cb9c 2628 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
eb7eeb88
PB
2629 memcpy(ptr, buf, l);
2630 invalidate_and_set_dirty(mr, addr1, l);
13eb76e0 2631 }
4840f10e
JK
2632
2633 if (release_lock) {
2634 qemu_mutex_unlock_iothread();
2635 release_lock = false;
2636 }
2637
13eb76e0
FB
2638 len -= l;
2639 buf += l;
2640 addr += l;
a203ac70
PB
2641
2642 if (!len) {
2643 break;
2644 }
2645
2646 l = len;
2647 mr = address_space_translate(as, addr, &addr1, &l, true);
13eb76e0 2648 }
fd8aaa76 2649
3b643495 2650 return result;
13eb76e0 2651}
8df1cd07 2652
a203ac70
PB
2653MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2654 const uint8_t *buf, int len)
ac1970fb 2655{
eb7eeb88 2656 hwaddr l;
eb7eeb88
PB
2657 hwaddr addr1;
2658 MemoryRegion *mr;
2659 MemTxResult result = MEMTX_OK;
eb7eeb88 2660
a203ac70
PB
2661 if (len > 0) {
2662 rcu_read_lock();
eb7eeb88 2663 l = len;
a203ac70
PB
2664 mr = address_space_translate(as, addr, &addr1, &l, true);
2665 result = address_space_write_continue(as, addr, attrs, buf, len,
2666 addr1, l, mr);
2667 rcu_read_unlock();
2668 }
2669
2670 return result;
2671}
2672
2673/* Called within RCU critical section. */
2674MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2675 MemTxAttrs attrs, uint8_t *buf,
2676 int len, hwaddr addr1, hwaddr l,
2677 MemoryRegion *mr)
2678{
2679 uint8_t *ptr;
2680 uint64_t val;
2681 MemTxResult result = MEMTX_OK;
2682 bool release_lock = false;
eb7eeb88 2683
a203ac70 2684 for (;;) {
eb7eeb88
PB
2685 if (!memory_access_is_direct(mr, false)) {
2686 /* I/O case */
2687 release_lock |= prepare_mmio_access(mr);
2688 l = memory_access_size(mr, l, addr1);
2689 switch (l) {
2690 case 8:
2691 /* 64 bit read access */
2692 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2693 attrs);
2694 stq_p(buf, val);
2695 break;
2696 case 4:
2697 /* 32 bit read access */
2698 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2699 attrs);
2700 stl_p(buf, val);
2701 break;
2702 case 2:
2703 /* 16 bit read access */
2704 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2705 attrs);
2706 stw_p(buf, val);
2707 break;
2708 case 1:
2709 /* 8 bit read access */
2710 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2711 attrs);
2712 stb_p(buf, val);
2713 break;
2714 default:
2715 abort();
2716 }
2717 } else {
2718 /* RAM case */
8e41fb63
FZ
2719 ptr = qemu_get_ram_ptr(mr->ram_block,
2720 memory_region_get_ram_addr(mr) + addr1);
eb7eeb88
PB
2721 memcpy(buf, ptr, l);
2722 }
2723
2724 if (release_lock) {
2725 qemu_mutex_unlock_iothread();
2726 release_lock = false;
2727 }
2728
2729 len -= l;
2730 buf += l;
2731 addr += l;
a203ac70
PB
2732
2733 if (!len) {
2734 break;
2735 }
2736
2737 l = len;
2738 mr = address_space_translate(as, addr, &addr1, &l, false);
2739 }
2740
2741 return result;
2742}
2743
3cc8f884
PB
2744MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2745 MemTxAttrs attrs, uint8_t *buf, int len)
a203ac70
PB
2746{
2747 hwaddr l;
2748 hwaddr addr1;
2749 MemoryRegion *mr;
2750 MemTxResult result = MEMTX_OK;
2751
2752 if (len > 0) {
2753 rcu_read_lock();
2754 l = len;
2755 mr = address_space_translate(as, addr, &addr1, &l, false);
2756 result = address_space_read_continue(as, addr, attrs, buf, len,
2757 addr1, l, mr);
2758 rcu_read_unlock();
eb7eeb88 2759 }
eb7eeb88
PB
2760
2761 return result;
ac1970fb
AK
2762}
2763
eb7eeb88
PB
2764MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2765 uint8_t *buf, int len, bool is_write)
2766{
2767 if (is_write) {
2768 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2769 } else {
2770 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2771 }
2772}
ac1970fb 2773
a8170e5e 2774void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2775 int len, int is_write)
2776{
5c9eb028
PM
2777 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2778 buf, len, is_write);
ac1970fb
AK
2779}
2780
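/*
 * Illustrative sketch (not part of exec.c): a caller using the slow path
 * above and checking the transaction result.  The wrapper name is an
 * assumption of the example.
 */
static bool example_read_guest_word(hwaddr addr, uint32_t *out)
{
    MemTxResult res;

    res = address_space_rw(&address_space_memory, addr,
                           MEMTXATTRS_UNSPECIFIED,
                           (uint8_t *)out, sizeof(*out), false /* read */);
    return res == MEMTX_OK;     /* any set error bit means the read failed */
}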
582b55a9
AG
2781enum write_rom_type {
2782 WRITE_DATA,
2783 FLUSH_CACHE,
2784};
2785
2a221651 2786static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2787 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2788{
149f54b5 2789 hwaddr l;
d0ecd2aa 2790 uint8_t *ptr;
149f54b5 2791 hwaddr addr1;
5c8a00ce 2792 MemoryRegion *mr;
3b46e624 2793
41063e1e 2794 rcu_read_lock();
d0ecd2aa 2795 while (len > 0) {
149f54b5 2796 l = len;
2a221651 2797 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2798
5c8a00ce
PB
2799 if (!(memory_region_is_ram(mr) ||
2800 memory_region_is_romd(mr))) {
b242e0e0 2801 l = memory_access_size(mr, l, addr1);
d0ecd2aa 2802 } else {
5c8a00ce 2803 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2804 /* ROM/RAM case */
3655cb9c 2805 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
582b55a9
AG
2806 switch (type) {
2807 case WRITE_DATA:
2808 memcpy(ptr, buf, l);
845b6214 2809 invalidate_and_set_dirty(mr, addr1, l);
582b55a9
AG
2810 break;
2811 case FLUSH_CACHE:
2812 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2813 break;
2814 }
d0ecd2aa
FB
2815 }
2816 len -= l;
2817 buf += l;
2818 addr += l;
2819 }
41063e1e 2820 rcu_read_unlock();
d0ecd2aa
FB
2821}
2822
582b55a9 2823/* used for ROM loading: can write in RAM and ROM */
2a221651 2824void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2825 const uint8_t *buf, int len)
2826{
2a221651 2827 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2828}
2829
2830void cpu_flush_icache_range(hwaddr start, int len)
2831{
2832 /*
2833 * This function should do the same thing as an icache flush that was
2834 * triggered from within the guest. For TCG we are always cache coherent,
2835 * so there is no need to flush anything. For KVM / Xen we need to flush
2836 * the host's instruction cache at least.
2837 */
2838 if (tcg_enabled()) {
2839 return;
2840 }
2841
2a221651
EI
2842 cpu_physical_memory_write_rom_internal(&address_space_memory,
2843 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2844}
2845
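/*
 * Illustrative sketch (not part of exec.c): how a firmware loader might
 * push an image into guest memory through the ROM-capable path above and
 * then flush the host instruction cache for KVM/Xen.  "image" and
 * "image_size" are assumptions of the example.
 */
static void example_load_firmware(const uint8_t *image, int image_size,
                                  hwaddr load_addr)
{
    cpu_physical_memory_write_rom(&address_space_memory, load_addr,
                                  image, image_size);
    cpu_flush_icache_range(load_addr, image_size);
}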
6d16c2f8 2846typedef struct {
d3e71559 2847 MemoryRegion *mr;
6d16c2f8 2848 void *buffer;
a8170e5e
AK
2849 hwaddr addr;
2850 hwaddr len;
c2cba0ff 2851 bool in_use;
6d16c2f8
AL
2852} BounceBuffer;
2853
2854static BounceBuffer bounce;
2855
ba223c29 2856typedef struct MapClient {
e95205e1 2857 QEMUBH *bh;
72cf2d4f 2858 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2859} MapClient;
2860
38e047b5 2861QemuMutex map_client_list_lock;
72cf2d4f
BS
2862static QLIST_HEAD(map_client_list, MapClient) map_client_list
2863 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29 2864
e95205e1
FZ
2865static void cpu_unregister_map_client_do(MapClient *client)
2866{
2867 QLIST_REMOVE(client, link);
2868 g_free(client);
2869}
2870
33b6c2ed
FZ
2871static void cpu_notify_map_clients_locked(void)
2872{
2873 MapClient *client;
2874
2875 while (!QLIST_EMPTY(&map_client_list)) {
2876 client = QLIST_FIRST(&map_client_list);
e95205e1
FZ
2877 qemu_bh_schedule(client->bh);
2878 cpu_unregister_map_client_do(client);
33b6c2ed
FZ
2879 }
2880}
2881
e95205e1 2882void cpu_register_map_client(QEMUBH *bh)
ba223c29 2883{
7267c094 2884 MapClient *client = g_malloc(sizeof(*client));
ba223c29 2885
38e047b5 2886 qemu_mutex_lock(&map_client_list_lock);
e95205e1 2887 client->bh = bh;
72cf2d4f 2888 QLIST_INSERT_HEAD(&map_client_list, client, link);
33b6c2ed
FZ
2889 if (!atomic_read(&bounce.in_use)) {
2890 cpu_notify_map_clients_locked();
2891 }
38e047b5 2892 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2893}
2894
38e047b5 2895void cpu_exec_init_all(void)
ba223c29 2896{
38e047b5 2897 qemu_mutex_init(&ram_list.mutex);
38e047b5 2898 io_mem_init();
680a4783 2899 memory_map_init();
38e047b5 2900 qemu_mutex_init(&map_client_list_lock);
ba223c29
AL
2901}
2902
e95205e1 2903void cpu_unregister_map_client(QEMUBH *bh)
ba223c29
AL
2904{
2905 MapClient *client;
2906
e95205e1
FZ
2907 qemu_mutex_lock(&map_client_list_lock);
2908 QLIST_FOREACH(client, &map_client_list, link) {
2909 if (client->bh == bh) {
2910 cpu_unregister_map_client_do(client);
2911 break;
2912 }
ba223c29 2913 }
e95205e1 2914 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2915}
2916
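/*
 * Illustrative sketch (not part of exec.c): a device model whose
 * address_space_map() attempt failed (bounce buffer busy) can register a
 * bottom half that fires once the buffer is released again.
 * qemu_bh_new() and the retry callback are assumptions of the example.
 */
static void example_retry_dma(void *opaque)
{
    /* re-issue the failed address_space_map() attempt here */
}

static void example_wait_for_bounce_buffer(void)
{
    QEMUBH *bh = qemu_bh_new(example_retry_dma, NULL);  /* assumed helper */

    cpu_register_map_client(bh);
    /* if the retry becomes unnecessary: cpu_unregister_map_client(bh); */
}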
2917static void cpu_notify_map_clients(void)
2918{
38e047b5 2919 qemu_mutex_lock(&map_client_list_lock);
33b6c2ed 2920 cpu_notify_map_clients_locked();
38e047b5 2921 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2922}
2923
51644ab7
PB
2924bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2925{
5c8a00ce 2926 MemoryRegion *mr;
51644ab7
PB
2927 hwaddr l, xlat;
2928
41063e1e 2929 rcu_read_lock();
51644ab7
PB
2930 while (len > 0) {
2931 l = len;
5c8a00ce
PB
2932 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2933 if (!memory_access_is_direct(mr, is_write)) {
2934 l = memory_access_size(mr, l, addr);
2935 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2936 return false;
2937 }
2938 }
2939
2940 len -= l;
2941 addr += l;
2942 }
41063e1e 2943 rcu_read_unlock();
51644ab7
PB
2944 return true;
2945}
2946
6d16c2f8
AL
2947/* Map a physical memory region into a host virtual address.
2948 * May map a subset of the requested range, given by and returned in *plen.
2949 * May return NULL if resources needed to perform the mapping are exhausted.
2950 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2951 * Use cpu_register_map_client() to know when retrying the map operation is
2952 * likely to succeed.
6d16c2f8 2953 */
ac1970fb 2954void *address_space_map(AddressSpace *as,
a8170e5e
AK
2955 hwaddr addr,
2956 hwaddr *plen,
ac1970fb 2957 bool is_write)
6d16c2f8 2958{
a8170e5e 2959 hwaddr len = *plen;
e3127ae0
PB
2960 hwaddr done = 0;
2961 hwaddr l, xlat, base;
2962 MemoryRegion *mr, *this_mr;
2963 ram_addr_t raddr;
e81bcda5 2964 void *ptr;
6d16c2f8 2965
e3127ae0
PB
2966 if (len == 0) {
2967 return NULL;
2968 }
38bee5dc 2969
e3127ae0 2970 l = len;
41063e1e 2971 rcu_read_lock();
e3127ae0 2972 mr = address_space_translate(as, addr, &xlat, &l, is_write);
41063e1e 2973
e3127ae0 2974 if (!memory_access_is_direct(mr, is_write)) {
c2cba0ff 2975 if (atomic_xchg(&bounce.in_use, true)) {
41063e1e 2976 rcu_read_unlock();
e3127ae0 2977 return NULL;
6d16c2f8 2978 }
e85d9db5
KW
2979 /* Avoid unbounded allocations */
2980 l = MIN(l, TARGET_PAGE_SIZE);
2981 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2982 bounce.addr = addr;
2983 bounce.len = l;
d3e71559
PB
2984
2985 memory_region_ref(mr);
2986 bounce.mr = mr;
e3127ae0 2987 if (!is_write) {
5c9eb028
PM
2988 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2989 bounce.buffer, l);
8ab934f9 2990 }
6d16c2f8 2991
41063e1e 2992 rcu_read_unlock();
e3127ae0
PB
2993 *plen = l;
2994 return bounce.buffer;
2995 }
2996
2997 base = xlat;
2998 raddr = memory_region_get_ram_addr(mr);
2999
3000 for (;;) {
6d16c2f8
AL
3001 len -= l;
3002 addr += l;
e3127ae0
PB
3003 done += l;
3004 if (len == 0) {
3005 break;
3006 }
3007
3008 l = len;
3009 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
3010 if (this_mr != mr || xlat != base + done) {
3011 break;
3012 }
6d16c2f8 3013 }
e3127ae0 3014
d3e71559 3015 memory_region_ref(mr);
e3127ae0 3016 *plen = done;
3655cb9c 3017 ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
e81bcda5
PB
3018 rcu_read_unlock();
3019
3020 return ptr;
6d16c2f8
AL
3021}
3022
ac1970fb 3023/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
3024 * Will also mark the memory as dirty if is_write == 1. access_len gives
3025 * the amount of memory that was actually read or written by the caller.
3026 */
a8170e5e
AK
3027void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3028 int is_write, hwaddr access_len)
6d16c2f8
AL
3029{
3030 if (buffer != bounce.buffer) {
d3e71559
PB
3031 MemoryRegion *mr;
3032 ram_addr_t addr1;
3033
3034 mr = qemu_ram_addr_from_host(buffer, &addr1);
3035 assert(mr != NULL);
6d16c2f8 3036 if (is_write) {
845b6214 3037 invalidate_and_set_dirty(mr, addr1, access_len);
6d16c2f8 3038 }
868bb33f 3039 if (xen_enabled()) {
e41d7c69 3040 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3041 }
d3e71559 3042 memory_region_unref(mr);
6d16c2f8
AL
3043 return;
3044 }
3045 if (is_write) {
5c9eb028
PM
3046 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3047 bounce.buffer, access_len);
6d16c2f8 3048 }
f8a83245 3049 qemu_vfree(bounce.buffer);
6d16c2f8 3050 bounce.buffer = NULL;
d3e71559 3051 memory_region_unref(bounce.mr);
c2cba0ff 3052 atomic_mb_set(&bounce.in_use, false);
ba223c29 3053 cpu_notify_map_clients();
6d16c2f8 3054}
d0ecd2aa 3055
a8170e5e
AK
3056void *cpu_physical_memory_map(hwaddr addr,
3057 hwaddr *plen,
ac1970fb
AK
3058 int is_write)
3059{
3060 return address_space_map(&address_space_memory, addr, plen, is_write);
3061}
3062
a8170e5e
AK
3063void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3064 int is_write, hwaddr access_len)
ac1970fb
AK
3065{
3066 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3067}
3068
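/*
 * Illustrative sketch (not part of exec.c): the map/access/unmap pattern
 * for zero-copy DMA into guest memory.  The fallback to
 * address_space_write() when no mapping is available is a choice made for
 * this example, not a rule.
 */
static void example_dma_write(AddressSpace *as, hwaddr addr,
                              const uint8_t *data, hwaddr len)
{
    while (len > 0) {
        hwaddr plen = len;
        void *host = address_space_map(as, addr, &plen, true);

        if (!host) {
            /* bounce buffer busy or MMIO target: use the slow path */
            address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, data, len);
            return;
        }
        memcpy(host, data, plen);               /* plen may be < len */
        address_space_unmap(as, host, plen, true, plen);
        addr += plen;
        data += plen;
        len -= plen;
    }
}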
8df1cd07 3069/* warning: addr must be aligned */
50013115
PM
3070static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3071 MemTxAttrs attrs,
3072 MemTxResult *result,
3073 enum device_endian endian)
8df1cd07 3074{
8df1cd07 3075 uint8_t *ptr;
791af8c8 3076 uint64_t val;
5c8a00ce 3077 MemoryRegion *mr;
149f54b5
PB
3078 hwaddr l = 4;
3079 hwaddr addr1;
50013115 3080 MemTxResult r;
4840f10e 3081 bool release_lock = false;
8df1cd07 3082
41063e1e 3083 rcu_read_lock();
fdfba1a2 3084 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 3085 if (l < 4 || !memory_access_is_direct(mr, false)) {
4840f10e 3086 release_lock |= prepare_mmio_access(mr);
125b3806 3087
8df1cd07 3088 /* I/O case */
50013115 3089 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
1e78bcc1
AG
3090#if defined(TARGET_WORDS_BIGENDIAN)
3091 if (endian == DEVICE_LITTLE_ENDIAN) {
3092 val = bswap32(val);
3093 }
3094#else
3095 if (endian == DEVICE_BIG_ENDIAN) {
3096 val = bswap32(val);
3097 }
3098#endif
8df1cd07
FB
3099 } else {
3100 /* RAM case */
3655cb9c
GA
3101 ptr = qemu_get_ram_ptr(mr->ram_block,
3102 (memory_region_get_ram_addr(mr)
06ef3525 3103 & TARGET_PAGE_MASK)
149f54b5 3104 + addr1);
1e78bcc1
AG
3105 switch (endian) {
3106 case DEVICE_LITTLE_ENDIAN:
3107 val = ldl_le_p(ptr);
3108 break;
3109 case DEVICE_BIG_ENDIAN:
3110 val = ldl_be_p(ptr);
3111 break;
3112 default:
3113 val = ldl_p(ptr);
3114 break;
3115 }
50013115
PM
3116 r = MEMTX_OK;
3117 }
3118 if (result) {
3119 *result = r;
8df1cd07 3120 }
4840f10e
JK
3121 if (release_lock) {
3122 qemu_mutex_unlock_iothread();
3123 }
41063e1e 3124 rcu_read_unlock();
8df1cd07
FB
3125 return val;
3126}
3127
50013115
PM
3128uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3129 MemTxAttrs attrs, MemTxResult *result)
3130{
3131 return address_space_ldl_internal(as, addr, attrs, result,
3132 DEVICE_NATIVE_ENDIAN);
3133}
3134
3135uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3136 MemTxAttrs attrs, MemTxResult *result)
3137{
3138 return address_space_ldl_internal(as, addr, attrs, result,
3139 DEVICE_LITTLE_ENDIAN);
3140}
3141
3142uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3143 MemTxAttrs attrs, MemTxResult *result)
3144{
3145 return address_space_ldl_internal(as, addr, attrs, result,
3146 DEVICE_BIG_ENDIAN);
3147}
3148
fdfba1a2 3149uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3150{
50013115 3151 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3152}
3153
fdfba1a2 3154uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3155{
50013115 3156 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3157}
3158
fdfba1a2 3159uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3160{
50013115 3161 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3162}
3163
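/*
 * Illustrative sketch (not part of exec.c): reading a 32-bit little-endian
 * guest value (e.g. a descriptor field) with explicit error checking.  The
 * wrapper name is an assumption of the example.
 */
static bool example_read_le32(AddressSpace *as, hwaddr addr, uint32_t *out)
{
    MemTxResult res;

    *out = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}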
84b7b8e7 3164/* warning: addr must be aligned */
50013115
PM
3165static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3166 MemTxAttrs attrs,
3167 MemTxResult *result,
3168 enum device_endian endian)
84b7b8e7 3169{
84b7b8e7
FB
3170 uint8_t *ptr;
3171 uint64_t val;
5c8a00ce 3172 MemoryRegion *mr;
149f54b5
PB
3173 hwaddr l = 8;
3174 hwaddr addr1;
50013115 3175 MemTxResult r;
4840f10e 3176 bool release_lock = false;
84b7b8e7 3177
41063e1e 3178 rcu_read_lock();
2c17449b 3179 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3180 false);
3181 if (l < 8 || !memory_access_is_direct(mr, false)) {
4840f10e 3182 release_lock |= prepare_mmio_access(mr);
125b3806 3183
84b7b8e7 3184 /* I/O case */
50013115 3185 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
968a5627
PB
3186#if defined(TARGET_WORDS_BIGENDIAN)
3187 if (endian == DEVICE_LITTLE_ENDIAN) {
3188 val = bswap64(val);
3189 }
3190#else
3191 if (endian == DEVICE_BIG_ENDIAN) {
3192 val = bswap64(val);
3193 }
84b7b8e7
FB
3194#endif
3195 } else {
3196 /* RAM case */
3655cb9c
GA
3197 ptr = qemu_get_ram_ptr(mr->ram_block,
3198 (memory_region_get_ram_addr(mr)
06ef3525 3199 & TARGET_PAGE_MASK)
149f54b5 3200 + addr1);
1e78bcc1
AG
3201 switch (endian) {
3202 case DEVICE_LITTLE_ENDIAN:
3203 val = ldq_le_p(ptr);
3204 break;
3205 case DEVICE_BIG_ENDIAN:
3206 val = ldq_be_p(ptr);
3207 break;
3208 default:
3209 val = ldq_p(ptr);
3210 break;
3211 }
50013115
PM
3212 r = MEMTX_OK;
3213 }
3214 if (result) {
3215 *result = r;
84b7b8e7 3216 }
4840f10e
JK
3217 if (release_lock) {
3218 qemu_mutex_unlock_iothread();
3219 }
41063e1e 3220 rcu_read_unlock();
84b7b8e7
FB
3221 return val;
3222}
3223
50013115
PM
3224uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3225 MemTxAttrs attrs, MemTxResult *result)
3226{
3227 return address_space_ldq_internal(as, addr, attrs, result,
3228 DEVICE_NATIVE_ENDIAN);
3229}
3230
3231uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3232 MemTxAttrs attrs, MemTxResult *result)
3233{
3234 return address_space_ldq_internal(as, addr, attrs, result,
3235 DEVICE_LITTLE_ENDIAN);
3236}
3237
3238uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3239 MemTxAttrs attrs, MemTxResult *result)
3240{
3241 return address_space_ldq_internal(as, addr, attrs, result,
3242 DEVICE_BIG_ENDIAN);
3243}
3244
2c17449b 3245uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3246{
50013115 3247 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3248}
3249
2c17449b 3250uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3251{
50013115 3252 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3253}
3254
2c17449b 3255uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3256{
50013115 3257 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3258}
3259
aab33094 3260/* XXX: optimize */
50013115
PM
3261uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3262 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
3263{
3264 uint8_t val;
50013115
PM
3265 MemTxResult r;
3266
3267 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3268 if (result) {
3269 *result = r;
3270 }
aab33094
FB
3271 return val;
3272}
3273
50013115
PM
3274uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3275{
3276 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3277}
3278
733f0b02 3279/* warning: addr must be aligned */
50013115
PM
3280static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3281 hwaddr addr,
3282 MemTxAttrs attrs,
3283 MemTxResult *result,
3284 enum device_endian endian)
aab33094 3285{
733f0b02
MT
3286 uint8_t *ptr;
3287 uint64_t val;
5c8a00ce 3288 MemoryRegion *mr;
149f54b5
PB
3289 hwaddr l = 2;
3290 hwaddr addr1;
50013115 3291 MemTxResult r;
4840f10e 3292 bool release_lock = false;
733f0b02 3293
41063e1e 3294 rcu_read_lock();
41701aa4 3295 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3296 false);
3297 if (l < 2 || !memory_access_is_direct(mr, false)) {
4840f10e 3298 release_lock |= prepare_mmio_access(mr);
125b3806 3299
733f0b02 3300 /* I/O case */
50013115 3301 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
1e78bcc1
AG
3302#if defined(TARGET_WORDS_BIGENDIAN)
3303 if (endian == DEVICE_LITTLE_ENDIAN) {
3304 val = bswap16(val);
3305 }
3306#else
3307 if (endian == DEVICE_BIG_ENDIAN) {
3308 val = bswap16(val);
3309 }
3310#endif
733f0b02
MT
3311 } else {
3312 /* RAM case */
3655cb9c
GA
3313 ptr = qemu_get_ram_ptr(mr->ram_block,
3314 (memory_region_get_ram_addr(mr)
06ef3525 3315 & TARGET_PAGE_MASK)
149f54b5 3316 + addr1);
1e78bcc1
AG
3317 switch (endian) {
3318 case DEVICE_LITTLE_ENDIAN:
3319 val = lduw_le_p(ptr);
3320 break;
3321 case DEVICE_BIG_ENDIAN:
3322 val = lduw_be_p(ptr);
3323 break;
3324 default:
3325 val = lduw_p(ptr);
3326 break;
3327 }
50013115
PM
3328 r = MEMTX_OK;
3329 }
3330 if (result) {
3331 *result = r;
733f0b02 3332 }
4840f10e
JK
3333 if (release_lock) {
3334 qemu_mutex_unlock_iothread();
3335 }
41063e1e 3336 rcu_read_unlock();
733f0b02 3337 return val;
aab33094
FB
3338}
3339
50013115
PM
3340uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3341 MemTxAttrs attrs, MemTxResult *result)
3342{
3343 return address_space_lduw_internal(as, addr, attrs, result,
3344 DEVICE_NATIVE_ENDIAN);
3345}
3346
3347uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3348 MemTxAttrs attrs, MemTxResult *result)
3349{
3350 return address_space_lduw_internal(as, addr, attrs, result,
3351 DEVICE_LITTLE_ENDIAN);
3352}
3353
3354uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3355 MemTxAttrs attrs, MemTxResult *result)
3356{
3357 return address_space_lduw_internal(as, addr, attrs, result,
3358 DEVICE_BIG_ENDIAN);
3359}
3360
41701aa4 3361uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3362{
50013115 3363 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3364}
3365
41701aa4 3366uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3367{
50013115 3368 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3369}
3370
41701aa4 3371uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3372{
50013115 3373 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3374}
3375
8df1cd07
FB
3376/* warning: addr must be aligned. The ram page is not masked as dirty
3377 and the code inside is not invalidated. It is useful if the dirty
3378 bits are used to track modified PTEs */
50013115
PM
3379void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3380 MemTxAttrs attrs, MemTxResult *result)
8df1cd07 3381{
8df1cd07 3382 uint8_t *ptr;
5c8a00ce 3383 MemoryRegion *mr;
149f54b5
PB
3384 hwaddr l = 4;
3385 hwaddr addr1;
50013115 3386 MemTxResult r;
845b6214 3387 uint8_t dirty_log_mask;
4840f10e 3388 bool release_lock = false;
8df1cd07 3389
41063e1e 3390 rcu_read_lock();
2198a121 3391 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3392 true);
3393 if (l < 4 || !memory_access_is_direct(mr, true)) {
4840f10e 3394 release_lock |= prepare_mmio_access(mr);
125b3806 3395
50013115 3396 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3397 } else {
5c8a00ce 3398 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3655cb9c 3399 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
8df1cd07 3400 stl_p(ptr, val);
74576198 3401
845b6214
PB
3402 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3403 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
58d2707e 3404 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
50013115
PM
3405 r = MEMTX_OK;
3406 }
3407 if (result) {
3408 *result = r;
8df1cd07 3409 }
4840f10e
JK
3410 if (release_lock) {
3411 qemu_mutex_unlock_iothread();
3412 }
41063e1e 3413 rcu_read_unlock();
8df1cd07
FB
3414}
3415
50013115
PM
3416void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3417{
3418 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3419}
3420
8df1cd07 3421/* warning: addr must be aligned */
50013115
PM
3422static inline void address_space_stl_internal(AddressSpace *as,
3423 hwaddr addr, uint32_t val,
3424 MemTxAttrs attrs,
3425 MemTxResult *result,
3426 enum device_endian endian)
8df1cd07 3427{
8df1cd07 3428 uint8_t *ptr;
5c8a00ce 3429 MemoryRegion *mr;
149f54b5
PB
3430 hwaddr l = 4;
3431 hwaddr addr1;
50013115 3432 MemTxResult r;
4840f10e 3433 bool release_lock = false;
8df1cd07 3434
41063e1e 3435 rcu_read_lock();
ab1da857 3436 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3437 true);
3438 if (l < 4 || !memory_access_is_direct(mr, true)) {
4840f10e 3439 release_lock |= prepare_mmio_access(mr);
125b3806 3440
1e78bcc1
AG
3441#if defined(TARGET_WORDS_BIGENDIAN)
3442 if (endian == DEVICE_LITTLE_ENDIAN) {
3443 val = bswap32(val);
3444 }
3445#else
3446 if (endian == DEVICE_BIG_ENDIAN) {
3447 val = bswap32(val);
3448 }
3449#endif
50013115 3450 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3451 } else {
8df1cd07 3452 /* RAM case */
5c8a00ce 3453 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3655cb9c 3454 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
1e78bcc1
AG
3455 switch (endian) {
3456 case DEVICE_LITTLE_ENDIAN:
3457 stl_le_p(ptr, val);
3458 break;
3459 case DEVICE_BIG_ENDIAN:
3460 stl_be_p(ptr, val);
3461 break;
3462 default:
3463 stl_p(ptr, val);
3464 break;
3465 }
845b6214 3466 invalidate_and_set_dirty(mr, addr1, 4);
50013115
PM
3467 r = MEMTX_OK;
3468 }
3469 if (result) {
3470 *result = r;
8df1cd07 3471 }
4840f10e
JK
3472 if (release_lock) {
3473 qemu_mutex_unlock_iothread();
3474 }
41063e1e 3475 rcu_read_unlock();
8df1cd07
FB
3476}
3477
50013115
PM
3478void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3479 MemTxAttrs attrs, MemTxResult *result)
3480{
3481 address_space_stl_internal(as, addr, val, attrs, result,
3482 DEVICE_NATIVE_ENDIAN);
3483}
3484
3485void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3486 MemTxAttrs attrs, MemTxResult *result)
3487{
3488 address_space_stl_internal(as, addr, val, attrs, result,
3489 DEVICE_LITTLE_ENDIAN);
3490}
3491
3492void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3493 MemTxAttrs attrs, MemTxResult *result)
3494{
3495 address_space_stl_internal(as, addr, val, attrs, result,
3496 DEVICE_BIG_ENDIAN);
3497}
3498
ab1da857 3499void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3500{
50013115 3501 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3502}
3503
ab1da857 3504void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3505{
50013115 3506 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3507}
3508
ab1da857 3509void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3510{
50013115 3511 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3512}
3513
aab33094 3514/* XXX: optimize */
50013115
PM
3515void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3516 MemTxAttrs attrs, MemTxResult *result)
aab33094 3517{
3518 uint8_t v = val;
50013115 3519 MemTxResult r;
3520
3521 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3522 if (result) {
3523 *result = r;
3524 }
3525}
3526
3527void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3528{
3529 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
aab33094 3530}
3531
733f0b02 3532/* warning: addr must be aligned */
50013115 3533static inline void address_space_stw_internal(AddressSpace *as,
3534 hwaddr addr, uint32_t val,
3535 MemTxAttrs attrs,
3536 MemTxResult *result,
3537 enum device_endian endian)
aab33094 3538{
733f0b02 3539 uint8_t *ptr;
5c8a00ce 3540 MemoryRegion *mr;
149f54b5 3541 hwaddr l = 2;
3542 hwaddr addr1;
50013115 3543 MemTxResult r;
4840f10e 3544 bool release_lock = false;
733f0b02 3545
41063e1e 3546 rcu_read_lock();
5ce5944d 3547 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 3548 if (l < 2 || !memory_access_is_direct(mr, true)) {
4840f10e 3549 release_lock |= prepare_mmio_access(mr);
125b3806 3550
1e78bcc1 3551#if defined(TARGET_WORDS_BIGENDIAN)
3552 if (endian == DEVICE_LITTLE_ENDIAN) {
3553 val = bswap16(val);
3554 }
3555#else
3556 if (endian == DEVICE_BIG_ENDIAN) {
3557 val = bswap16(val);
3558 }
3559#endif
50013115 3560 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
733f0b02 3561 } else {
733f0b02 3562 /* RAM case */
5c8a00ce 3563 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3655cb9c 3564 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
1e78bcc1 3565 switch (endian) {
3566 case DEVICE_LITTLE_ENDIAN:
3567 stw_le_p(ptr, val);
3568 break;
3569 case DEVICE_BIG_ENDIAN:
3570 stw_be_p(ptr, val);
3571 break;
3572 default:
3573 stw_p(ptr, val);
3574 break;
3575 }
845b6214 3576 invalidate_and_set_dirty(mr, addr1, 2);
50013115 3577 r = MEMTX_OK;
3578 }
3579 if (result) {
3580 *result = r;
733f0b02 3581 }
4840f10e 3582 if (release_lock) {
3583 qemu_mutex_unlock_iothread();
3584 }
41063e1e 3585 rcu_read_unlock();
aab33094 3586}
3587
50013115 3588void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3589 MemTxAttrs attrs, MemTxResult *result)
3590{
3591 address_space_stw_internal(as, addr, val, attrs, result,
3592 DEVICE_NATIVE_ENDIAN);
3593}
3594
3595void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3596 MemTxAttrs attrs, MemTxResult *result)
3597{
3598 address_space_stw_internal(as, addr, val, attrs, result,
3599 DEVICE_LITTLE_ENDIAN);
3600}
3601
3602void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3603 MemTxAttrs attrs, MemTxResult *result)
3604{
3605 address_space_stw_internal(as, addr, val, attrs, result,
3606 DEVICE_BIG_ENDIAN);
3607}
3608
5ce5944d 3609void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3610{
50013115 3611 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1 3612}
3613
5ce5944d 3614void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3615{
50013115 3616 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1 3617}
3618
5ce5944d 3619void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3620{
50013115 3621 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1 3622}
3623
aab33094 3624/* XXX: optimize */
50013115 3625void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3626 MemTxAttrs attrs, MemTxResult *result)
aab33094 3627{
50013115 3628 MemTxResult r;
aab33094 3629 val = tswap64(val);
50013115 3630 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3631 if (result) {
3632 *result = r;
3633 }
aab33094 3634}
3635
50013115 3636void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3637 MemTxAttrs attrs, MemTxResult *result)
1e78bcc1 3638{
50013115 3639 MemTxResult r;
1e78bcc1 3640 val = cpu_to_le64(val);
50013115 3641 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3642 if (result) {
3643 *result = r;
3644 }
3645}
3646void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3647 MemTxAttrs attrs, MemTxResult *result)
3648{
3649 MemTxResult r;
3650 val = cpu_to_be64(val);
3651 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3652 if (result) {
3653 *result = r;
3654 }
3655}
3656
3657void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3658{
3659 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3660}
3661
3662void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3663{
3664 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1 3665}
3666
f606604f 3667void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1 3668{
50013115 3669 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1 3670}
3671
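A sketch, not from exec.c, of the attrs/result form of these helpers: a caller that cares about bus errors passes a MemTxResult pointer instead of NULL.

/* Hypothetical example: write a 64-bit little-endian value and report
 * whether the transaction completed without error. */
static bool example_store_u64_checked(AddressSpace *as, hwaddr addr,
                                      uint64_t val)
{
    MemTxResult res;

    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}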
5e2972fd 3672/* virtual memory access for debug (includes writing to ROM) */
f17ec444 3673int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 3674 uint8_t *buf, int len, int is_write)
13eb76e0 3675{
3676 int l;
a8170e5e 3677 hwaddr phys_addr;
9b3c35e0 3678 target_ulong page;
13eb76e0 3679
3680 while (len > 0) {
5232e4c7 3681 int asidx;
3682 MemTxAttrs attrs;
3683
13eb76e0 3684 page = addr & TARGET_PAGE_MASK;
5232e4c7 3685 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3686 asidx = cpu_asidx_from_attrs(cpu, attrs);
13eb76e0 3687 /* if no physical page mapped, return an error */
3688 if (phys_addr == -1)
3689 return -1;
3690 l = (page + TARGET_PAGE_SIZE) - addr;
3691 if (l > len)
3692 l = len;
5e2972fd 3693 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b 3694 if (is_write) {
5232e4c7 3695 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3696 phys_addr, buf, l);
2e38847b 3697 } else {
5232e4c7 3698 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3699 MEMTXATTRS_UNSPECIFIED,
5c9eb028 3700 buf, l, 0);
2e38847b 3701 }
13eb76e0 3702 len -= l;
3703 buf += l;
3704 addr += l;
3705 }
3706 return 0;
3707}
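A sketch, not part of exec.c, of how a monitor- or gdbstub-style caller might wrap cpu_memory_rw_debug(); the wrapper name is hypothetical.

/* Hypothetical example: read guest virtual memory for a debug dump.
 * A negative return means some page in the range had no physical
 * mapping. */
static int example_debug_read(CPUState *cpu, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(cpu, vaddr, buf, len, 0 /* is_write */);
}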
038629a6 3708
3709/*
3710 * Allows code that needs to deal with migration bitmaps etc to still be built
3711 * target independent.
3712 */
3713size_t qemu_target_page_bits(void)
3714{
3715 return TARGET_PAGE_BITS;
3716}
3717
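A sketch, not from exec.c: target-independent callers (e.g. migration code) can derive the page size from qemu_target_page_bits() without pulling in target headers.

/* Hypothetical example: compute the target page size and align an
 * address down to a page boundary. */
static size_t example_target_page_size(void)
{
    return (size_t)1 << qemu_target_page_bits();
}

static uint64_t example_page_align_down(uint64_t addr)
{
    return addr & ~((uint64_t)example_target_page_size() - 1);
}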
a68fe89c 3718#endif
13eb76e0 3719
8e4a424b 3720/*
3721 * A helper function for the _utterly broken_ virtio device model to find out if
3722 * it's running on a big endian machine. Don't do this at home kids!
3723 */
98ed8ecf 3724bool target_words_bigendian(void);
3725bool target_words_bigendian(void)
8e4a424b 3726{
3727#if defined(TARGET_WORDS_BIGENDIAN)
3728 return true;
3729#else
3730 return false;
3731#endif
3732}
3733
76f35538 3734#ifndef CONFIG_USER_ONLY
a8170e5e 3735bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 3736{
5c8a00ce 3737 MemoryRegion *mr;
149f54b5 3738 hwaddr l = 1;
41063e1e 3739 bool res;
76f35538 3740
41063e1e 3741 rcu_read_lock();
5c8a00ce 3742 mr = address_space_translate(&address_space_memory,
3743 phys_addr, &phys_addr, &l, false);
76f35538 3744
41063e1e 3745 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3746 rcu_read_unlock();
3747 return res;
76f35538 3748}
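A sketch, not part of exec.c, assuming TARGET_PAGE_SIZE stepping: a scanner can use cpu_physical_memory_is_io() to skip device-backed pages.

/* Hypothetical example: count MMIO-backed pages in a physical range. */
static int example_count_io_pages(hwaddr start, hwaddr size)
{
    hwaddr a;
    int n = 0;

    for (a = start; a < start + size; a += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_io(a)) {
            n++;
        }
    }
    return n;
}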
bd2fa51f 3749
e3807054 3750int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
bd2fa51f 3751{
3752 RAMBlock *block;
e3807054 3753 int ret = 0;
bd2fa51f 3754
0dc3f44a 3755 rcu_read_lock();
3756 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
e3807054 3757 ret = func(block->idstr, block->host, block->offset,
3758 block->used_length, opaque);
3759 if (ret) {
3760 break;
3761 }
bd2fa51f 3762 }
0dc3f44a 3763 rcu_read_unlock();
e3807054 3764 return ret;
bd2fa51f 3765}
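A sketch, not from exec.c, assuming the callback takes the five arguments passed by the loop above: an iterator that sums the used length of every RAM block.

/* Hypothetical example: a RAMBlockIterFunc-style callback that adds up
 * each block's used length; returning non-zero would stop iteration. */
static int example_sum_block_cb(const char *idstr, void *host_addr,
                                ram_addr_t offset, ram_addr_t length,
                                void *opaque)
{
    *(uint64_t *)opaque += length;
    return 0;
}

static uint64_t example_total_ram_used(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_block_cb, &total);
    return total;
}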
ec3f8c99 3766#endif