54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004 4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
7b31bbc2 19#include "qemu/osdep.h"
da34e65c 20#include "qapi/error.h"
54936004 21
f348b6d1 22#include "qemu/cutils.h"
6180a181 23#include "cpu.h"
63c91552 24#include "exec/exec-all.h"
51180423 25#include "exec/target_page.h"
b67d9a52 26#include "tcg.h"
741da0d3 27#include "hw/qdev-core.h"
c7e002c5 28#include "hw/qdev-properties.h"
4485bd26 29#if !defined(CONFIG_USER_ONLY)
47c8ca53 30#include "hw/boards.h"
33c11879 31#include "hw/xen/xen.h"
4485bd26 32#endif
9c17d615 33#include "sysemu/kvm.h"
2ff3de68 34#include "sysemu/sysemu.h"
1de7afc9 35#include "qemu/timer.h"
36#include "qemu/config-file.h"
75a34036 37#include "qemu/error-report.h"
53a5960a 38#if defined(CONFIG_USER_ONLY)
a9c94277 39#include "qemu.h"
432d268c 40#else /* !CONFIG_USER_ONLY */
741da0d3 41#include "hw/hw.h"
42#include "exec/memory.h"
df43d49c 43#include "exec/ioport.h"
741da0d3 44#include "sysemu/dma.h"
9c607668 45#include "sysemu/numa.h"
79ca7a1b 46#include "sysemu/hw_accel.h"
741da0d3 47#include "exec/address-spaces.h"
9c17d615 48#include "sysemu/xen-mapcache.h"
0ab8ed18 49#include "trace-root.h"
d3a5038c 50
e2fa71f5 51#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
e2fa71f5 52#include <linux/falloc.h>
53#endif
54
53a5960a 55#endif
0dc3f44a 56#include "qemu/rcu_queue.h"
4840f10e 57#include "qemu/main-loop.h"
5b6dd868 58#include "translate-all.h"
7615936e 59#include "sysemu/replay.h"
0cac1b66 60
022c62cb 61#include "exec/memory-internal.h"
220c3ebd 62#include "exec/ram_addr.h"
508127e2 63#include "exec/log.h"
67d95c15 64
9dfeca7c 65#include "migration/vmstate.h"
66
b35ba30f 67#include "qemu/range.h"
794e8f30 68#ifndef _WIN32
69#include "qemu/mmap-alloc.h"
70#endif
b35ba30f 71
be9b23c4 72#include "monitor/monitor.h"
73
db7b5426 74//#define DEBUG_SUBPAGE
1196be37 75
e2eef170 76#if !defined(CONFIG_USER_ONLY)
0dc3f44a 77/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
78 * are protected by the ramlist lock.
79 */
0d53d9fe 80RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a 81
82static MemoryRegion *system_memory;
309cb471 83static MemoryRegion *system_io;
62152b8a 84
f6790af6 85AddressSpace address_space_io;
86AddressSpace address_space_memory;
2673a5da 87
0844e007 88MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 89static MemoryRegion io_mem_unassigned;
0e0df1e2 90
7bd4f430 91/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
92#define RAM_PREALLOC (1 << 0)
93
dbcb8981 94/* RAM is mmap-ed with MAP_SHARED */
95#define RAM_SHARED (1 << 1)
96
62be4e3a 97/* Only a portion of RAM (used_length) is actually used, and migrated.
98 * This used_length size can change across reboots.
99 */
100#define RAM_RESIZEABLE (1 << 2)
101
e2eef170 102#endif
9fa3e853 103
20bccb82 104#ifdef TARGET_PAGE_BITS_VARY
105int target_page_bits;
106bool target_page_bits_decided;
107#endif
108
bdc44640 109struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601 110/* current CPU in the current thread. It is only valid inside
111 cpu_exec() */
f240eb6f 112__thread CPUState *current_cpu;
2e70f6ef 113/* 0 = Do not count executed instructions.
bf20dc07 114 1 = Precise instruction counting.
2e70f6ef 115 2 = Adaptive rate instruction counting. */
5708fc66 116int use_icount;
6a00d601 117
a0be0c58 118uintptr_t qemu_host_page_size;
119intptr_t qemu_host_page_mask;
a0be0c58 120
20bccb82 121bool set_preferred_target_page_bits(int bits)
122{
123 /* The target page size is the lowest common denominator for all
124 * the CPUs in the system, so we can only make it smaller, never
125 * larger. And we can't make it smaller once we've committed to
126 * a particular size.
127 */
128#ifdef TARGET_PAGE_BITS_VARY
129 assert(bits >= TARGET_PAGE_BITS_MIN);
130 if (target_page_bits == 0 || target_page_bits > bits) {
131 if (target_page_bits_decided) {
132 return false;
133 }
134 target_page_bits = bits;
135 }
136#endif
137 return true;
138}
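/* Example: a board whose CPUs support only 4 KiB pages would call
 * set_preferred_target_page_bits(12). A later call with a larger value is
 * a no-op, and a call with a smaller value returns false once the page
 * size has been finalized.
 */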
139
e2eef170 140#if !defined(CONFIG_USER_ONLY)
4346ae3e 141
20bccb82 142static void finalize_target_page_bits(void)
143{
144#ifdef TARGET_PAGE_BITS_VARY
145 if (target_page_bits == 0) {
146 target_page_bits = TARGET_PAGE_BITS_MIN;
147 }
148 target_page_bits_decided = true;
149#endif
150}
151
1db8abb1 152typedef struct PhysPageEntry PhysPageEntry;
153
154struct PhysPageEntry {
9736e55b 155 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 156 uint32_t skip : 6;
9736e55b 157 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 158 uint32_t ptr : 26;
1db8abb1 159};
160
8b795765 161#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
162
03f49957 163/* Size of the L2 (and L3, etc) page tables. */
57271d63 164#define ADDR_SPACE_BITS 64
03f49957 165
026736ce 166#define P_L2_BITS 9
03f49957 167#define P_L2_SIZE (1 << P_L2_BITS)
168
169#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
170
171typedef PhysPageEntry Node[P_L2_SIZE];
0475d94f 172
53cb28cb 173typedef struct PhysPageMap {
79e2b9ae 174 struct rcu_head rcu;
175
53cb28cb 176 unsigned sections_nb;
177 unsigned sections_nb_alloc;
178 unsigned nodes_nb;
179 unsigned nodes_nb_alloc;
180 Node *nodes;
181 MemoryRegionSection *sections;
182} PhysPageMap;
183
1db8abb1 184struct AddressSpaceDispatch {
729633c2 185 MemoryRegionSection *mru_section;
1db8abb1 186 /* This is a multi-level map on the physical address space.
187 * The bottom level has pointers to MemoryRegionSections.
188 */
189 PhysPageEntry phys_map;
53cb28cb 190 PhysPageMap map;
1db8abb1 191};
192
90260c6c 193#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
194typedef struct subpage_t {
195 MemoryRegion iomem;
16620684 196 FlatView *fv;
90260c6c 197 hwaddr base;
2615fabd 198 uint16_t sub_section[];
90260c6c 199} subpage_t;
200
b41aac4f 201#define PHYS_SECTION_UNASSIGNED 0
202#define PHYS_SECTION_NOTDIRTY 1
203#define PHYS_SECTION_ROM 2
204#define PHYS_SECTION_WATCH 3
5312bd8b 205
e2eef170 206static void io_mem_init(void);
62152b8a 207static void memory_map_init(void);
09daed84 208static void tcg_commit(MemoryListener *listener);
e2eef170 209
1ec9b909 210static MemoryRegion io_mem_watch;
32857f4d 211
212/**
213 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
214 * @cpu: the CPU whose AddressSpace this is
215 * @as: the AddressSpace itself
216 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
217 * @tcg_as_listener: listener for tracking changes to the AddressSpace
218 */
219struct CPUAddressSpace {
220 CPUState *cpu;
221 AddressSpace *as;
222 struct AddressSpaceDispatch *memory_dispatch;
223 MemoryListener tcg_as_listener;
224};
225
8deaf12c 226struct DirtyBitmapSnapshot {
227 ram_addr_t start;
228 ram_addr_t end;
229 unsigned long dirty[];
230};
231
6658ffb8 232#endif
fd6ce8f6 233
6d9a1304 234#if !defined(CONFIG_USER_ONLY)
d6f2ea22 235
53cb28cb 236static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 237{
101420b8 238 static unsigned alloc_hint = 16;
53cb28cb 239 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
101420b8 240 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
53cb28cb 241 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
242 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
101420b8 243 alloc_hint = map->nodes_nb_alloc;
d6f2ea22 244 }
f7bf5461 245}
246
db94604b 247static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
f7bf5461 248{
249 unsigned i;
8b795765 250 uint32_t ret;
db94604b 251 PhysPageEntry e;
252 PhysPageEntry *p;
f7bf5461 253
53cb28cb 254 ret = map->nodes_nb++;
db94604b 255 p = map->nodes[ret];
f7bf5461 256 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 257 assert(ret != map->nodes_nb_alloc);
db94604b 258
259 e.skip = leaf ? 0 : 1;
260 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
03f49957 261 for (i = 0; i < P_L2_SIZE; ++i) {
db94604b 262 memcpy(&p[i], &e, sizeof(e));
d6f2ea22 263 }
f7bf5461 264 return ret;
d6f2ea22 265}
266
53cb28cb 267static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
268 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 269 int level)
f7bf5461 270{
271 PhysPageEntry *p;
03f49957 272 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 273
9736e55b 274 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
db94604b 275 lp->ptr = phys_map_node_alloc(map, level == 0);
92e873b9 276 }
db94604b 277 p = map->nodes[lp->ptr];
03f49957 278 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 279
03f49957 280 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 281 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 282 lp->skip = 0;
c19e8800 283 lp->ptr = leaf;
07f07b31 284 *index += step;
285 *nb -= step;
2999097b 286 } else {
53cb28cb 287 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b 288 }
289 ++lp;
f7bf5461 290 }
291}
292
ac1970fb 293static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 294 hwaddr index, hwaddr nb,
2999097b 295 uint16_t leaf)
f7bf5461 296{
2999097b 297 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 298 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 299
53cb28cb 300 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9 301}
302
b35ba30f 303/* Compact a non leaf page entry. Simply detect that the entry has a single child,
304 * and update our entry so we can skip it and go directly to the destination.
305 */
efee678d 306static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
b35ba30f 307{
308 unsigned valid_ptr = P_L2_SIZE;
309 int valid = 0;
310 PhysPageEntry *p;
311 int i;
312
313 if (lp->ptr == PHYS_MAP_NODE_NIL) {
314 return;
315 }
316
317 p = nodes[lp->ptr];
318 for (i = 0; i < P_L2_SIZE; i++) {
319 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
320 continue;
321 }
322
323 valid_ptr = i;
324 valid++;
325 if (p[i].skip) {
efee678d 326 phys_page_compact(&p[i], nodes);
b35ba30f 327 }
328 }
329
330 /* We can only compress if there's only one child. */
331 if (valid != 1) {
332 return;
333 }
334
335 assert(valid_ptr < P_L2_SIZE);
336
337 /* Don't compress if it won't fit in the # of bits we have. */
338 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
339 return;
340 }
341
342 lp->ptr = p[valid_ptr].ptr;
343 if (!p[valid_ptr].skip) {
344 /* If our only child is a leaf, make this a leaf. */
345 /* By design, we should have made this node a leaf to begin with so we
346 * should never reach here.
347 * But since it's so simple to handle this, let's do it just in case we
348 * change this rule.
349 */
350 lp->skip = 0;
351 } else {
352 lp->skip += p[valid_ptr].skip;
353 }
354}
355
8629d3fc 356void address_space_dispatch_compact(AddressSpaceDispatch *d)
b35ba30f 357{
b35ba30f 358 if (d->phys_map.skip) {
efee678d 359 phys_page_compact(&d->phys_map, d->map.nodes);
b35ba30f 360 }
361}
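/* Example of the effect: a chain of intermediate nodes that each have a
 * single child collapses into the parent entry, whose 'skip' count then
 * covers the folded levels (phys_page_compact() stops folding once the
 * combined skip would no longer fit), so lookups jump straight to the leaf.
 */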
362
29cb533d 363static inline bool section_covers_addr(const MemoryRegionSection *section,
364 hwaddr addr)
365{
366 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
367 * the section must cover the entire address space.
368 */
258dfaaa 369 return int128_gethi(section->size) ||
29cb533d 370 range_covers_byte(section->offset_within_address_space,
258dfaaa 371 int128_getlo(section->size), addr);
29cb533d 372}
373
003a0cf2 374static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
92e873b9 375{
003a0cf2 376 PhysPageEntry lp = d->phys_map, *p;
377 Node *nodes = d->map.nodes;
378 MemoryRegionSection *sections = d->map.sections;
97115a8d 379 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 380 int i;
f1f6e3b8 381
9736e55b 382 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 383 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 384 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 385 }
9affd6fc 386 p = nodes[lp.ptr];
03f49957 387 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 388 }
b35ba30f 389
29cb533d 390 if (section_covers_addr(&sections[lp.ptr], addr)) {
b35ba30f 391 return &sections[lp.ptr];
392 } else {
393 return &sections[PHYS_SECTION_UNASSIGNED];
394 }
f3705d53 395}
396
e5548617 397bool memory_region_is_unassigned(MemoryRegion *mr)
398{
2a8e7499 399 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 400 && mr != &io_mem_watch;
fd6ce8f6 401}
149f54b5 402
79e2b9ae 403/* Called from RCU critical section */
c7086b4a 404static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c 405 hwaddr addr,
406 bool resolve_subpage)
9f029603 407{
729633c2 408 MemoryRegionSection *section = atomic_read(&d->mru_section);
90260c6c 409 subpage_t *subpage;
410
07c114bb 411 if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
412 !section_covers_addr(section, addr)) {
003a0cf2 413 section = phys_page_find(d, addr);
07c114bb 414 atomic_set(&d->mru_section, section);
729633c2 415 }
90260c6c 416 if (resolve_subpage && section->mr->subpage) {
417 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 418 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c 419 }
420 return section;
9f029603 421}
422
79e2b9ae 423/* Called from RCU critical section */
90260c6c 424static MemoryRegionSection *
c7086b4a 425address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 426 hwaddr *plen, bool resolve_subpage)
149f54b5 427{
428 MemoryRegionSection *section;
965eb2fc 429 MemoryRegion *mr;
a87f3954 430 Int128 diff;
149f54b5 431
c7086b4a 432 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5 433 /* Compute offset within MemoryRegionSection */
434 addr -= section->offset_within_address_space;
435
436 /* Compute offset within MemoryRegion */
437 *xlat = addr + section->offset_within_region;
438
965eb2fc 439 mr = section->mr;
b242e0e0 440
441 /* MMIO registers can be expected to perform full-width accesses based only
442 * on their address, without considering adjacent registers that could
443 * decode to completely different MemoryRegions. When such registers
444 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
445 * regions overlap wildly. For this reason we cannot clamp the accesses
446 * here.
447 *
448 * If the length is small (as is the case for address_space_ldl/stl),
449 * everything works fine. If the incoming length is large, however,
450 * the caller really has to do the clamping through memory_access_size.
451 */
965eb2fc 452 if (memory_region_is_ram(mr)) {
e4a511f8 453 diff = int128_sub(section->size, int128_make64(addr));
965eb2fc 454 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
455 }
149f54b5 456 return section;
457}
90260c6c 458
d5e5fafd 459/**
460 * flatview_do_translate - translate an address in FlatView
461 *
462 * @fv: the flat view that we want to translate on
463 * @addr: the address to be translated within the above address space
464 * @xlat: the translated address offset within memory region. It
465 * cannot be @NULL.
466 * @plen_out: valid read/write length of the translated address. It
467 * can be @NULL when we don't care about it.
468 * @page_mask_out: page mask for the translated address. This
469 * should only be meaningful for IOMMU translated
470 * addresses, since there may be huge pages whose size this mask
471 * indicates. It can be @NULL if we don't care about it.
472 * @is_write: whether the translation operation is for write
473 * @is_mmio: whether this can be MMIO, set true if it can
474 *
475 * This function is called from an RCU critical section.
476 */
16620684 477static MemoryRegionSection flatview_do_translate(FlatView *fv,
478 hwaddr addr,
479 hwaddr *xlat,
d5e5fafd 480 hwaddr *plen_out,
481 hwaddr *page_mask_out,
16620684 482 bool is_write,
483 bool is_mmio,
484 AddressSpace **target_as)
052c8fa9 485{
a764040c 486 IOMMUTLBEntry iotlb;
052c8fa9 487 MemoryRegionSection *section;
3df9d748 488 IOMMUMemoryRegion *iommu_mr;
1221a474 489 IOMMUMemoryRegionClass *imrc;
d5e5fafd 490 hwaddr page_mask = (hwaddr)(-1);
491 hwaddr plen = (hwaddr)(-1);
492
493 if (plen_out) {
494 plen = *plen_out;
495 }
052c8fa9 496
497 for (;;) {
16620684 498 section = address_space_translate_internal(
499 flatview_to_dispatch(fv), addr, &addr,
d5e5fafd 500 &plen, is_mmio);
052c8fa9 501
3df9d748 502 iommu_mr = memory_region_get_iommu(section->mr);
503 if (!iommu_mr) {
052c8fa9 504 break;
505 }
1221a474 506 imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
052c8fa9 507
1221a474 508 iotlb = imrc->translate(iommu_mr, addr, is_write ?
509 IOMMU_WO : IOMMU_RO);
a764040c 510 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
511 | (addr & iotlb.addr_mask));
d5e5fafd 512 page_mask &= iotlb.addr_mask;
513 plen = MIN(plen, (addr | iotlb.addr_mask) - addr + 1);
052c8fa9 514 if (!(iotlb.perm & (1 << is_write))) {
a764040c 515 goto translate_fail;
052c8fa9 516 }
517
16620684 518 fv = address_space_to_flatview(iotlb.target_as);
e76bb18f 519 *target_as = iotlb.target_as;
052c8fa9 520 }
521
a764040c 522 *xlat = addr;
523
d5e5fafd 524 if (page_mask == (hwaddr)(-1)) {
525 /* Not behind an IOMMU, use default page size. */
526 page_mask = ~TARGET_PAGE_MASK;
527 }
528
529 if (page_mask_out) {
530 *page_mask_out = page_mask;
531 }
532
533 if (plen_out) {
534 *plen_out = plen;
535 }
536
a764040c 537 return *section;
538
539translate_fail:
540 return (MemoryRegionSection) { .mr = &io_mem_unassigned };
052c8fa9 541}
542
543/* Called from RCU critical section */
a764040c 544IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
545 bool is_write)
90260c6c 546{
a764040c 547 MemoryRegionSection section;
076a93d7 548 hwaddr xlat, page_mask;
30951157 549
076a93d7 550 /*
551 * This can never be MMIO, and we don't really care about plen,
552 * but page mask.
553 */
554 section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
555 NULL, &page_mask, is_write, false, &as);
30951157 556
a764040c 557 /* Illegal translation */
558 if (section.mr == &io_mem_unassigned) {
559 goto iotlb_fail;
560 }
30951157 561
a764040c 562 /* Convert memory region offset into address space offset */
563 xlat += section.offset_within_address_space -
564 section.offset_within_region;
565
a764040c 566 return (IOMMUTLBEntry) {
e76bb18f 567 .target_as = as,
076a93d7 568 .iova = addr & ~page_mask,
569 .translated_addr = xlat & ~page_mask,
570 .addr_mask = page_mask,
a764040c 571 /* IOTLBs are for DMAs, and DMA is allowed only to RAM. */
572 .perm = IOMMU_RW,
573 };
574
575iotlb_fail:
576 return (IOMMUTLBEntry) {0};
577}
578
579/* Called from RCU critical section */
16620684
AK
580MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
581 hwaddr *plen, bool is_write)
a764040c
PX
582{
583 MemoryRegion *mr;
584 MemoryRegionSection section;
16620684 585 AddressSpace *as = NULL;
a764040c 586
587 /* This can be MMIO, so setup MMIO bit. */
d5e5fafd 588 section = flatview_do_translate(fv, addr, xlat, plen, NULL,
589 is_write, true, &as);
a764040c
PX
590 mr = section.mr;
591
fe680d0d 592 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954 593 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
23820dbf 594 *plen = MIN(page, *plen);
a87f3954 595 }
596
30951157 597 return mr;
90260c6c 598}
599
79e2b9ae 600/* Called from RCU critical section */
90260c6c 601MemoryRegionSection *
d7898cda 602address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
9d82b5a7 603 hwaddr *xlat, hwaddr *plen)
90260c6c 604{
30951157 605 MemoryRegionSection *section;
f35e44e7 606 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
d7898cda 607
608 section = address_space_translate_internal(d, addr, xlat, plen, false);
30951157 609
3df9d748 610 assert(!memory_region_is_iommu(section->mr));
30951157 611 return section;
90260c6c 612}
5b6dd868 613#endif
fd6ce8f6 614
b170fce3 615#if !defined(CONFIG_USER_ONLY)
5b6dd868 616
617static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 618{
259186a7 619 CPUState *cpu = opaque;
a513fe19 620
5b6dd868 621 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
622 version_id is increased. */
259186a7 623 cpu->interrupt_request &= ~0x01;
d10eb08f 624 tlb_flush(cpu);
5b6dd868 625
15a356c4 626 /* loadvm has just updated the content of RAM, bypassing the
627 * usual mechanisms that ensure we flush TBs for writes to
628 * memory we've translated code from. So we must flush all TBs,
629 * which will now be stale.
630 */
631 tb_flush(cpu);
632
5b6dd868 633 return 0;
a513fe19 634}
7501267e 635
6c3bff0e 636static int cpu_common_pre_load(void *opaque)
637{
638 CPUState *cpu = opaque;
639
adee6424 640 cpu->exception_index = -1;
6c3bff0e 641
642 return 0;
643}
644
645static bool cpu_common_exception_index_needed(void *opaque)
646{
647 CPUState *cpu = opaque;
648
adee6424 649 return tcg_enabled() && cpu->exception_index != -1;
6c3bff0e 650}
651
652static const VMStateDescription vmstate_cpu_common_exception_index = {
653 .name = "cpu_common/exception_index",
654 .version_id = 1,
655 .minimum_version_id = 1,
5cd8cada 656 .needed = cpu_common_exception_index_needed,
6c3bff0e 657 .fields = (VMStateField[]) {
658 VMSTATE_INT32(exception_index, CPUState),
659 VMSTATE_END_OF_LIST()
660 }
661};
662
bac05aa9 663static bool cpu_common_crash_occurred_needed(void *opaque)
664{
665 CPUState *cpu = opaque;
666
667 return cpu->crash_occurred;
668}
669
670static const VMStateDescription vmstate_cpu_common_crash_occurred = {
671 .name = "cpu_common/crash_occurred",
672 .version_id = 1,
673 .minimum_version_id = 1,
674 .needed = cpu_common_crash_occurred_needed,
675 .fields = (VMStateField[]) {
676 VMSTATE_BOOL(crash_occurred, CPUState),
677 VMSTATE_END_OF_LIST()
678 }
679};
680
1a1562f5 681const VMStateDescription vmstate_cpu_common = {
5b6dd868 682 .name = "cpu_common",
683 .version_id = 1,
684 .minimum_version_id = 1,
6c3bff0e 685 .pre_load = cpu_common_pre_load,
5b6dd868 686 .post_load = cpu_common_post_load,
35d08458 687 .fields = (VMStateField[]) {
259186a7 688 VMSTATE_UINT32(halted, CPUState),
689 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 690 VMSTATE_END_OF_LIST()
6c3bff0e 691 },
5cd8cada 692 .subsections = (const VMStateDescription*[]) {
693 &vmstate_cpu_common_exception_index,
bac05aa9 694 &vmstate_cpu_common_crash_occurred,
5cd8cada 695 NULL
5b6dd868 696 }
697};
1a1562f5 698
5b6dd868 699#endif
ea041c0e 700
38d8f5c8 701CPUState *qemu_get_cpu(int index)
ea041c0e 702{
bdc44640 703 CPUState *cpu;
ea041c0e 704
bdc44640 705 CPU_FOREACH(cpu) {
55e5c285 706 if (cpu->cpu_index == index) {
bdc44640 707 return cpu;
55e5c285 708 }
ea041c0e 709 }
5b6dd868 710
bdc44640 711 return NULL;
ea041c0e 712}
713
09daed84 714#if !defined(CONFIG_USER_ONLY)
80ceb07a 715void cpu_address_space_init(CPUState *cpu, int asidx,
716 const char *prefix, MemoryRegion *mr)
09daed84 717{
12ebc9a7 718 CPUAddressSpace *newas;
80ceb07a 719 AddressSpace *as = g_new0(AddressSpace, 1);
87a621d8 720 char *as_name;
80ceb07a 721
722 assert(mr);
87a621d8 723 as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
724 address_space_init(as, mr, as_name);
725 g_free(as_name);
12ebc9a7 726
727 /* Target code should have set num_ases before calling us */
728 assert(asidx < cpu->num_ases);
729
56943e8c 730 if (asidx == 0) {
731 /* address space 0 gets the convenience alias */
732 cpu->as = as;
733 }
734
12ebc9a7 735 /* KVM cannot currently support multiple address spaces. */
736 assert(asidx == 0 || !kvm_enabled());
09daed84 737
12ebc9a7 738 if (!cpu->cpu_ases) {
739 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
09daed84 740 }
32857f4d 741
12ebc9a7 742 newas = &cpu->cpu_ases[asidx];
743 newas->cpu = cpu;
744 newas->as = as;
56943e8c 745 if (tcg_enabled()) {
12ebc9a7 746 newas->tcg_as_listener.commit = tcg_commit;
747 memory_listener_register(&newas->tcg_as_listener, as);
56943e8c 748 }
09daed84 749}
651a5bc0 750
751AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
752{
753 /* Return the AddressSpace corresponding to the specified index */
754 return cpu->cpu_ases[asidx].as;
755}
09daed84 756#endif
757
7bbc124e 758void cpu_exec_unrealizefn(CPUState *cpu)
1c59eb39 759{
9dfeca7c 760 CPUClass *cc = CPU_GET_CLASS(cpu);
761
267f685b 762 cpu_list_remove(cpu);
9dfeca7c 763
764 if (cc->vmsd != NULL) {
765 vmstate_unregister(NULL, cc->vmsd, cpu);
766 }
767 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
768 vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
769 }
1c59eb39 770}
771
c7e002c5 772Property cpu_common_props[] = {
773#ifndef CONFIG_USER_ONLY
774 /* Create a memory property for softmmu CPU object,
775 * so users can wire up its memory. (This can't go in qom/cpu.c
776 * because that file is compiled only once for both user-mode
777 * and system builds.) The default if no link is set up is to use
778 * the system address space.
779 */
780 DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
781 MemoryRegion *),
782#endif
783 DEFINE_PROP_END_OF_LIST(),
784};
785
39e329e3 786void cpu_exec_initfn(CPUState *cpu)
ea041c0e 787{
56943e8c 788 cpu->as = NULL;
12ebc9a7 789 cpu->num_ases = 0;
56943e8c 790
291135b5 791#ifndef CONFIG_USER_ONLY
291135b5 792 cpu->thread_id = qemu_get_thread_id();
6731d864 793 cpu->memory = system_memory;
794 object_ref(OBJECT(cpu->memory));
291135b5 795#endif
39e329e3 796}
797
ce5b1bbf 798void cpu_exec_realizefn(CPUState *cpu, Error **errp)
39e329e3 799{
55c3ceef 800 CPUClass *cc = CPU_GET_CLASS(cpu);
2dda6354 801 static bool tcg_target_initialized;
291135b5 802
267f685b 803 cpu_list_add(cpu);
1bc7e522 804
2dda6354 805 if (tcg_enabled() && !tcg_target_initialized) {
806 tcg_target_initialized = true;
55c3ceef 807 cc->tcg_initialize();
808 }
809
1bc7e522 810#ifndef CONFIG_USER_ONLY
e0d47944 811 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
741da0d3 812 vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
e0d47944 813 }
b170fce3 814 if (cc->vmsd != NULL) {
741da0d3 815 vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
b170fce3 816 }
741da0d3 817#endif
ea041c0e 818}
819
406bc339 820#if defined(CONFIG_USER_ONLY)
00b941e5 821static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 822{
406bc339 823 mmap_lock();
824 tb_lock();
825 tb_invalidate_phys_page_range(pc, pc + 1, 0);
826 tb_unlock();
827 mmap_unlock();
828}
829#else
830static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
831{
832 MemTxAttrs attrs;
833 hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
834 int asidx = cpu_asidx_from_attrs(cpu, attrs);
835 if (phys != -1) {
836 /* Locks grabbed by tb_invalidate_phys_addr */
837 tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
838 phys | (pc & ~TARGET_PAGE_MASK));
839 }
1e7855a5 840}
406bc339 841#endif
d720b93d 842
c527ee8f 843#if defined(CONFIG_USER_ONLY)
75a34036 844void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f 845
846{
847}
848
3ee887e8 849int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
850 int flags)
851{
852 return -ENOSYS;
853}
854
855void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
856{
857}
858
75a34036 859int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f 860 int flags, CPUWatchpoint **watchpoint)
861{
862 return -ENOSYS;
863}
864#else
6658ffb8 865/* Add a watchpoint. */
75a34036 866int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 867 int flags, CPUWatchpoint **watchpoint)
6658ffb8 868{
c0ce998e 869 CPUWatchpoint *wp;
6658ffb8 870
05068c0d 871 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 872 if (len == 0 || (addr + len - 1) < addr) {
75a34036 873 error_report("tried to set invalid watchpoint at %"
874 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334 875 return -EINVAL;
876 }
7267c094 877 wp = g_malloc(sizeof(*wp));
a1d1bb31 878
879 wp->vaddr = addr;
05068c0d 880 wp->len = len;
a1d1bb31 881 wp->flags = flags;
882
2dc9f411 883 /* keep all GDB-injected watchpoints in front */
ff4700b0 884 if (flags & BP_GDB) {
885 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
886 } else {
887 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
888 }
6658ffb8 889
31b030d4 890 tlb_flush_page(cpu, addr);
a1d1bb31 891
892 if (watchpoint)
893 *watchpoint = wp;
894 return 0;
6658ffb8 895}
896
a1d1bb31 897/* Remove a specific watchpoint. */
75a34036 898int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 899 int flags)
6658ffb8 900{
a1d1bb31 901 CPUWatchpoint *wp;
6658ffb8 902
ff4700b0 903 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 904 if (addr == wp->vaddr && len == wp->len
6e140f28 905 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 906 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8 907 return 0;
908 }
909 }
a1d1bb31 910 return -ENOENT;
6658ffb8 911}
912
a1d1bb31 913/* Remove a specific watchpoint by reference. */
75a34036 914void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 915{
ff4700b0 916 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 917
31b030d4 918 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 919
7267c094 920 g_free(watchpoint);
a1d1bb31 921}
922
923/* Remove all matching watchpoints. */
75a34036 924void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 925{
c0ce998e 926 CPUWatchpoint *wp, *next;
a1d1bb31 927
ff4700b0 928 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036 929 if (wp->flags & mask) {
930 cpu_watchpoint_remove_by_ref(cpu, wp);
931 }
c0ce998e 932 }
7d03f82f 933}
05068c0d 934
935/* Return true if this watchpoint address matches the specified
936 * access (ie the address range covered by the watchpoint overlaps
937 * partially or completely with the address range covered by the
938 * access).
939 */
940static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
941 vaddr addr,
942 vaddr len)
943{
944 /* We know the lengths are non-zero, but a little caution is
945 * required to avoid errors in the case where the range ends
946 * exactly at the top of the address space and so addr + len
947 * wraps round to zero.
948 */
949 vaddr wpend = wp->vaddr + wp->len - 1;
950 vaddr addrend = addr + len - 1;
951
952 return !(addr > wpend || wp->vaddr > addrend);
953}
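/* Worked example: for a watchpoint with vaddr == 0xfffffffffffff000 and
 * len == 0x1000, wp->vaddr + wp->len would wrap to 0, but wpend is
 * 0xffffffffffffffff, so an access to that last page still matches.
 */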
954
c527ee8f 955#endif
7d03f82f 956
a1d1bb31 957/* Add a breakpoint. */
b3310ab3 958int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 959 CPUBreakpoint **breakpoint)
4c3a88a2 960{
c0ce998e 961 CPUBreakpoint *bp;
3b46e624 962
7267c094 963 bp = g_malloc(sizeof(*bp));
4c3a88a2 964
a1d1bb31 965 bp->pc = pc;
966 bp->flags = flags;
967
2dc9f411 968 /* keep all GDB-injected breakpoints in front */
00b941e5 969 if (flags & BP_GDB) {
f0c3c505 970 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 971 } else {
f0c3c505 972 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 973 }
3b46e624 974
f0c3c505 975 breakpoint_invalidate(cpu, pc);
a1d1bb31 976
00b941e5 977 if (breakpoint) {
a1d1bb31 978 *breakpoint = bp;
00b941e5 979 }
4c3a88a2 980 return 0;
4c3a88a2 981}
982
a1d1bb31 983/* Remove a specific breakpoint. */
b3310ab3 984int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 985{
a1d1bb31 986 CPUBreakpoint *bp;
987
f0c3c505 988 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 989 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 990 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31 991 return 0;
992 }
7d03f82f 993 }
a1d1bb31 994 return -ENOENT;
7d03f82f 995}
996
a1d1bb31 997/* Remove a specific breakpoint by reference. */
b3310ab3 998void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 999{
f0c3c505 1000 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
1001
1002 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 1003
7267c094 1004 g_free(breakpoint);
a1d1bb31 1005}
1006
1007/* Remove all matching breakpoints. */
b3310ab3 1008void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 1009{
c0ce998e 1010 CPUBreakpoint *bp, *next;
a1d1bb31 1011
f0c3c505 1012 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3 1013 if (bp->flags & mask) {
1014 cpu_breakpoint_remove_by_ref(cpu, bp);
1015 }
c0ce998e 1016 }
4c3a88a2 1017}
1018
c33a346e 1019/* enable or disable single step mode. EXCP_DEBUG is returned by the
1020 CPU loop after each instruction */
3825b28f 1021void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 1022{
ed2803da 1023 if (cpu->singlestep_enabled != enabled) {
1024 cpu->singlestep_enabled = enabled;
1025 if (kvm_enabled()) {
38e478ec 1026 kvm_update_guest_debug(cpu, 0);
ed2803da 1027 } else {
ccbb4d44 1028 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 1029 /* XXX: only flush what is necessary */
bbd77c18 1030 tb_flush(cpu);
e22a25c9 1031 }
c33a346e 1032 }
c33a346e 1033}
1034
a47dddd7 1035void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e 1036{
1037 va_list ap;
493ae1f0 1038 va_list ap2;
7501267e 1039
1040 va_start(ap, fmt);
493ae1f0 1041 va_copy(ap2, ap);
7501267e 1042 fprintf(stderr, "qemu: fatal: ");
1043 vfprintf(stderr, fmt, ap);
1044 fprintf(stderr, "\n");
878096ee 1045 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
013a2942 1046 if (qemu_log_separate()) {
1ee73216 1047 qemu_log_lock();
93fcfe39 1048 qemu_log("qemu: fatal: ");
1049 qemu_log_vprintf(fmt, ap2);
1050 qemu_log("\n");
a0762859 1051 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 1052 qemu_log_flush();
1ee73216 1053 qemu_log_unlock();
93fcfe39 1054 qemu_log_close();
924edcae 1055 }
493ae1f0 1056 va_end(ap2);
f9373291 1057 va_end(ap);
7615936e 1058 replay_finish();
fd052bf6 1059#if defined(CONFIG_USER_ONLY)
1060 {
1061 struct sigaction act;
1062 sigfillset(&act.sa_mask);
1063 act.sa_handler = SIG_DFL;
1064 sigaction(SIGABRT, &act, NULL);
1065 }
1066#endif
7501267e 1067 abort();
1068}
1069
0124311e 1070#if !defined(CONFIG_USER_ONLY)
0dc3f44a 1071/* Called from RCU critical section */
041603fe 1072static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
1073{
1074 RAMBlock *block;
1075
43771539 1076 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 1077 if (block && addr - block->offset < block->max_length) {
68851b98 1078 return block;
041603fe 1079 }
99e15582 1080 RAMBLOCK_FOREACH(block) {
9b8424d5 1081 if (addr - block->offset < block->max_length) {
041603fe 1082 goto found;
1083 }
1084 }
1085
1086 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1087 abort();
1088
1089found:
43771539 1090 /* It is safe to write mru_block outside the iothread lock. This
1091 * is what happens:
1092 *
1093 * mru_block = xxx
1094 * rcu_read_unlock()
1095 * xxx removed from list
1096 * rcu_read_lock()
1097 * read mru_block
1098 * mru_block = NULL;
1099 * call_rcu(reclaim_ramblock, xxx);
1100 * rcu_read_unlock()
1101 *
1102 * atomic_rcu_set is not needed here. The block was already published
1103 * when it was placed into the list. Here we're just making an extra
1104 * copy of the pointer.
1105 */
041603fe 1106 ram_list.mru_block = block;
1107 return block;
1108}
1109
a2f4d5be 1110static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 1111{
9a13565d 1112 CPUState *cpu;
041603fe 1113 ram_addr_t start1;
a2f4d5be 1114 RAMBlock *block;
1115 ram_addr_t end;
1116
1117 end = TARGET_PAGE_ALIGN(start + length);
1118 start &= TARGET_PAGE_MASK;
d24981d3 1119
0dc3f44a 1120 rcu_read_lock();
041603fe 1121 block = qemu_get_ram_block(start);
1122 assert(block == qemu_get_ram_block(end - 1));
1240be24 1123 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
9a13565d 1124 CPU_FOREACH(cpu) {
1125 tlb_reset_dirty(cpu, start1, length);
1126 }
0dc3f44a 1127 rcu_read_unlock();
d24981d3 1128}
1129
5579c7f3 1130/* Note: start and end must be within the same ram block. */
03eebc9e 1131bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
1132 ram_addr_t length,
1133 unsigned client)
1ccde1cb 1134{
5b82b703 1135 DirtyMemoryBlocks *blocks;
03eebc9e 1136 unsigned long end, page;
5b82b703 1137 bool dirty = false;
03eebc9e 1138
1139 if (length == 0) {
1140 return false;
1141 }
f23db169 1142
03eebc9e 1143 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
1144 page = start >> TARGET_PAGE_BITS;
5b82b703 1145
1146 rcu_read_lock();
1147
1148 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1149
1150 while (page < end) {
1151 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1152 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1153 unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
1154
1155 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
1156 offset, num);
1157 page += num;
1158 }
1159
1160 rcu_read_unlock();
03eebc9e 1161
1162 if (dirty && tcg_enabled()) {
a2f4d5be 1163 tlb_reset_dirty_range_all(start, length);
5579c7f3 1164 }
03eebc9e 1165
1166 return dirty;
1ccde1cb 1167}
1168
8deaf12c 1169DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
1170 (ram_addr_t start, ram_addr_t length, unsigned client)
1171{
1172 DirtyMemoryBlocks *blocks;
1173 unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
1174 ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
1175 ram_addr_t last = QEMU_ALIGN_UP(start + length, align);
1176 DirtyBitmapSnapshot *snap;
1177 unsigned long page, end, dest;
1178
1179 snap = g_malloc0(sizeof(*snap) +
1180 ((last - first) >> (TARGET_PAGE_BITS + 3)));
1181 snap->start = first;
1182 snap->end = last;
1183
1184 page = first >> TARGET_PAGE_BITS;
1185 end = last >> TARGET_PAGE_BITS;
1186 dest = 0;
1187
1188 rcu_read_lock();
1189
1190 blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
1191
1192 while (page < end) {
1193 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
1194 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
1195 unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);
1196
1197 assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
1198 assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
1199 offset >>= BITS_PER_LEVEL;
1200
1201 bitmap_copy_and_clear_atomic(snap->dirty + dest,
1202 blocks->blocks[idx] + offset,
1203 num);
1204 page += num;
1205 dest += num >> BITS_PER_LEVEL;
1206 }
1207
1208 rcu_read_unlock();
1209
1210 if (tcg_enabled()) {
1211 tlb_reset_dirty_range_all(start, length);
1212 }
1213
1214 return snap;
1215}
1216
1217bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
1218 ram_addr_t start,
1219 ram_addr_t length)
1220{
1221 unsigned long page, end;
1222
1223 assert(start >= snap->start);
1224 assert(start + length <= snap->end);
1225
1226 end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
1227 page = (start - snap->start) >> TARGET_PAGE_BITS;
1228
1229 while (page < end) {
1230 if (test_bit(page, snap->dirty)) {
1231 return true;
1232 }
1233 page++;
1234 }
1235 return false;
1236}
1237
79e2b9ae 1238/* Called from RCU critical section */
bb0e627a 1239hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5 1240 MemoryRegionSection *section,
1241 target_ulong vaddr,
1242 hwaddr paddr, hwaddr xlat,
1243 int prot,
1244 target_ulong *address)
e5548617 1245{
a8170e5e 1246 hwaddr iotlb;
e5548617 1247 CPUWatchpoint *wp;
1248
cc5bea60 1249 if (memory_region_is_ram(section->mr)) {
e5548617 1250 /* Normal RAM. */
e4e69794 1251 iotlb = memory_region_get_ram_addr(section->mr) + xlat;
e5548617 1252 if (!section->readonly) {
b41aac4f 1253 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 1254 } else {
b41aac4f 1255 iotlb |= PHYS_SECTION_ROM;
e5548617 1256 }
1257 } else {
0b8e2c10 1258 AddressSpaceDispatch *d;
1259
16620684 1260 d = flatview_to_dispatch(section->fv);
0b8e2c10 1261 iotlb = section - d->map.sections;
149f54b5 1262 iotlb += xlat;
e5548617 1263 }
1264
1265 /* Make accesses to pages with watchpoints go via the
1266 watchpoint trap routines. */
ff4700b0 1267 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 1268 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617 1269 /* Avoid trapping reads of pages with a write breakpoint. */
1270 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 1271 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617 1272 *address |= TLB_MMIO;
1273 break;
1274 }
1275 }
1276 }
1277
1278 return iotlb;
1279}
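/* To summarize the encoding built above: for RAM the returned value is the
 * ram_addr_t of the target page with PHYS_SECTION_NOTDIRTY or
 * PHYS_SECTION_ROM ORed into the low bits; for MMIO it is the index of the
 * section in the dispatch table plus the offset within that section; pages
 * with a relevant watchpoint use PHYS_SECTION_WATCH plus the physical
 * address instead.
 */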
9fa3e853 1280#endif /* defined(CONFIG_USER_ONLY) */
1281
e2eef170 1282#if !defined(CONFIG_USER_ONLY)
8da3ff18 1283
c227f099 1284static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1285 uint16_t section);
16620684 1286static subpage_t *subpage_init(FlatView *fv, hwaddr base);
54688b1e 1287
a2b257d6 1288static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1289 qemu_anon_ram_alloc;
91138037 1290
1291/*
1292 * Set a custom physical guest memory allocator.
1293 * Accelerators with unusual needs may need this. Hopefully, we can
1294 * get rid of it eventually.
1295 */
a2b257d6 1296void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
91138037 1297{
1298 phys_mem_alloc = alloc;
1299}
1300
53cb28cb 1301static uint16_t phys_section_add(PhysPageMap *map,
1302 MemoryRegionSection *section)
5312bd8b 1303{
68f3f65b 1304 /* The physical section number is ORed with a page-aligned
1305 * pointer to produce the iotlb entries. Thus it should
1306 * never overflow into the page-aligned value.
1307 */
53cb28cb 1308 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 1309
53cb28cb 1310 if (map->sections_nb == map->sections_nb_alloc) {
1311 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1312 map->sections = g_renew(MemoryRegionSection, map->sections,
1313 map->sections_nb_alloc);
5312bd8b 1314 }
53cb28cb 1315 map->sections[map->sections_nb] = *section;
dfde4e6e 1316 memory_region_ref(section->mr);
53cb28cb 1317 return map->sections_nb++;
5312bd8b 1318}
1319
058bc4b5 1320static void phys_section_destroy(MemoryRegion *mr)
1321{
55b4e80b 1322 bool have_sub_page = mr->subpage;
1323
dfde4e6e 1324 memory_region_unref(mr);
1325
55b4e80b 1326 if (have_sub_page) {
058bc4b5 1327 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 1328 object_unref(OBJECT(&subpage->iomem));
058bc4b5 1329 g_free(subpage);
1330 }
1331}
1332
6092666e 1333static void phys_sections_free(PhysPageMap *map)
5312bd8b 1334{
9affd6fc 1335 while (map->sections_nb > 0) {
1336 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5 1337 phys_section_destroy(section->mr);
1338 }
9affd6fc 1339 g_free(map->sections);
1340 g_free(map->nodes);
5312bd8b 1341}
1342
9950322a 1343static void register_subpage(FlatView *fv, MemoryRegionSection *section)
0f0cb164 1344{
9950322a 1345 AddressSpaceDispatch *d = flatview_to_dispatch(fv);
0f0cb164 1346 subpage_t *subpage;
a8170e5e 1347 hwaddr base = section->offset_within_address_space
0f0cb164 1348 & TARGET_PAGE_MASK;
003a0cf2 1349 MemoryRegionSection *existing = phys_page_find(d, base);
0f0cb164 1350 MemoryRegionSection subsection = {
1351 .offset_within_address_space = base,
052e87b0 1352 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 1353 };
a8170e5e 1354 hwaddr start, end;
0f0cb164 1355
f3705d53 1356 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 1357
f3705d53 1358 if (!(existing->mr->subpage)) {
16620684 1359 subpage = subpage_init(fv, base);
1360 subsection.fv = fv;
0f0cb164 1361 subsection.mr = &subpage->iomem;
ac1970fb 1362 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 1363 phys_section_add(&d->map, &subsection));
0f0cb164 1364 } else {
f3705d53 1365 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164 1366 }
1367 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 1368 end = start + int128_get64(section->size) - 1;
53cb28cb 1369 subpage_register(subpage, start, end,
1370 phys_section_add(&d->map, section));
0f0cb164 1371}
1372
1373
9950322a 1374static void register_multipage(FlatView *fv,
052e87b0 1375 MemoryRegionSection *section)
33417e70 1376{
9950322a 1377 AddressSpaceDispatch *d = flatview_to_dispatch(fv);
a8170e5e 1378 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 1379 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0 1380 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1381 TARGET_PAGE_BITS));
dd81124b 1382
733d5ef5 1383 assert(num_pages);
1384 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70 1385}
1386
8629d3fc 1387void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
0f0cb164 1388{
99b9cc06 1389 MemoryRegionSection now = *section, remain = *section;
052e87b0 1390 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1391
733d5ef5 1392 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1393 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1394 - now.offset_within_address_space;
1395
052e87b0 1396 now.size = int128_min(int128_make64(left), now.size);
9950322a 1397 register_subpage(fv, &now);
733d5ef5 1398 } else {
052e87b0 1399 now.size = int128_zero();
733d5ef5 1400 }
052e87b0 1401 while (int128_ne(remain.size, now.size)) {
1402 remain.size = int128_sub(remain.size, now.size);
1403 remain.offset_within_address_space += int128_get64(now.size);
1404 remain.offset_within_region += int128_get64(now.size);
69b67646 1405 now = remain;
052e87b0 1406 if (int128_lt(remain.size, page_size)) {
9950322a 1407 register_subpage(fv, &now);
88266249 1408 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1409 now.size = page_size;
9950322a 1410 register_subpage(fv, &now);
69b67646 1411 } else {
052e87b0 1412 now.size = int128_and(now.size, int128_neg(page_size));
9950322a 1413 register_multipage(fv, &now);
69b67646 1414 }
0f0cb164 1415 }
1416}
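/* In effect, a section that is not page-aligned is split into a leading
 * partial page (registered as a subpage), a run of whole target pages
 * (registered in one go as a multipage entry), and a trailing partial
 * page (again a subpage).
 */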
1417
62a2744c 1418void qemu_flush_coalesced_mmio_buffer(void)
1419{
1420 if (kvm_enabled())
1421 kvm_flush_coalesced_mmio_buffer();
1422}
1423
b2a8658e 1424void qemu_mutex_lock_ramlist(void)
1425{
1426 qemu_mutex_lock(&ram_list.mutex);
1427}
1428
1429void qemu_mutex_unlock_ramlist(void)
1430{
1431 qemu_mutex_unlock(&ram_list.mutex);
1432}
1433
be9b23c4 1434void ram_block_dump(Monitor *mon)
1435{
1436 RAMBlock *block;
1437 char *psize;
1438
1439 rcu_read_lock();
1440 monitor_printf(mon, "%24s %8s %18s %18s %18s\n",
1441 "Block Name", "PSize", "Offset", "Used", "Total");
1442 RAMBLOCK_FOREACH(block) {
1443 psize = size_to_str(block->page_size);
1444 monitor_printf(mon, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64
1445 " 0x%016" PRIx64 "\n", block->idstr, psize,
1446 (uint64_t)block->offset,
1447 (uint64_t)block->used_length,
1448 (uint64_t)block->max_length);
1449 g_free(psize);
1450 }
1451 rcu_read_unlock();
1452}
1453
9c607668 1454#ifdef __linux__
1455/*
1456 * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
1457 * may or may not name the same files / on the same filesystem now as
1458 * when we actually open and map them. Iterate over the file
1459 * descriptors instead, and use qemu_fd_getpagesize().
1460 */
1461static int find_max_supported_pagesize(Object *obj, void *opaque)
1462{
1463 char *mem_path;
1464 long *hpsize_min = opaque;
1465
1466 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
1467 mem_path = object_property_get_str(obj, "mem-path", NULL);
1468 if (mem_path) {
1469 long hpsize = qemu_mempath_getpagesize(mem_path);
1470 if (hpsize < *hpsize_min) {
1471 *hpsize_min = hpsize;
1472 }
1473 } else {
1474 *hpsize_min = getpagesize();
1475 }
1476 }
1477
1478 return 0;
1479}
1480
1481long qemu_getrampagesize(void)
1482{
1483 long hpsize = LONG_MAX;
1484 long mainrampagesize;
1485 Object *memdev_root;
1486
1487 if (mem_path) {
1488 mainrampagesize = qemu_mempath_getpagesize(mem_path);
1489 } else {
1490 mainrampagesize = getpagesize();
1491 }
1492
1493 /* it's possible we have memory-backend objects with
1494 * hugepage-backed RAM. these may get mapped into system
1495 * address space via -numa parameters or memory hotplug
1496 * hooks. we want to take these into account, but we
1497 * also want to make sure these supported hugepage
1498 * sizes are applicable across the entire range of memory
1499 * we may boot from, so we take the min across all
1500 * backends, and assume normal pages in cases where a
1501 * backend isn't backed by hugepages.
1502 */
1503 memdev_root = object_resolve_path("/objects", NULL);
1504 if (memdev_root) {
1505 object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
1506 }
1507 if (hpsize == LONG_MAX) {
1508 /* No additional memory regions found ==> Report main RAM page size */
1509 return mainrampagesize;
1510 }
1511
1512 /* If NUMA is disabled or the NUMA nodes are not backed with a
1513 * memory-backend, then there is at least one node using "normal" RAM,
1514 * so if its page size is smaller we have got to report that size instead.
1515 */
1516 if (hpsize > mainrampagesize &&
1517 (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) {
1518 static bool warned;
1519 if (!warned) {
1520 error_report("Huge page support disabled (n/a for main memory).");
1521 warned = true;
1522 }
1523 return mainrampagesize;
1524 }
1525
1526 return hpsize;
1527}
1528#else
1529long qemu_getrampagesize(void)
1530{
1531 return getpagesize();
1532}
1533#endif
1534
e1e84ba0 1535#ifdef __linux__
d6af99c9 1536static int64_t get_file_size(int fd)
1537{
1538 int64_t size = lseek(fd, 0, SEEK_END);
1539 if (size < 0) {
1540 return -errno;
1541 }
1542 return size;
1543}
1544
8d37b030 1545static int file_ram_open(const char *path,
1546 const char *region_name,
1547 bool *created,
1548 Error **errp)
c902760f 1549{
1550 char *filename;
8ca761f6 1551 char *sanitized_name;
1552 char *c;
5c3ece79 1553 int fd = -1;
c902760f 1554
8d37b030 1555 *created = false;
fd97fd44 1556 for (;;) {
1557 fd = open(path, O_RDWR);
1558 if (fd >= 0) {
1559 /* @path names an existing file, use it */
1560 break;
8d31d6b6 1561 }
fd97fd44 1562 if (errno == ENOENT) {
1563 /* @path names a file that doesn't exist, create it */
1564 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1565 if (fd >= 0) {
8d37b030 1566 *created = true;
fd97fd44 1567 break;
1568 }
1569 } else if (errno == EISDIR) {
1570 /* @path names a directory, create a file there */
1571 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
8d37b030 1572 sanitized_name = g_strdup(region_name);
fd97fd44 1573 for (c = sanitized_name; *c != '\0'; c++) {
1574 if (*c == '/') {
1575 *c = '_';
1576 }
1577 }
8ca761f6 1578
fd97fd44 1579 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1580 sanitized_name);
1581 g_free(sanitized_name);
8d31d6b6 1582
fd97fd44 1583 fd = mkstemp(filename);
1584 if (fd >= 0) {
1585 unlink(filename);
1586 g_free(filename);
1587 break;
1588 }
1589 g_free(filename);
8d31d6b6 1590 }
fd97fd44 1591 if (errno != EEXIST && errno != EINTR) {
1592 error_setg_errno(errp, errno,
1593 "can't open backing store %s for guest RAM",
1594 path);
8d37b030 1595 return -1;
fd97fd44 1596 }
1597 /*
1598 * Try again on EINTR and EEXIST. The latter happens when
1599 * something else creates the file between our two open().
1600 */
8d31d6b6 1601 }
c902760f 1602
8d37b030 1603 return fd;
1604}
1605
1606static void *file_ram_alloc(RAMBlock *block,
1607 ram_addr_t memory,
1608 int fd,
1609 bool truncate,
1610 Error **errp)
1611{
1612 void *area;
1613
863e9621 1614 block->page_size = qemu_fd_getpagesize(fd);
8360668e 1615 block->mr->align = block->page_size;
1616#if defined(__s390x__)
1617 if (kvm_enabled()) {
1618 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
1619 }
1620#endif
fd97fd44 1621
863e9621 1622 if (memory < block->page_size) {
fd97fd44 1623 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
863e9621 1624 "or larger than page size 0x%zx",
1625 memory, block->page_size);
8d37b030 1626 return NULL;
1775f111 1627 }
1628
863e9621 1629 memory = ROUND_UP(memory, block->page_size);
c902760f 1630
1631 /*
1632 * ftruncate is not supported by hugetlbfs in older
1633 * hosts, so don't bother bailing out on errors.
1634 * If anything goes wrong with it under other filesystems,
1635 * mmap will fail.
d6af99c9 1636 *
1637 * Do not truncate the non-empty backend file to avoid corrupting
1638 * the existing data in the file. Disabling shrinking is not
1639 * enough. For example, the current vNVDIMM implementation stores
1640 * the guest NVDIMM labels at the end of the backend file. If the
1641 * backend file is later extended, QEMU will not be able to find
1642 * those labels. Therefore, extending the non-empty backend file
1643 * is disabled as well.
c902760f 1644 */
8d37b030 1645 if (truncate && ftruncate(fd, memory)) {
9742bf26 1646 perror("ftruncate");
7f56e740 1647 }
c902760f 1648
d2f39add 1649 area = qemu_ram_mmap(fd, memory, block->mr->align,
1650 block->flags & RAM_SHARED);
c902760f 1651 if (area == MAP_FAILED) {
7f56e740 1652 error_setg_errno(errp, errno,
fd97fd44 1653 "unable to map backing store for guest RAM");
8d37b030 1654 return NULL;
c902760f 1655 }
ef36fa14 1656
1657 if (mem_prealloc) {
1e356fc1 1658 os_mem_prealloc(fd, area, memory, smp_cpus, errp);
056b68af 1659 if (errp && *errp) {
8d37b030 1660 qemu_ram_munmap(area, memory);
1661 return NULL;
056b68af 1662 }
ef36fa14 1663 }
1664
04b16653 1665 block->fd = fd;
c902760f 1666 return area;
1667}
1668#endif
1669
154cc9ea 1670/* Allocate space within the ram_addr_t space that governs the
1671 * dirty bitmaps.
1672 * Called with the ramlist lock held.
1673 */
d17b5288 1674static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653 1675{
1676 RAMBlock *block, *next_block;
3e837b2c 1677 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1678
49cd9ac6 1679 assert(size != 0); /* it would hand out same offset multiple times */
1680
0dc3f44a 1681 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
04b16653 1682 return 0;
0d53d9fe 1683 }
04b16653 1684
99e15582 1685 RAMBLOCK_FOREACH(block) {
154cc9ea 1686 ram_addr_t candidate, next = RAM_ADDR_MAX;
04b16653 1687
801110ab 1688 /* Align blocks to start on a 'long' in the bitmap
1689 * which makes the bitmap sync'ing take the fast path.
1690 */
154cc9ea 1691 candidate = block->offset + block->max_length;
801110ab 1692 candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS);
04b16653 1693
154cc9ea 1694 /* Search for the closest following block
1695 * and find the gap.
1696 */
99e15582 1697 RAMBLOCK_FOREACH(next_block) {
154cc9ea 1698 if (next_block->offset >= candidate) {
04b16653 1699 next = MIN(next, next_block->offset);
1700 }
1701 }
154cc9ea 1702
1703 /* If it fits remember our place and remember the size
1704 * of gap, but keep going so that we might find a smaller
1705 * gap to fill so avoiding fragmentation.
1706 */
1707 if (next - candidate >= size && next - candidate < mingap) {
1708 offset = candidate;
1709 mingap = next - candidate;
04b16653 1710 }
154cc9ea 1711
1712 trace_find_ram_offset_loop(size, candidate, offset, next, mingap);
04b16653 1713 }
3e837b2c 1714
1715 if (offset == RAM_ADDR_MAX) {
1716 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1717 (uint64_t)size);
1718 abort();
1719 }
1720
154cc9ea 1721 trace_find_ram_offset(size, offset);
1722
04b16653
AW
1723 return offset;
1724}
1725
b8c48993 1726unsigned long last_ram_page(void)
d17b5288
AW
1727{
1728 RAMBlock *block;
1729 ram_addr_t last = 0;
1730
0dc3f44a 1731 rcu_read_lock();
99e15582 1732 RAMBLOCK_FOREACH(block) {
62be4e3a 1733 last = MAX(last, block->offset + block->max_length);
0d53d9fe 1734 }
0dc3f44a 1735 rcu_read_unlock();
b8c48993 1736 return last >> TARGET_PAGE_BITS;
d17b5288
AW
1737}
1738
ddb97f1d
JB
1739static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1740{
1741 int ret;
ddb97f1d
JB
1742
1743 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
47c8ca53 1744 if (!machine_dump_guest_core(current_machine)) {
ddb97f1d
JB
1745 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1746 if (ret) {
1747 perror("qemu_madvise");
1748 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1749 "but dump_guest_core=off specified\n");
1750 }
1751 }
1752}
1753
422148d3
DDAG
1754const char *qemu_ram_get_idstr(RAMBlock *rb)
1755{
1756 return rb->idstr;
1757}
1758
463a4ac2
DDAG
1759bool qemu_ram_is_shared(RAMBlock *rb)
1760{
1761 return rb->flags & RAM_SHARED;
1762}
1763
ae3a7047 1764/* Called with iothread lock held. */
fa53a0e5 1765void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
20cfe881 1766{
fa53a0e5 1767 RAMBlock *block;
20cfe881 1768
c5705a77
AK
1769 assert(new_block);
1770 assert(!new_block->idstr[0]);
84b89d78 1771
09e5ab63
AL
1772 if (dev) {
1773 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1774 if (id) {
1775 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1776 g_free(id);
84b89d78
CM
1777 }
1778 }
1779 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1780
ab0a9956 1781 rcu_read_lock();
99e15582 1782 RAMBLOCK_FOREACH(block) {
fa53a0e5
GA
1783 if (block != new_block &&
1784 !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1785 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1786 new_block->idstr);
1787 abort();
1788 }
1789 }
0dc3f44a 1790 rcu_read_unlock();
c5705a77
AK
1791}
1792
ae3a7047 1793/* Called with iothread lock held. */
fa53a0e5 1794void qemu_ram_unset_idstr(RAMBlock *block)
20cfe881 1795{
ae3a7047
MD
1796 /* FIXME: arch_init.c assumes that this is not called throughout
1797 * migration. Ignore the problem since hot-unplug during migration
1798 * does not work anyway.
1799 */
20cfe881
HT
1800 if (block) {
1801 memset(block->idstr, 0, sizeof(block->idstr));
1802 }
1803}
1804
863e9621
DDAG
1805size_t qemu_ram_pagesize(RAMBlock *rb)
1806{
1807 return rb->page_size;
1808}
1809
67f11b5c
DDAG
1810/* Returns the largest size of page in use */
1811size_t qemu_ram_pagesize_largest(void)
1812{
1813 RAMBlock *block;
1814 size_t largest = 0;
1815
99e15582 1816 RAMBLOCK_FOREACH(block) {
67f11b5c
DDAG
1817 largest = MAX(largest, qemu_ram_pagesize(block));
1818 }
1819
1820 return largest;
1821}
1822
8490fc78
LC
1823static int memory_try_enable_merging(void *addr, size_t len)
1824{
75cc7f01 1825 if (!machine_mem_merge(current_machine)) {
8490fc78
LC
1826 /* disabled by the user */
1827 return 0;
1828 }
1829
1830 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1831}
1832
62be4e3a
MT
1833/* Only legal before guest might have detected the memory size: e.g. on
1834 * incoming migration, or right after reset.
1835 *
1836 * As the memory core doesn't know how the memory is accessed, it is up to
1837 * the resize callback to update device state and/or add assertions to detect
1838 * misuse, if necessary.
1839 */
fa53a0e5 1840int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
62be4e3a 1841{
62be4e3a
MT
1842 assert(block);
1843
4ed023ce 1844 newsize = HOST_PAGE_ALIGN(newsize);
129ddaf3 1845
62be4e3a
MT
1846 if (block->used_length == newsize) {
1847 return 0;
1848 }
1849
1850 if (!(block->flags & RAM_RESIZEABLE)) {
1851 error_setg_errno(errp, EINVAL,
1852 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1853 " in != 0x" RAM_ADDR_FMT, block->idstr,
1854 newsize, block->used_length);
1855 return -EINVAL;
1856 }
1857
1858 if (block->max_length < newsize) {
1859 error_setg_errno(errp, EINVAL,
1860 "Length too large: %s: 0x" RAM_ADDR_FMT
1861 " > 0x" RAM_ADDR_FMT, block->idstr,
1862 newsize, block->max_length);
1863 return -EINVAL;
1864 }
1865
1866 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1867 block->used_length = newsize;
58d2707e
PB
1868 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1869 DIRTY_CLIENTS_ALL);
62be4e3a
MT
1870 memory_region_set_size(block->mr, newsize);
1871 if (block->resized) {
1872 block->resized(block->idstr, newsize, block->host);
1873 }
1874 return 0;
1875}
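
A minimal sketch (not part of exec.c) of how the resize path above is meant to be used: a device model allocates a resizeable block with qemu_ram_alloc_resizeable() and later grows it with qemu_ram_resize(), e.g. while loading incoming migration state or right after reset. The names example_*, fw_blk, fw_resized and the sizes are hypothetical.

static RAMBlock *fw_blk;

/* hypothetical callback: invoked after used_length has changed */
static void fw_resized(const char *id, uint64_t length, void *host)
{
    /* update device-specific state that mirrors the block size */
}

static void example_alloc(MemoryRegion *mr, Error **errp)
{
    /* 4 KiB used initially, may grow up to 1 MiB */
    fw_blk = qemu_ram_alloc_resizeable(0x1000, 0x100000, fw_resized, mr, errp);
}

static void example_grow(Error **errp)
{
    /* only legal before the guest can have observed the old size */
    qemu_ram_resize(fw_blk, 0x2000, errp);
}
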
1876
5b82b703
SH
1877/* Called with ram_list.mutex held */
1878static void dirty_memory_extend(ram_addr_t old_ram_size,
1879 ram_addr_t new_ram_size)
1880{
1881 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1882 DIRTY_MEMORY_BLOCK_SIZE);
1883 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1884 DIRTY_MEMORY_BLOCK_SIZE);
1885 int i;
1886
1887 /* Only need to extend if block count increased */
1888 if (new_num_blocks <= old_num_blocks) {
1889 return;
1890 }
1891
1892 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1893 DirtyMemoryBlocks *old_blocks;
1894 DirtyMemoryBlocks *new_blocks;
1895 int j;
1896
1897 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1898 new_blocks = g_malloc(sizeof(*new_blocks) +
1899 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1900
1901 if (old_num_blocks) {
1902 memcpy(new_blocks->blocks, old_blocks->blocks,
1903 old_num_blocks * sizeof(old_blocks->blocks[0]));
1904 }
1905
1906 for (j = old_num_blocks; j < new_num_blocks; j++) {
1907 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1908 }
1909
1910 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1911
1912 if (old_blocks) {
1913 g_free_rcu(old_blocks, rcu);
1914 }
1915 }
1916}
1917
528f46af 1918static void ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1919{
e1c57ab8 1920 RAMBlock *block;
0d53d9fe 1921 RAMBlock *last_block = NULL;
2152f5ca 1922 ram_addr_t old_ram_size, new_ram_size;
37aa7a0e 1923 Error *err = NULL;
2152f5ca 1924
b8c48993 1925 old_ram_size = last_ram_page();
c5705a77 1926
b2a8658e 1927 qemu_mutex_lock_ramlist();
9b8424d5 1928 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1929
1930 if (!new_block->host) {
1931 if (xen_enabled()) {
9b8424d5 1932 xen_ram_alloc(new_block->offset, new_block->max_length,
37aa7a0e
MA
1933 new_block->mr, &err);
1934 if (err) {
1935 error_propagate(errp, err);
1936 qemu_mutex_unlock_ramlist();
39c350ee 1937 return;
37aa7a0e 1938 }
e1c57ab8 1939 } else {
9b8424d5 1940 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1941 &new_block->mr->align);
39228250 1942 if (!new_block->host) {
ef701d7b
HT
1943 error_setg_errno(errp, errno,
1944 "cannot set up guest memory '%s'",
1945 memory_region_name(new_block->mr));
1946 qemu_mutex_unlock_ramlist();
39c350ee 1947 return;
39228250 1948 }
9b8424d5 1949 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1950 }
c902760f 1951 }
94a6b54f 1952
dd631697
LZ
1953 new_ram_size = MAX(old_ram_size,
1954 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1955 if (new_ram_size > old_ram_size) {
5b82b703 1956 dirty_memory_extend(old_ram_size, new_ram_size);
dd631697 1957 }
0d53d9fe
MD
1958 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1959 * QLIST (which has an RCU-friendly variant) does not have insertion at
1960 * tail, so save the last element in last_block.
1961 */
99e15582 1962 RAMBLOCK_FOREACH(block) {
0d53d9fe 1963 last_block = block;
9b8424d5 1964 if (block->max_length < new_block->max_length) {
abb26d63
PB
1965 break;
1966 }
1967 }
1968 if (block) {
0dc3f44a 1969 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
0d53d9fe 1970 } else if (last_block) {
0dc3f44a 1971 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
0d53d9fe 1972 } else { /* list is empty */
0dc3f44a 1973 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
abb26d63 1974 }
0d6d3c87 1975 ram_list.mru_block = NULL;
94a6b54f 1976
0dc3f44a
MD
1977 /* Write list before version */
1978 smp_wmb();
f798b07f 1979 ram_list.version++;
b2a8658e 1980 qemu_mutex_unlock_ramlist();
f798b07f 1981
9b8424d5 1982 cpu_physical_memory_set_dirty_range(new_block->offset,
58d2707e
PB
1983 new_block->used_length,
1984 DIRTY_CLIENTS_ALL);
94a6b54f 1985
a904c911
PB
1986 if (new_block->host) {
1987 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1988 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
c2cd627d 1989 /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
a904c911 1990 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
0987d735 1991 ram_block_notify_add(new_block->host, new_block->max_length);
e1c57ab8 1992 }
94a6b54f 1993}
e9a1ab19 1994
0b183fc8 1995#ifdef __linux__
38b3362d
MAL
1996RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
1997 bool share, int fd,
1998 Error **errp)
e1c57ab8
PB
1999{
2000 RAMBlock *new_block;
ef701d7b 2001 Error *local_err = NULL;
8d37b030 2002 int64_t file_size;
e1c57ab8
PB
2003
2004 if (xen_enabled()) {
7f56e740 2005 error_setg(errp, "-mem-path not supported with Xen");
528f46af 2006 return NULL;
e1c57ab8
PB
2007 }
2008
e45e7ae2
MAL
2009 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2010 error_setg(errp,
2011 "host lacks kvm mmu notifiers, -mem-path unsupported");
2012 return NULL;
2013 }
2014
e1c57ab8
PB
2015 if (phys_mem_alloc != qemu_anon_ram_alloc) {
2016 /*
2017 * file_ram_alloc() needs to allocate just like
2018 * phys_mem_alloc, but we haven't bothered to provide
2019 * a hook there.
2020 */
7f56e740
PB
2021 error_setg(errp,
2022 "-mem-path not supported with this accelerator");
528f46af 2023 return NULL;
e1c57ab8
PB
2024 }
2025
4ed023ce 2026 size = HOST_PAGE_ALIGN(size);
8d37b030
MAL
2027 file_size = get_file_size(fd);
2028 if (file_size > 0 && file_size < size) {
2029 error_setg(errp, "backing store %s size 0x%" PRIx64
2030 " does not match 'size' option 0x" RAM_ADDR_FMT,
2031 mem_path, file_size, size);
8d37b030
MAL
2032 return NULL;
2033 }
2034
e1c57ab8
PB
2035 new_block = g_malloc0(sizeof(*new_block));
2036 new_block->mr = mr;
9b8424d5
MT
2037 new_block->used_length = size;
2038 new_block->max_length = size;
dbcb8981 2039 new_block->flags = share ? RAM_SHARED : 0;
8d37b030 2040 new_block->host = file_ram_alloc(new_block, size, fd, !file_size, errp);
7f56e740
PB
2041 if (!new_block->host) {
2042 g_free(new_block);
528f46af 2043 return NULL;
7f56e740
PB
2044 }
2045
528f46af 2046 ram_block_add(new_block, &local_err);
ef701d7b
HT
2047 if (local_err) {
2048 g_free(new_block);
2049 error_propagate(errp, local_err);
528f46af 2050 return NULL;
ef701d7b 2051 }
528f46af 2052 return new_block;
38b3362d
MAL
2053
2054}
2055
2056
2057RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
2058 bool share, const char *mem_path,
2059 Error **errp)
2060{
2061 int fd;
2062 bool created;
2063 RAMBlock *block;
2064
2065 fd = file_ram_open(mem_path, memory_region_name(mr), &created, errp);
2066 if (fd < 0) {
2067 return NULL;
2068 }
2069
2070 block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
2071 if (!block) {
2072 if (created) {
2073 unlink(mem_path);
2074 }
2075 close(fd);
2076 return NULL;
2077 }
2078
2079 return block;
e1c57ab8 2080}
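
A minimal sketch (not from exec.c) of the file-backed path above, assuming a Linux host with a hugetlbfs mount; the helper name and the path are hypothetical.

static RAMBlock *example_hugepage_ram(MemoryRegion *mr, Error **errp)
{
    /* 512 MiB of guest RAM backed by a (hypothetical) hugetlbfs file,
     * mapped MAP_SHARED so another process can see the same pages */
    return qemu_ram_alloc_from_file(512 * 1024 * 1024, mr,
                                    true /* share */,
                                    "/dev/hugepages/guest-ram", errp);
}
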
0b183fc8 2081#endif
e1c57ab8 2082
62be4e3a 2083static
528f46af
FZ
2084RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
2085 void (*resized)(const char*,
2086 uint64_t length,
2087 void *host),
2088 void *host, bool resizeable,
2089 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
2090{
2091 RAMBlock *new_block;
ef701d7b 2092 Error *local_err = NULL;
e1c57ab8 2093
4ed023ce
DDAG
2094 size = HOST_PAGE_ALIGN(size);
2095 max_size = HOST_PAGE_ALIGN(max_size);
e1c57ab8
PB
2096 new_block = g_malloc0(sizeof(*new_block));
2097 new_block->mr = mr;
62be4e3a 2098 new_block->resized = resized;
9b8424d5
MT
2099 new_block->used_length = size;
2100 new_block->max_length = max_size;
62be4e3a 2101 assert(max_size >= size);
e1c57ab8 2102 new_block->fd = -1;
863e9621 2103 new_block->page_size = getpagesize();
e1c57ab8
PB
2104 new_block->host = host;
2105 if (host) {
7bd4f430 2106 new_block->flags |= RAM_PREALLOC;
e1c57ab8 2107 }
62be4e3a
MT
2108 if (resizeable) {
2109 new_block->flags |= RAM_RESIZEABLE;
2110 }
528f46af 2111 ram_block_add(new_block, &local_err);
ef701d7b
HT
2112 if (local_err) {
2113 g_free(new_block);
2114 error_propagate(errp, local_err);
528f46af 2115 return NULL;
ef701d7b 2116 }
528f46af 2117 return new_block;
e1c57ab8
PB
2118}
2119
528f46af 2120RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
62be4e3a
MT
2121 MemoryRegion *mr, Error **errp)
2122{
2123 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
2124}
2125
528f46af 2126RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 2127{
62be4e3a
MT
2128 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
2129}
2130
528f46af 2131RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
62be4e3a
MT
2132 void (*resized)(const char*,
2133 uint64_t length,
2134 void *host),
2135 MemoryRegion *mr, Error **errp)
2136{
2137 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
2138}
2139
43771539
PB
2140static void reclaim_ramblock(RAMBlock *block)
2141{
2142 if (block->flags & RAM_PREALLOC) {
2143 ;
2144 } else if (xen_enabled()) {
2145 xen_invalidate_map_cache_entry(block->host);
2146#ifndef _WIN32
2147 } else if (block->fd >= 0) {
2f3a2bb1 2148 qemu_ram_munmap(block->host, block->max_length);
43771539
PB
2149 close(block->fd);
2150#endif
2151 } else {
2152 qemu_anon_ram_free(block->host, block->max_length);
2153 }
2154 g_free(block);
2155}
2156
f1060c55 2157void qemu_ram_free(RAMBlock *block)
e9a1ab19 2158{
85bc2a15
MAL
2159 if (!block) {
2160 return;
2161 }
2162
0987d735
PB
2163 if (block->host) {
2164 ram_block_notify_remove(block->host, block->max_length);
2165 }
2166
b2a8658e 2167 qemu_mutex_lock_ramlist();
f1060c55
FZ
2168 QLIST_REMOVE_RCU(block, next);
2169 ram_list.mru_block = NULL;
2170 /* Write list before version */
2171 smp_wmb();
2172 ram_list.version++;
2173 call_rcu(block, reclaim_ramblock, rcu);
b2a8658e 2174 qemu_mutex_unlock_ramlist();
e9a1ab19
FB
2175}
2176
cd19cfa2
HY
2177#ifndef _WIN32
2178void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2179{
2180 RAMBlock *block;
2181 ram_addr_t offset;
2182 int flags;
2183 void *area, *vaddr;
2184
99e15582 2185 RAMBLOCK_FOREACH(block) {
cd19cfa2 2186 offset = addr - block->offset;
9b8424d5 2187 if (offset < block->max_length) {
1240be24 2188 vaddr = ramblock_ptr(block, offset);
7bd4f430 2189 if (block->flags & RAM_PREALLOC) {
cd19cfa2 2190 ;
dfeaf2ab
MA
2191 } else if (xen_enabled()) {
2192 abort();
cd19cfa2
HY
2193 } else {
2194 flags = MAP_FIXED;
3435f395 2195 if (block->fd >= 0) {
dbcb8981
PB
2196 flags |= (block->flags & RAM_SHARED ?
2197 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
2198 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2199 flags, block->fd, offset);
cd19cfa2 2200 } else {
2eb9fbaa
MA
2201 /*
2202 * Remap needs to match alloc. Accelerators that
2203 * set phys_mem_alloc never remap. If they did,
2204 * we'd need a remap hook here.
2205 */
2206 assert(phys_mem_alloc == qemu_anon_ram_alloc);
2207
cd19cfa2
HY
2208 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2209 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2210 flags, -1, 0);
cd19cfa2
HY
2211 }
2212 if (area != vaddr) {
f15fbc4b
AP
2213 fprintf(stderr, "Could not remap addr: "
2214 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2215 length, addr);
2216 exit(1);
2217 }
8490fc78 2218 memory_try_enable_merging(vaddr, length);
ddb97f1d 2219 qemu_ram_setup_dump(vaddr, length);
cd19cfa2 2220 }
cd19cfa2
HY
2221 }
2222 }
2223}
2224#endif /* !_WIN32 */
2225
1b5ec234 2226/* Return a host pointer to ram allocated with qemu_ram_alloc.
ae3a7047
MD
2227 * This should not be used for general purpose DMA. Use address_space_map
2228 * or address_space_rw instead. For local memory (e.g. video ram) that the
2229 * device owns, use memory_region_get_ram_ptr.
0dc3f44a 2230 *
49b24afc 2231 * Called within RCU critical section.
1b5ec234 2232 */
0878d0e1 2233void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
1b5ec234 2234{
3655cb9c
GA
2235 RAMBlock *block = ram_block;
2236
2237 if (block == NULL) {
2238 block = qemu_get_ram_block(addr);
0878d0e1 2239 addr -= block->offset;
3655cb9c 2240 }
ae3a7047
MD
2241
2242 if (xen_enabled() && block->host == NULL) {
0d6d3c87
PB
2243 /* We need to check if the requested address is in RAM
2244 * because we don't want to map the entire memory in QEMU.
2245 * In that case just map until the end of the page.
2246 */
2247 if (block->offset == 0) {
1ff7c598 2248 return xen_map_cache(addr, 0, 0, false);
0d6d3c87 2249 }
ae3a7047 2250
1ff7c598 2251 block->host = xen_map_cache(block->offset, block->max_length, 1, false);
0d6d3c87 2252 }
0878d0e1 2253 return ramblock_ptr(block, addr);
dc828ca1
PB
2254}
2255
0878d0e1 2256/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
ae3a7047 2257 * but takes a size argument.
0dc3f44a 2258 *
e81bcda5 2259 * Called within RCU critical section.
ae3a7047 2260 */
3655cb9c 2261static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
f5aa69bd 2262 hwaddr *size, bool lock)
38bee5dc 2263{
3655cb9c 2264 RAMBlock *block = ram_block;
8ab934f9
SS
2265 if (*size == 0) {
2266 return NULL;
2267 }
e81bcda5 2268
3655cb9c
GA
2269 if (block == NULL) {
2270 block = qemu_get_ram_block(addr);
0878d0e1 2271 addr -= block->offset;
3655cb9c 2272 }
0878d0e1 2273 *size = MIN(*size, block->max_length - addr);
e81bcda5
PB
2274
2275 if (xen_enabled() && block->host == NULL) {
2276 /* We need to check if the requested address is in RAM
2277 * because we don't want to map the entire memory in QEMU.
2278 * In that case just map the requested area.
2279 */
2280 if (block->offset == 0) {
f5aa69bd 2281 return xen_map_cache(addr, *size, lock, lock);
38bee5dc
SS
2282 }
2283
f5aa69bd 2284 block->host = xen_map_cache(block->offset, block->max_length, 1, lock);
38bee5dc 2285 }
e81bcda5 2286
0878d0e1 2287 return ramblock_ptr(block, addr);
38bee5dc
SS
2288}
2289
422148d3
DDAG
2290/*
2291 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
2292 * in that RAMBlock.
2293 *
2294 * ptr: Host pointer to look up
2295 * round_offset: If true round the result offset down to a page boundary
2296 * *ram_addr: set to result ram_addr
2297 * *offset: set to result offset within the RAMBlock
2298 *
2299 * Returns: RAMBlock (or NULL if not found)
ae3a7047
MD
2300 *
2301 * By the time this function returns, the returned pointer is not protected
2302 * by RCU anymore. If the caller is not within an RCU critical section and
2303 * does not hold the iothread lock, it must have other means of protecting the
2304 * pointer, such as a reference to the region that includes the incoming
2305 * ram_addr_t.
2306 */
422148d3 2307RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
422148d3 2308 ram_addr_t *offset)
5579c7f3 2309{
94a6b54f
PB
2310 RAMBlock *block;
2311 uint8_t *host = ptr;
2312
868bb33f 2313 if (xen_enabled()) {
f615f396 2314 ram_addr_t ram_addr;
0dc3f44a 2315 rcu_read_lock();
f615f396
PB
2316 ram_addr = xen_ram_addr_from_mapcache(ptr);
2317 block = qemu_get_ram_block(ram_addr);
422148d3 2318 if (block) {
d6b6aec4 2319 *offset = ram_addr - block->offset;
422148d3 2320 }
0dc3f44a 2321 rcu_read_unlock();
422148d3 2322 return block;
712c2b41
SS
2323 }
2324
0dc3f44a
MD
2325 rcu_read_lock();
2326 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 2327 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
2328 goto found;
2329 }
2330
99e15582 2331 RAMBLOCK_FOREACH(block) {
432d268c
JN
2332 /* This case appears when the block is not mapped. */
2333 if (block->host == NULL) {
2334 continue;
2335 }
9b8424d5 2336 if (host - block->host < block->max_length) {
23887b79 2337 goto found;
f471a17e 2338 }
94a6b54f 2339 }
432d268c 2340
0dc3f44a 2341 rcu_read_unlock();
1b5ec234 2342 return NULL;
23887b79
PB
2343
2344found:
422148d3
DDAG
2345 *offset = (host - block->host);
2346 if (round_offset) {
2347 *offset &= TARGET_PAGE_MASK;
2348 }
0dc3f44a 2349 rcu_read_unlock();
422148d3
DDAG
2350 return block;
2351}
2352
e3dd7493
DDAG
2353/*
2354 * Finds the named RAMBlock
2355 *
2356 * name: The name of RAMBlock to find
2357 *
2358 * Returns: RAMBlock (or NULL if not found)
2359 */
2360RAMBlock *qemu_ram_block_by_name(const char *name)
2361{
2362 RAMBlock *block;
2363
99e15582 2364 RAMBLOCK_FOREACH(block) {
e3dd7493
DDAG
2365 if (!strcmp(name, block->idstr)) {
2366 return block;
2367 }
2368 }
2369
2370 return NULL;
2371}
2372
422148d3
DDAG
2373/* Some of the softmmu routines need to translate from a host pointer
2374 (typically a TLB entry) back to a ram offset. */
07bdaa41 2375ram_addr_t qemu_ram_addr_from_host(void *ptr)
422148d3
DDAG
2376{
2377 RAMBlock *block;
f615f396 2378 ram_addr_t offset;
422148d3 2379
f615f396 2380 block = qemu_ram_block_from_host(ptr, false, &offset);
422148d3 2381 if (!block) {
07bdaa41 2382 return RAM_ADDR_INVALID;
422148d3
DDAG
2383 }
2384
07bdaa41 2385 return block->offset + offset;
e890261f 2386}
f471a17e 2387
27266271
PM
2388/* Called within RCU critical section. */
2389void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
2390 CPUState *cpu,
2391 vaddr mem_vaddr,
2392 ram_addr_t ram_addr,
2393 unsigned size)
2394{
2395 ndi->cpu = cpu;
2396 ndi->ram_addr = ram_addr;
2397 ndi->mem_vaddr = mem_vaddr;
2398 ndi->size = size;
2399 ndi->locked = false;
ba051fb5 2400
5aa1ef71 2401 assert(tcg_enabled());
52159192 2402 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
27266271 2403 ndi->locked = true;
ba051fb5 2404 tb_lock();
0e0df1e2 2405 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 2406 }
27266271
PM
2407}
2408
2409/* Called within RCU critical section. */
2410void memory_notdirty_write_complete(NotDirtyInfo *ndi)
2411{
2412 if (ndi->locked) {
2413 tb_unlock();
2414 }
2415
2416 /* Set both VGA and migration bits for simplicity and to remove
2417 * the notdirty callback faster.
2418 */
2419 cpu_physical_memory_set_dirty_range(ndi->ram_addr, ndi->size,
2420 DIRTY_CLIENTS_NOCODE);
2421 /* we remove the notdirty callback only if the code has been
2422 flushed */
2423 if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
2424 tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
2425 }
2426}
2427
2428/* Called within RCU critical section. */
2429static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
2430 uint64_t val, unsigned size)
2431{
2432 NotDirtyInfo ndi;
2433
2434 memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
2435 ram_addr, size);
2436
0e0df1e2
AK
2437 switch (size) {
2438 case 1:
0878d0e1 2439 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
0e0df1e2
AK
2440 break;
2441 case 2:
0878d0e1 2442 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
0e0df1e2
AK
2443 break;
2444 case 4:
0878d0e1 2445 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
0e0df1e2 2446 break;
ad52878f
AB
2447 case 8:
2448 stq_p(qemu_map_ram_ptr(NULL, ram_addr), val);
2449 break;
0e0df1e2
AK
2450 default:
2451 abort();
3a7d929e 2452 }
27266271 2453 memory_notdirty_write_complete(&ndi);
9fa3e853
FB
2454}
2455
b018ddf6
PB
2456static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2457 unsigned size, bool is_write)
2458{
2459 return is_write;
2460}
2461
0e0df1e2 2462static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 2463 .write = notdirty_mem_write,
b018ddf6 2464 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 2465 .endianness = DEVICE_NATIVE_ENDIAN,
ad52878f
AB
2466 .valid = {
2467 .min_access_size = 1,
2468 .max_access_size = 8,
2469 .unaligned = false,
2470 },
2471 .impl = {
2472 .min_access_size = 1,
2473 .max_access_size = 8,
2474 .unaligned = false,
2475 },
1ccde1cb
FB
2476};
2477
0f459d16 2478/* Generate a debug exception if a watchpoint has been hit. */
66b9b43c 2479static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
0f459d16 2480{
93afeade 2481 CPUState *cpu = current_cpu;
568496c0 2482 CPUClass *cc = CPU_GET_CLASS(cpu);
0f459d16 2483 target_ulong vaddr;
a1d1bb31 2484 CPUWatchpoint *wp;
0f459d16 2485
5aa1ef71 2486 assert(tcg_enabled());
ff4700b0 2487 if (cpu->watchpoint_hit) {
06d55cc1
AL
2488 /* We re-entered the check after replacing the TB. Now raise
2489 * the debug interrupt so that it will trigger after the
2490 * current instruction. */
93afeade 2491 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
2492 return;
2493 }
93afeade 2494 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
40612000 2495 vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len);
ff4700b0 2496 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
2497 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2498 && (wp->flags & flags)) {
08225676
PM
2499 if (flags == BP_MEM_READ) {
2500 wp->flags |= BP_WATCHPOINT_HIT_READ;
2501 } else {
2502 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2503 }
2504 wp->hitaddr = vaddr;
66b9b43c 2505 wp->hitattrs = attrs;
ff4700b0 2506 if (!cpu->watchpoint_hit) {
568496c0
SF
2507 if (wp->flags & BP_CPU &&
2508 !cc->debug_check_watchpoint(cpu, wp)) {
2509 wp->flags &= ~BP_WATCHPOINT_HIT;
2510 continue;
2511 }
ff4700b0 2512 cpu->watchpoint_hit = wp;
a5e99826 2513
8d04fb55
JK
2514 /* Both tb_lock and iothread_mutex will be reset when
2515 * cpu_loop_exit or cpu_loop_exit_noexc longjmp
2516 * back into the cpu_exec main loop.
a5e99826
FK
2517 */
2518 tb_lock();
239c51a5 2519 tb_check_watchpoint(cpu);
6e140f28 2520 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 2521 cpu->exception_index = EXCP_DEBUG;
5638d180 2522 cpu_loop_exit(cpu);
6e140f28 2523 } else {
9b990ee5
RH
2524 /* Force execution of one insn next time. */
2525 cpu->cflags_next_tb = 1 | curr_cflags();
6886b980 2526 cpu_loop_exit_noexc(cpu);
6e140f28 2527 }
06d55cc1 2528 }
6e140f28
AL
2529 } else {
2530 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2531 }
2532 }
2533}
2534
6658ffb8
PB
2535/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2536 so these check for a hit then pass through to the normal out-of-line
2537 phys routines. */
66b9b43c
PM
2538static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2539 unsigned size, MemTxAttrs attrs)
6658ffb8 2540{
66b9b43c
PM
2541 MemTxResult res;
2542 uint64_t data;
79ed0416
PM
2543 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2544 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
66b9b43c
PM
2545
2546 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
1ec9b909 2547 switch (size) {
66b9b43c 2548 case 1:
79ed0416 2549 data = address_space_ldub(as, addr, attrs, &res);
66b9b43c
PM
2550 break;
2551 case 2:
79ed0416 2552 data = address_space_lduw(as, addr, attrs, &res);
66b9b43c
PM
2553 break;
2554 case 4:
79ed0416 2555 data = address_space_ldl(as, addr, attrs, &res);
66b9b43c 2556 break;
306526b5
PB
2557 case 8:
2558 data = address_space_ldq(as, addr, attrs, &res);
2559 break;
1ec9b909
AK
2560 default: abort();
2561 }
66b9b43c
PM
2562 *pdata = data;
2563 return res;
6658ffb8
PB
2564}
2565
66b9b43c
PM
2566static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2567 uint64_t val, unsigned size,
2568 MemTxAttrs attrs)
6658ffb8 2569{
66b9b43c 2570 MemTxResult res;
79ed0416
PM
2571 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2572 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
66b9b43c
PM
2573
2574 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1ec9b909 2575 switch (size) {
67364150 2576 case 1:
79ed0416 2577 address_space_stb(as, addr, val, attrs, &res);
67364150
MF
2578 break;
2579 case 2:
79ed0416 2580 address_space_stw(as, addr, val, attrs, &res);
67364150
MF
2581 break;
2582 case 4:
79ed0416 2583 address_space_stl(as, addr, val, attrs, &res);
67364150 2584 break;
306526b5
PB
2585 case 8:
2586 address_space_stq(as, addr, val, attrs, &res);
2587 break;
1ec9b909
AK
2588 default: abort();
2589 }
66b9b43c 2590 return res;
6658ffb8
PB
2591}
2592
1ec9b909 2593static const MemoryRegionOps watch_mem_ops = {
66b9b43c
PM
2594 .read_with_attrs = watch_mem_read,
2595 .write_with_attrs = watch_mem_write,
1ec9b909 2596 .endianness = DEVICE_NATIVE_ENDIAN,
306526b5
PB
2597 .valid = {
2598 .min_access_size = 1,
2599 .max_access_size = 8,
2600 .unaligned = false,
2601 },
2602 .impl = {
2603 .min_access_size = 1,
2604 .max_access_size = 8,
2605 .unaligned = false,
2606 },
6658ffb8 2607};
6658ffb8 2608
16620684
AK
2609static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
2610 const uint8_t *buf, int len);
2611static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
2612 bool is_write);
2613
f25a49e0
PM
2614static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2615 unsigned len, MemTxAttrs attrs)
db7b5426 2616{
acc9d80b 2617 subpage_t *subpage = opaque;
ff6cff75 2618 uint8_t buf[8];
5c9eb028 2619 MemTxResult res;
791af8c8 2620
db7b5426 2621#if defined(DEBUG_SUBPAGE)
016e9d62 2622 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 2623 subpage, len, addr);
db7b5426 2624#endif
16620684 2625 res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
5c9eb028
PM
2626 if (res) {
2627 return res;
f25a49e0 2628 }
acc9d80b
JK
2629 switch (len) {
2630 case 1:
f25a49e0
PM
2631 *data = ldub_p(buf);
2632 return MEMTX_OK;
acc9d80b 2633 case 2:
f25a49e0
PM
2634 *data = lduw_p(buf);
2635 return MEMTX_OK;
acc9d80b 2636 case 4:
f25a49e0
PM
2637 *data = ldl_p(buf);
2638 return MEMTX_OK;
ff6cff75 2639 case 8:
f25a49e0
PM
2640 *data = ldq_p(buf);
2641 return MEMTX_OK;
acc9d80b
JK
2642 default:
2643 abort();
2644 }
db7b5426
BS
2645}
2646
f25a49e0
PM
2647static MemTxResult subpage_write(void *opaque, hwaddr addr,
2648 uint64_t value, unsigned len, MemTxAttrs attrs)
db7b5426 2649{
acc9d80b 2650 subpage_t *subpage = opaque;
ff6cff75 2651 uint8_t buf[8];
acc9d80b 2652
db7b5426 2653#if defined(DEBUG_SUBPAGE)
016e9d62 2654 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
2655 " value %"PRIx64"\n",
2656 __func__, subpage, len, addr, value);
db7b5426 2657#endif
acc9d80b
JK
2658 switch (len) {
2659 case 1:
2660 stb_p(buf, value);
2661 break;
2662 case 2:
2663 stw_p(buf, value);
2664 break;
2665 case 4:
2666 stl_p(buf, value);
2667 break;
ff6cff75
PB
2668 case 8:
2669 stq_p(buf, value);
2670 break;
acc9d80b
JK
2671 default:
2672 abort();
2673 }
16620684 2674 return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
db7b5426
BS
2675}
2676
c353e4cc 2677static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 2678 unsigned len, bool is_write)
c353e4cc 2679{
acc9d80b 2680 subpage_t *subpage = opaque;
c353e4cc 2681#if defined(DEBUG_SUBPAGE)
016e9d62 2682 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 2683 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
2684#endif
2685
16620684
AK
2686 return flatview_access_valid(subpage->fv, addr + subpage->base,
2687 len, is_write);
c353e4cc
PB
2688}
2689
70c68e44 2690static const MemoryRegionOps subpage_ops = {
f25a49e0
PM
2691 .read_with_attrs = subpage_read,
2692 .write_with_attrs = subpage_write,
ff6cff75
PB
2693 .impl.min_access_size = 1,
2694 .impl.max_access_size = 8,
2695 .valid.min_access_size = 1,
2696 .valid.max_access_size = 8,
c353e4cc 2697 .valid.accepts = subpage_accepts,
70c68e44 2698 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
2699};
2700
c227f099 2701static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2702 uint16_t section)
db7b5426
BS
2703{
2704 int idx, eidx;
2705
2706 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2707 return -1;
2708 idx = SUBPAGE_IDX(start);
2709 eidx = SUBPAGE_IDX(end);
2710#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2711 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2712 __func__, mmio, start, end, idx, eidx, section);
db7b5426 2713#endif
db7b5426 2714 for (; idx <= eidx; idx++) {
5312bd8b 2715 mmio->sub_section[idx] = section;
db7b5426
BS
2716 }
2717
2718 return 0;
2719}
2720
16620684 2721static subpage_t *subpage_init(FlatView *fv, hwaddr base)
db7b5426 2722{
c227f099 2723 subpage_t *mmio;
db7b5426 2724
2615fabd 2725 mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
16620684 2726 mmio->fv = fv;
1eec614b 2727 mmio->base = base;
2c9b15ca 2728 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 2729 NULL, TARGET_PAGE_SIZE);
b3b00c78 2730 mmio->iomem.subpage = true;
db7b5426 2731#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2732 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2733 mmio, base, TARGET_PAGE_SIZE);
db7b5426 2734#endif
b41aac4f 2735 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
2736
2737 return mmio;
2738}
2739
16620684 2740static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
5312bd8b 2741{
16620684 2742 assert(fv);
5312bd8b 2743 MemoryRegionSection section = {
16620684 2744 .fv = fv,
5312bd8b
AK
2745 .mr = mr,
2746 .offset_within_address_space = 0,
2747 .offset_within_region = 0,
052e87b0 2748 .size = int128_2_64(),
5312bd8b
AK
2749 };
2750
53cb28cb 2751 return phys_section_add(map, &section);
5312bd8b
AK
2752}
2753
8af36743
PM
2754static void readonly_mem_write(void *opaque, hwaddr addr,
2755 uint64_t val, unsigned size)
2756{
2757 /* Ignore any write to ROM. */
2758}
2759
2760static bool readonly_mem_accepts(void *opaque, hwaddr addr,
2761 unsigned size, bool is_write)
2762{
2763 return is_write;
2764}
2765
2766/* This will only be used for writes, because reads are special cased
2767 * to directly access the underlying host ram.
2768 */
2769static const MemoryRegionOps readonly_mem_ops = {
2770 .write = readonly_mem_write,
2771 .valid.accepts = readonly_mem_accepts,
2772 .endianness = DEVICE_NATIVE_ENDIAN,
2773 .valid = {
2774 .min_access_size = 1,
2775 .max_access_size = 8,
2776 .unaligned = false,
2777 },
2778 .impl = {
2779 .min_access_size = 1,
2780 .max_access_size = 8,
2781 .unaligned = false,
2782 },
2783};
2784
a54c87b6 2785MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
aa102231 2786{
a54c87b6
PM
2787 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2788 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
32857f4d 2789 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
79e2b9ae 2790 MemoryRegionSection *sections = d->map.sections;
9d82b5a7
PB
2791
2792 return sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
2793}
2794
e9179ce1
AK
2795static void io_mem_init(void)
2796{
8af36743
PM
2797 memory_region_init_io(&io_mem_rom, NULL, &readonly_mem_ops,
2798 NULL, NULL, UINT64_MAX);
2c9b15ca 2799 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 2800 NULL, UINT64_MAX);
8d04fb55
JK
2801
2802 /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
2803 * which can be called without the iothread mutex.
2804 */
2c9b15ca 2805 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 2806 NULL, UINT64_MAX);
8d04fb55
JK
2807 memory_region_clear_global_locking(&io_mem_notdirty);
2808
2c9b15ca 2809 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 2810 NULL, UINT64_MAX);
e9179ce1
AK
2811}
2812
8629d3fc 2813AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
00752703 2814{
53cb28cb
MA
2815 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2816 uint16_t n;
2817
16620684 2818 n = dummy_section(&d->map, fv, &io_mem_unassigned);
53cb28cb 2819 assert(n == PHYS_SECTION_UNASSIGNED);
16620684 2820 n = dummy_section(&d->map, fv, &io_mem_notdirty);
53cb28cb 2821 assert(n == PHYS_SECTION_NOTDIRTY);
16620684 2822 n = dummy_section(&d->map, fv, &io_mem_rom);
53cb28cb 2823 assert(n == PHYS_SECTION_ROM);
16620684 2824 n = dummy_section(&d->map, fv, &io_mem_watch);
53cb28cb 2825 assert(n == PHYS_SECTION_WATCH);
00752703 2826
9736e55b 2827 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
66a6df1d
AK
2828
2829 return d;
00752703
PB
2830}
2831
66a6df1d 2832void address_space_dispatch_free(AddressSpaceDispatch *d)
79e2b9ae
PB
2833{
2834 phys_sections_free(&d->map);
2835 g_free(d);
2836}
2837
1d71148e 2838static void tcg_commit(MemoryListener *listener)
50c1e149 2839{
32857f4d
PM
2840 CPUAddressSpace *cpuas;
2841 AddressSpaceDispatch *d;
117712c3
AK
2842
2843 /* since each CPU stores ram addresses in its TLB cache, we must
2844 reset the modified entries */
32857f4d
PM
2845 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2846 cpu_reloading_memory_map();
2847 /* The CPU and TLB are protected by the iothread lock.
2848 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2849 * may have split the RCU critical section.
2850 */
66a6df1d 2851 d = address_space_to_dispatch(cpuas->as);
f35e44e7 2852 atomic_rcu_set(&cpuas->memory_dispatch, d);
d10eb08f 2853 tlb_flush(cpuas->cpu);
50c1e149
AK
2854}
2855
62152b8a
AK
2856static void memory_map_init(void)
2857{
7267c094 2858 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2859
57271d63 2860 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2861 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2862
7267c094 2863 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2864 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2865 65536);
7dca8043 2866 address_space_init(&address_space_io, system_io, "I/O");
62152b8a
AK
2867}
2868
2869MemoryRegion *get_system_memory(void)
2870{
2871 return system_memory;
2872}
2873
309cb471
AK
2874MemoryRegion *get_system_io(void)
2875{
2876 return system_io;
2877}
2878
e2eef170
PB
2879#endif /* !defined(CONFIG_USER_ONLY) */
2880
13eb76e0
FB
2881/* physical memory access (slow version, mainly for debug) */
2882#if defined(CONFIG_USER_ONLY)
f17ec444 2883int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2884 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2885{
2886 int l, flags;
2887 target_ulong page;
53a5960a 2888 void * p;
13eb76e0
FB
2889
2890 while (len > 0) {
2891 page = addr & TARGET_PAGE_MASK;
2892 l = (page + TARGET_PAGE_SIZE) - addr;
2893 if (l > len)
2894 l = len;
2895 flags = page_get_flags(page);
2896 if (!(flags & PAGE_VALID))
a68fe89c 2897 return -1;
13eb76e0
FB
2898 if (is_write) {
2899 if (!(flags & PAGE_WRITE))
a68fe89c 2900 return -1;
579a97f7 2901 /* XXX: this code should not depend on lock_user */
72fb7daa 2902 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2903 return -1;
72fb7daa
AJ
2904 memcpy(p, buf, l);
2905 unlock_user(p, addr, l);
13eb76e0
FB
2906 } else {
2907 if (!(flags & PAGE_READ))
a68fe89c 2908 return -1;
579a97f7 2909 /* XXX: this code should not depend on lock_user */
72fb7daa 2910 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2911 return -1;
72fb7daa 2912 memcpy(buf, p, l);
5b257578 2913 unlock_user(p, addr, 0);
13eb76e0
FB
2914 }
2915 len -= l;
2916 buf += l;
2917 addr += l;
2918 }
a68fe89c 2919 return 0;
13eb76e0 2920}
8df1cd07 2921
13eb76e0 2922#else
51d7a9eb 2923
845b6214 2924static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
a8170e5e 2925 hwaddr length)
51d7a9eb 2926{
e87f7778 2927 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
0878d0e1
PB
2928 addr += memory_region_get_ram_addr(mr);
2929
e87f7778
PB
2930 /* No early return if dirty_log_mask is or becomes 0, because
2931 * cpu_physical_memory_set_dirty_range will still call
2932 * xen_modified_memory.
2933 */
2934 if (dirty_log_mask) {
2935 dirty_log_mask =
2936 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2937 }
2938 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
5aa1ef71 2939 assert(tcg_enabled());
ba051fb5 2940 tb_lock();
e87f7778 2941 tb_invalidate_phys_range(addr, addr + length);
ba051fb5 2942 tb_unlock();
e87f7778 2943 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
51d7a9eb 2944 }
e87f7778 2945 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
51d7a9eb
AP
2946}
2947
23326164 2948static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2949{
e1622f4b 2950 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2951
2952 /* Regions are assumed to support 1-4 byte accesses unless
2953 otherwise specified. */
23326164
RH
2954 if (access_size_max == 0) {
2955 access_size_max = 4;
2956 }
2957
2958 /* Bound the maximum access by the alignment of the address. */
2959 if (!mr->ops->impl.unaligned) {
2960 unsigned align_size_max = addr & -addr;
2961 if (align_size_max != 0 && align_size_max < access_size_max) {
2962 access_size_max = align_size_max;
2963 }
82f2563f 2964 }
23326164
RH
2965
2966 /* Don't attempt accesses larger than the maximum. */
2967 if (l > access_size_max) {
2968 l = access_size_max;
82f2563f 2969 }
6554f5c0 2970 l = pow2floor(l);
23326164
RH
2971
2972 return l;
82f2563f
PB
2973}
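
As a worked example (illustrative, not part of the source): for a region whose ops declare .valid.max_access_size = 4 and do not support unaligned accesses, an 8-byte access at an address whose low bits are 0x6 is first bounded to 4 by the region, then to 2 by the address alignment (0x6 & -0x6 == 2); pow2floor() leaves it at 2, so the caller dispatches a 2-byte access and loops for the remaining bytes.
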
2974
4840f10e 2975static bool prepare_mmio_access(MemoryRegion *mr)
125b3806 2976{
4840f10e
JK
2977 bool unlocked = !qemu_mutex_iothread_locked();
2978 bool release_lock = false;
2979
2980 if (unlocked && mr->global_locking) {
2981 qemu_mutex_lock_iothread();
2982 unlocked = false;
2983 release_lock = true;
2984 }
125b3806 2985 if (mr->flush_coalesced_mmio) {
4840f10e
JK
2986 if (unlocked) {
2987 qemu_mutex_lock_iothread();
2988 }
125b3806 2989 qemu_flush_coalesced_mmio_buffer();
4840f10e
JK
2990 if (unlocked) {
2991 qemu_mutex_unlock_iothread();
2992 }
125b3806 2993 }
4840f10e
JK
2994
2995 return release_lock;
125b3806
PB
2996}
2997
a203ac70 2998/* Called within RCU critical section. */
16620684
AK
2999static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
3000 MemTxAttrs attrs,
3001 const uint8_t *buf,
3002 int len, hwaddr addr1,
3003 hwaddr l, MemoryRegion *mr)
13eb76e0 3004{
13eb76e0 3005 uint8_t *ptr;
791af8c8 3006 uint64_t val;
3b643495 3007 MemTxResult result = MEMTX_OK;
4840f10e 3008 bool release_lock = false;
3b46e624 3009
a203ac70 3010 for (;;) {
eb7eeb88
PB
3011 if (!memory_access_is_direct(mr, true)) {
3012 release_lock |= prepare_mmio_access(mr);
3013 l = memory_access_size(mr, l, addr1);
3014 /* XXX: could force current_cpu to NULL to avoid
3015 potential bugs */
3016 switch (l) {
3017 case 8:
3018 /* 64 bit write access */
3019 val = ldq_p(buf);
3020 result |= memory_region_dispatch_write(mr, addr1, val, 8,
3021 attrs);
3022 break;
3023 case 4:
3024 /* 32 bit write access */
6da67de6 3025 val = (uint32_t)ldl_p(buf);
eb7eeb88
PB
3026 result |= memory_region_dispatch_write(mr, addr1, val, 4,
3027 attrs);
3028 break;
3029 case 2:
3030 /* 16 bit write access */
3031 val = lduw_p(buf);
3032 result |= memory_region_dispatch_write(mr, addr1, val, 2,
3033 attrs);
3034 break;
3035 case 1:
3036 /* 8 bit write access */
3037 val = ldub_p(buf);
3038 result |= memory_region_dispatch_write(mr, addr1, val, 1,
3039 attrs);
3040 break;
3041 default:
3042 abort();
13eb76e0
FB
3043 }
3044 } else {
eb7eeb88 3045 /* RAM case */
f5aa69bd 3046 ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
eb7eeb88
PB
3047 memcpy(ptr, buf, l);
3048 invalidate_and_set_dirty(mr, addr1, l);
13eb76e0 3049 }
4840f10e
JK
3050
3051 if (release_lock) {
3052 qemu_mutex_unlock_iothread();
3053 release_lock = false;
3054 }
3055
13eb76e0
FB
3056 len -= l;
3057 buf += l;
3058 addr += l;
a203ac70
PB
3059
3060 if (!len) {
3061 break;
3062 }
3063
3064 l = len;
16620684 3065 mr = flatview_translate(fv, addr, &addr1, &l, true);
13eb76e0 3066 }
fd8aaa76 3067
3b643495 3068 return result;
13eb76e0 3069}
8df1cd07 3070
16620684
AK
3071static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
3072 const uint8_t *buf, int len)
ac1970fb 3073{
eb7eeb88 3074 hwaddr l;
eb7eeb88
PB
3075 hwaddr addr1;
3076 MemoryRegion *mr;
3077 MemTxResult result = MEMTX_OK;
eb7eeb88 3078
a203ac70
PB
3079 if (len > 0) {
3080 rcu_read_lock();
eb7eeb88 3081 l = len;
16620684
AK
3082 mr = flatview_translate(fv, addr, &addr1, &l, true);
3083 result = flatview_write_continue(fv, addr, attrs, buf, len,
3084 addr1, l, mr);
a203ac70
PB
3085 rcu_read_unlock();
3086 }
3087
3088 return result;
3089}
3090
16620684
AK
3091MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
3092 MemTxAttrs attrs,
3093 const uint8_t *buf, int len)
3094{
3095 return flatview_write(address_space_to_flatview(as), addr, attrs, buf, len);
3096}
3097
a203ac70 3098/* Called within RCU critical section. */
16620684
AK
3099MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
3100 MemTxAttrs attrs, uint8_t *buf,
3101 int len, hwaddr addr1, hwaddr l,
3102 MemoryRegion *mr)
a203ac70
PB
3103{
3104 uint8_t *ptr;
3105 uint64_t val;
3106 MemTxResult result = MEMTX_OK;
3107 bool release_lock = false;
eb7eeb88 3108
a203ac70 3109 for (;;) {
eb7eeb88
PB
3110 if (!memory_access_is_direct(mr, false)) {
3111 /* I/O case */
3112 release_lock |= prepare_mmio_access(mr);
3113 l = memory_access_size(mr, l, addr1);
3114 switch (l) {
3115 case 8:
3116 /* 64 bit read access */
3117 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
3118 attrs);
3119 stq_p(buf, val);
3120 break;
3121 case 4:
3122 /* 32 bit read access */
3123 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
3124 attrs);
3125 stl_p(buf, val);
3126 break;
3127 case 2:
3128 /* 16 bit read access */
3129 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
3130 attrs);
3131 stw_p(buf, val);
3132 break;
3133 case 1:
3134 /* 8 bit read access */
3135 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
3136 attrs);
3137 stb_p(buf, val);
3138 break;
3139 default:
3140 abort();
3141 }
3142 } else {
3143 /* RAM case */
f5aa69bd 3144 ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
eb7eeb88
PB
3145 memcpy(buf, ptr, l);
3146 }
3147
3148 if (release_lock) {
3149 qemu_mutex_unlock_iothread();
3150 release_lock = false;
3151 }
3152
3153 len -= l;
3154 buf += l;
3155 addr += l;
a203ac70
PB
3156
3157 if (!len) {
3158 break;
3159 }
3160
3161 l = len;
16620684 3162 mr = flatview_translate(fv, addr, &addr1, &l, false);
a203ac70
PB
3163 }
3164
3165 return result;
3166}
3167
16620684
AK
3168MemTxResult flatview_read_full(FlatView *fv, hwaddr addr,
3169 MemTxAttrs attrs, uint8_t *buf, int len)
a203ac70
PB
3170{
3171 hwaddr l;
3172 hwaddr addr1;
3173 MemoryRegion *mr;
3174 MemTxResult result = MEMTX_OK;
3175
3176 if (len > 0) {
3177 rcu_read_lock();
3178 l = len;
16620684
AK
3179 mr = flatview_translate(fv, addr, &addr1, &l, false);
3180 result = flatview_read_continue(fv, addr, attrs, buf, len,
3181 addr1, l, mr);
a203ac70 3182 rcu_read_unlock();
eb7eeb88 3183 }
eb7eeb88
PB
3184
3185 return result;
ac1970fb
AK
3186}
3187
16620684
AK
3188static MemTxResult flatview_rw(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
3189 uint8_t *buf, int len, bool is_write)
eb7eeb88
PB
3190{
3191 if (is_write) {
16620684 3192 return flatview_write(fv, addr, attrs, (uint8_t *)buf, len);
eb7eeb88 3193 } else {
16620684 3194 return flatview_read(fv, addr, attrs, (uint8_t *)buf, len);
eb7eeb88
PB
3195 }
3196}
ac1970fb 3197
16620684
AK
3198MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
3199 MemTxAttrs attrs, uint8_t *buf,
3200 int len, bool is_write)
3201{
3202 return flatview_rw(address_space_to_flatview(as),
3203 addr, attrs, buf, len, is_write);
3204}
3205
a8170e5e 3206void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
3207 int len, int is_write)
3208{
5c9eb028
PM
3209 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
3210 buf, len, is_write);
ac1970fb
AK
3211}
3212
582b55a9
AG
3213enum write_rom_type {
3214 WRITE_DATA,
3215 FLUSH_CACHE,
3216};
3217
2a221651 3218static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 3219 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 3220{
149f54b5 3221 hwaddr l;
d0ecd2aa 3222 uint8_t *ptr;
149f54b5 3223 hwaddr addr1;
5c8a00ce 3224 MemoryRegion *mr;
3b46e624 3225
41063e1e 3226 rcu_read_lock();
d0ecd2aa 3227 while (len > 0) {
149f54b5 3228 l = len;
2a221651 3229 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 3230
5c8a00ce
PB
3231 if (!(memory_region_is_ram(mr) ||
3232 memory_region_is_romd(mr))) {
b242e0e0 3233 l = memory_access_size(mr, l, addr1);
d0ecd2aa 3234 } else {
d0ecd2aa 3235 /* ROM/RAM case */
0878d0e1 3236 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
582b55a9
AG
3237 switch (type) {
3238 case WRITE_DATA:
3239 memcpy(ptr, buf, l);
845b6214 3240 invalidate_and_set_dirty(mr, addr1, l);
582b55a9
AG
3241 break;
3242 case FLUSH_CACHE:
3243 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
3244 break;
3245 }
d0ecd2aa
FB
3246 }
3247 len -= l;
3248 buf += l;
3249 addr += l;
3250 }
41063e1e 3251 rcu_read_unlock();
d0ecd2aa
FB
3252}
3253
582b55a9 3254/* used for ROM loading : can write in RAM and ROM */
2a221651 3255void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
3256 const uint8_t *buf, int len)
3257{
2a221651 3258 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
3259}
3260
3261void cpu_flush_icache_range(hwaddr start, int len)
3262{
3263 /*
3264 * This function should do the same thing as an icache flush that was
3265 * triggered from within the guest. For TCG we are always cache coherent,
3266 * so there is no need to flush anything. For KVM / Xen we need to flush
3267 * the host's instruction cache at least.
3268 */
3269 if (tcg_enabled()) {
3270 return;
3271 }
3272
2a221651
EI
3273 cpu_physical_memory_write_rom_internal(&address_space_memory,
3274 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
3275}
3276
6d16c2f8 3277typedef struct {
d3e71559 3278 MemoryRegion *mr;
6d16c2f8 3279 void *buffer;
a8170e5e
AK
3280 hwaddr addr;
3281 hwaddr len;
c2cba0ff 3282 bool in_use;
6d16c2f8
AL
3283} BounceBuffer;
3284
3285static BounceBuffer bounce;
3286
ba223c29 3287typedef struct MapClient {
e95205e1 3288 QEMUBH *bh;
72cf2d4f 3289 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3290} MapClient;
3291
38e047b5 3292QemuMutex map_client_list_lock;
72cf2d4f
BS
3293static QLIST_HEAD(map_client_list, MapClient) map_client_list
3294 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29 3295
e95205e1
FZ
3296static void cpu_unregister_map_client_do(MapClient *client)
3297{
3298 QLIST_REMOVE(client, link);
3299 g_free(client);
3300}
3301
33b6c2ed
FZ
3302static void cpu_notify_map_clients_locked(void)
3303{
3304 MapClient *client;
3305
3306 while (!QLIST_EMPTY(&map_client_list)) {
3307 client = QLIST_FIRST(&map_client_list);
e95205e1
FZ
3308 qemu_bh_schedule(client->bh);
3309 cpu_unregister_map_client_do(client);
33b6c2ed
FZ
3310 }
3311}
3312
e95205e1 3313void cpu_register_map_client(QEMUBH *bh)
ba223c29 3314{
7267c094 3315 MapClient *client = g_malloc(sizeof(*client));
ba223c29 3316
38e047b5 3317 qemu_mutex_lock(&map_client_list_lock);
e95205e1 3318 client->bh = bh;
72cf2d4f 3319 QLIST_INSERT_HEAD(&map_client_list, client, link);
33b6c2ed
FZ
3320 if (!atomic_read(&bounce.in_use)) {
3321 cpu_notify_map_clients_locked();
3322 }
38e047b5 3323 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
3324}
3325
38e047b5 3326void cpu_exec_init_all(void)
ba223c29 3327{
38e047b5 3328 qemu_mutex_init(&ram_list.mutex);
20bccb82
PM
3329 /* The data structures we set up here depend on knowing the page size,
3330 * so no more changes can be made after this point.
3331 * In an ideal world, nothing we did before we had finished the
3332 * machine setup would care about the target page size, and we could
3333 * do this much later, rather than requiring board models to state
3334 * up front what their requirements are.
3335 */
3336 finalize_target_page_bits();
38e047b5 3337 io_mem_init();
680a4783 3338 memory_map_init();
38e047b5 3339 qemu_mutex_init(&map_client_list_lock);
ba223c29
AL
3340}
3341
e95205e1 3342void cpu_unregister_map_client(QEMUBH *bh)
ba223c29
AL
3343{
3344 MapClient *client;
3345
e95205e1
FZ
3346 qemu_mutex_lock(&map_client_list_lock);
3347 QLIST_FOREACH(client, &map_client_list, link) {
3348 if (client->bh == bh) {
3349 cpu_unregister_map_client_do(client);
3350 break;
3351 }
ba223c29 3352 }
e95205e1 3353 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
3354}
3355
3356static void cpu_notify_map_clients(void)
3357{
38e047b5 3358 qemu_mutex_lock(&map_client_list_lock);
33b6c2ed 3359 cpu_notify_map_clients_locked();
38e047b5 3360 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
3361}
3362
16620684
AK
3363static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
3364 bool is_write)
51644ab7 3365{
5c8a00ce 3366 MemoryRegion *mr;
51644ab7
PB
3367 hwaddr l, xlat;
3368
41063e1e 3369 rcu_read_lock();
51644ab7
PB
3370 while (len > 0) {
3371 l = len;
16620684 3372 mr = flatview_translate(fv, addr, &xlat, &l, is_write);
5c8a00ce
PB
3373 if (!memory_access_is_direct(mr, is_write)) {
3374 l = memory_access_size(mr, l, addr);
3375 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
5ad4a2b7 3376 rcu_read_unlock();
51644ab7
PB
3377 return false;
3378 }
3379 }
3380
3381 len -= l;
3382 addr += l;
3383 }
41063e1e 3384 rcu_read_unlock();
51644ab7
PB
3385 return true;
3386}
3387
16620684
AK
3388bool address_space_access_valid(AddressSpace *as, hwaddr addr,
3389 int len, bool is_write)
3390{
3391 return flatview_access_valid(address_space_to_flatview(as),
3392 addr, len, is_write);
3393}
3394
715c31ec 3395static hwaddr
16620684
AK
3396flatview_extend_translation(FlatView *fv, hwaddr addr,
3397 hwaddr target_len,
715c31ec
PB
3398 MemoryRegion *mr, hwaddr base, hwaddr len,
3399 bool is_write)
3400{
3401 hwaddr done = 0;
3402 hwaddr xlat;
3403 MemoryRegion *this_mr;
3404
3405 for (;;) {
3406 target_len -= len;
3407 addr += len;
3408 done += len;
3409 if (target_len == 0) {
3410 return done;
3411 }
3412
3413 len = target_len;
16620684
AK
3414 this_mr = flatview_translate(fv, addr, &xlat,
3415 &len, is_write);
715c31ec
PB
3416 if (this_mr != mr || xlat != base + done) {
3417 return done;
3418 }
3419 }
3420}
3421
6d16c2f8
AL
3422/* Map a physical memory region into a host virtual address.
3423 * May map a subset of the requested range, given by and returned in *plen.
3424 * May return NULL if resources needed to perform the mapping are exhausted.
3425 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3426 * Use cpu_register_map_client() to know when retrying the map operation is
3427 * likely to succeed.
6d16c2f8 3428 */
ac1970fb 3429void *address_space_map(AddressSpace *as,
a8170e5e
AK
3430 hwaddr addr,
3431 hwaddr *plen,
ac1970fb 3432 bool is_write)
6d16c2f8 3433{
a8170e5e 3434 hwaddr len = *plen;
715c31ec
PB
3435 hwaddr l, xlat;
3436 MemoryRegion *mr;
e81bcda5 3437 void *ptr;
16620684 3438 FlatView *fv = address_space_to_flatview(as);
6d16c2f8 3439
e3127ae0
PB
3440 if (len == 0) {
3441 return NULL;
3442 }
38bee5dc 3443
e3127ae0 3444 l = len;
41063e1e 3445 rcu_read_lock();
16620684 3446 mr = flatview_translate(fv, addr, &xlat, &l, is_write);
41063e1e 3447
e3127ae0 3448 if (!memory_access_is_direct(mr, is_write)) {
c2cba0ff 3449 if (atomic_xchg(&bounce.in_use, true)) {
41063e1e 3450 rcu_read_unlock();
e3127ae0 3451 return NULL;
6d16c2f8 3452 }
e85d9db5
KW
3453 /* Avoid unbounded allocations */
3454 l = MIN(l, TARGET_PAGE_SIZE);
3455 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
3456 bounce.addr = addr;
3457 bounce.len = l;
d3e71559
PB
3458
3459 memory_region_ref(mr);
3460 bounce.mr = mr;
e3127ae0 3461 if (!is_write) {
16620684 3462 flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
5c9eb028 3463 bounce.buffer, l);
8ab934f9 3464 }
6d16c2f8 3465
41063e1e 3466 rcu_read_unlock();
e3127ae0
PB
3467 *plen = l;
3468 return bounce.buffer;
3469 }
3470
e3127ae0 3471
d3e71559 3472 memory_region_ref(mr);
16620684
AK
3473 *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
3474 l, is_write);
f5aa69bd 3475 ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
e81bcda5
PB
3476 rcu_read_unlock();
3477
3478 return ptr;
6d16c2f8
AL
3479}
3480
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

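/*
 * Usage sketch (illustrative, not part of the original file): zero-copy
 * access to a guest buffer for a read, falling back gracefully when the
 * bounce buffer is already in use. "dma_addr" and "dma_len" are
 * hypothetical names.
 *
 *     hwaddr mlen = dma_len;
 *     void *p = address_space_map(as, dma_addr, &mlen, false);
 *     if (!p) {
 *         ... bounce buffer busy: cpu_register_map_client() and retry ...
 *     } else {
 *         ... consume up to mlen bytes from p ...
 *         address_space_unmap(as, p, mlen, false, mlen);
 *     }
 *
 * Note that mlen may come back smaller than dma_len; callers must be
 * prepared to map the remainder separately.
 */
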
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

#define ARG1_DECL                AddressSpace *as
#define ARG1                     as
#define SUFFIX
#define TRANSLATE(...)           address_space_translate(as, __VA_ARGS__)
#define IS_DIRECT(mr, is_write)  memory_access_is_direct(mr, is_write)
#define MAP_RAM(mr, ofs)         qemu_map_ram_ptr((mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK(...)       rcu_read_lock()
#define RCU_READ_UNLOCK(...)     rcu_read_unlock()
#include "memory_ldst.inc.c"

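/*
 * The include above stamps out the address_space_ld*()/st*() helpers
 * (address_space_ldl(), address_space_stq_le(), ...) for plain
 * AddressSpace arguments. Usage sketch (illustrative, not part of the
 * original file); "as" and "gpa" are hypothetical names:
 *
 *     MemTxResult res;
 *     uint32_t val = address_space_ldl(as, gpa, MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         ... the access hit an error or unassigned memory ...
 *     }
 */
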
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write)
{
    cache->len = len;
    cache->as = as;
    cache->xlat = addr;
    return len;
}

void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len)
{
}

void address_space_cache_destroy(MemoryRegionCache *cache)
{
    cache->as = NULL;
}

#define ARG1_DECL                MemoryRegionCache *cache
#define ARG1                     cache
#define SUFFIX                   _cached
#define TRANSLATE(addr, ...)     \
    address_space_translate(cache->as, cache->xlat + (addr), __VA_ARGS__)
#define IS_DIRECT(mr, is_write)  true
#define MAP_RAM(mr, ofs)         qemu_map_ram_ptr((mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK()          rcu_read_lock()
#define RCU_READ_UNLOCK()        rcu_read_unlock()
#include "memory_ldst.inc.c"

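/*
 * As above, but the helpers generated here take a MemoryRegionCache
 * (address_space_ldl_cached(), address_space_stl_cached(), ...). Usage
 * sketch (illustrative, not part of the original file); "as", "ring_gpa"
 * and "ring_len" are hypothetical names:
 *
 *     MemoryRegionCache cache;
 *     if (address_space_cache_init(&cache, as, ring_gpa, ring_len, false) >= 0) {
 *         uint32_t w = address_space_ldl_cached(&cache, 0,
 *                                               MEMTXATTRS_UNSPECIFIED, NULL);
 *         ...
 *         address_space_cache_destroy(&cache);
 *     }
 */
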
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    cpu_synchronize_state(cpu);
    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
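
/*
 * Usage sketch (illustrative, not part of the original file): a debugger
 * front end such as the gdbstub reading guest-virtual memory of "cpu".
 * "vaddr" and "buf" are hypothetical names.
 *
 *     uint8_t buf[64];
 *     if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
 *         ... no physical page mapped at vaddr ...
 *     }
 */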

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_size(void)
{
    return TARGET_PAGE_SIZE;
}

int qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

int qemu_target_page_bits_min(void)
{
    return TARGET_PAGE_BITS_MIN;
}
#endif

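/*
 * Usage sketch (illustrative, not part of the original file): migration code
 * sizing a dirty bitmap without referencing TARGET_PAGE_* directly.
 * "ram_bytes" is a hypothetical name.
 *
 *     uint64_t pages = ram_bytes >> qemu_target_page_bits();
 *     size_t page_size = qemu_target_page_size();
 */
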
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
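
/*
 * Usage sketch (illustrative, not part of the original file): walking all
 * RAM blocks with a callback whose parameters mirror the call above.
 * "count_ram" and "total" are hypothetical names.
 *
 *     static int count_ram(const char *idstr, void *host, ram_addr_t offset,
 *                          ram_addr_t length, void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *         return 0;            // a non-zero return stops the iteration
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(count_ram, &total);
 */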

/*
 * Unmap pages of memory from start to start+length such that
 * they a) read as 0, b) trigger whatever fault mechanism
 * the OS provides for postcopy.
 * The pages must be unmapped by the end of the function.
 * Returns: 0 on success, non-0 on failure.
 */
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
{
    int ret = -1;

    uint8_t *host_startaddr = rb->host + start;

    if ((uintptr_t)host_startaddr & (rb->page_size - 1)) {
        error_report("ram_block_discard_range: Unaligned start address: %p",
                     host_startaddr);
        goto err;
    }

    if ((start + length) <= rb->used_length) {
        uint8_t *host_endaddr = host_startaddr + length;
        if ((uintptr_t)host_endaddr & (rb->page_size - 1)) {
            error_report("ram_block_discard_range: Unaligned end address: %p",
                         host_endaddr);
            goto err;
        }

        errno = ENOTSUP; /* If we are missing MADVISE etc */

        if (rb->page_size == qemu_host_page_size) {
#if defined(CONFIG_MADVISE)
            /* Note: We need the madvise MADV_DONTNEED behaviour of definitely
             * freeing the page.
             */
            ret = madvise(host_startaddr, length, MADV_DONTNEED);
#endif
        } else {
            /* Huge page case - unfortunately it can't do DONTNEED, but
             * it can do the equivalent by FALLOC_FL_PUNCH_HOLE in the
             * huge page file.
             */
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
            ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                            start, length);
#endif
        }
        if (ret) {
            ret = -errno;
            error_report("ram_block_discard_range: Failed to discard range "
                         "%s:%" PRIx64 " +%zx (%d)",
                         rb->idstr, start, length, ret);
        }
    } else {
        error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
                     "/%zx/" RAM_ADDR_FMT")",
                     rb->idstr, start, length, rb->used_length);
    }

err:
    return ret;
}
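
/*
 * Usage sketch (illustrative, not part of the original file): postcopy-style
 * discard of one chunk of a RAM block. "rb" and "off" are hypothetical; both
 * the offset and the length must be multiples of rb->page_size, as checked
 * above.
 *
 *     if (ram_block_discard_range(rb, off, rb->page_size) < 0) {
 *         ... range is still mapped; treat this as a hard error ...
 *     }
 */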

#endif

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}

#if !defined(CONFIG_USER_ONLY)

static void mtree_print_phys_entries(fprintf_function mon, void *f,
                                     int start, int end, int skip, int ptr)
{
    if (start == end - 1) {
        mon(f, "\t%3d      ", start);
    } else {
        mon(f, "\t%3d..%-3d ", start, end - 1);
    }
    mon(f, " skip=%d ", skip);
    if (ptr == PHYS_MAP_NODE_NIL) {
        mon(f, " ptr=NIL");
    } else if (!skip) {
        mon(f, " ptr=#%d", ptr);
    } else {
        mon(f, " ptr=[%d]", ptr);
    }
    mon(f, "\n");
}

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)

void mtree_print_dispatch(fprintf_function mon, void *f,
                          AddressSpaceDispatch *d, MemoryRegion *root)
{
    int i;

    mon(f, "  Dispatch\n");
    mon(f, "    Physical sections\n");

    for (i = 0; i < d->map.sections_nb; ++i) {
        MemoryRegionSection *s = d->map.sections + i;
        const char *names[] = { " [unassigned]", " [not dirty]",
                                " [ROM]", " [watch]" };

        mon(f, "      #%d @" TARGET_FMT_plx ".." TARGET_FMT_plx " %s%s%s%s%s",
            i,
            s->offset_within_address_space,
            s->offset_within_address_space + MR_SIZE(s->mr->size),
            s->mr->name ? s->mr->name : "(noname)",
            i < ARRAY_SIZE(names) ? names[i] : "",
            s->mr == root ? " [ROOT]" : "",
            s == d->mru_section ? " [MRU]" : "",
            s->mr->is_iommu ? " [iommu]" : "");

        if (s->mr->alias) {
            mon(f, " alias=%s", s->mr->alias->name ?
                    s->mr->alias->name : "noname");
        }
        mon(f, "\n");
    }

    mon(f, "  Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n",
               P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip);
    for (i = 0; i < d->map.nodes_nb; ++i) {
        int j, jprev;
        PhysPageEntry prev;
        Node *n = d->map.nodes + i;

        mon(f, "      [%d]\n", i);

        for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) {
            PhysPageEntry *pe = *n + j;

            if (pe->ptr == prev.ptr && pe->skip == prev.skip) {
                continue;
            }

            mtree_print_phys_entries(mon, f, jprev, j, prev.skip, prev.ptr);

            jprev = j;
            prev = *pe;
        }

        if (jprev != ARRAY_SIZE(*n)) {
            mtree_print_phys_entries(mon, f, jprev, j, prev.skip, prev.ptr);
        }
    }
}
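
/*
 * This dump of the per-AddressSpace dispatch tree is reachable from the HMP
 * monitor; a sketch of the expected invocation (illustrative, assuming the
 * "info mtree" dispatch flag introduced alongside this helper):
 *
 *     (qemu) info mtree -d
 */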

#endif