54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c
FB
22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
cc9e98cb 29#include "hw/qdev.h"
1de7afc9 30#include "qemu/osdep.h"
9c17d615 31#include "sysemu/kvm.h"
2ff3de68 32#include "sysemu/sysemu.h"
0d09e41a 33#include "hw/xen/xen.h"
1de7afc9
PB
34#include "qemu/timer.h"
35#include "qemu/config-file.h"
75a34036 36#include "qemu/error-report.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
220c3ebd 52#include "exec/ram_addr.h"
67d95c15 53
b35ba30f
MT
54#include "qemu/range.h"
55
db7b5426 56//#define DEBUG_SUBPAGE
1196be37 57
e2eef170 58#if !defined(CONFIG_USER_ONLY)
981fdf23 59static bool in_migration;
94a6b54f 60
a3161038 61RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
62
63static MemoryRegion *system_memory;
309cb471 64static MemoryRegion *system_io;
62152b8a 65
f6790af6
AK
66AddressSpace address_space_io;
67AddressSpace address_space_memory;
2673a5da 68
0844e007 69MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 70static MemoryRegion io_mem_unassigned;
0e0df1e2 71
7bd4f430
PB
72/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
73#define RAM_PREALLOC (1 << 0)
74
dbcb8981
PB
75/* RAM is mmap-ed with MAP_SHARED */
76#define RAM_SHARED (1 << 1)
77
62be4e3a
MT
78/* Only a portion of RAM (used_length) is actually used, and migrated.
79 * This used_length size can change across reboots.
80 */
81#define RAM_RESIZEABLE (1 << 2)
82
e2eef170 83#endif
9fa3e853 84
bdc44640 85struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601
FB
86/* current CPU in the current thread. It is only valid inside
87 cpu_exec() */
4917cf44 88DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 89/* 0 = Do not count executed instructions.
bf20dc07 90 1 = Precise instruction counting.
2e70f6ef 91 2 = Adaptive rate instruction counting. */
5708fc66 92int use_icount;
6a00d601 93
e2eef170 94#if !defined(CONFIG_USER_ONLY)
4346ae3e 95
1db8abb1
PB
96typedef struct PhysPageEntry PhysPageEntry;
97
98struct PhysPageEntry {
9736e55b 99 /* How many bits to skip to the next level (in units of P_L2_SIZE). 0 for a leaf. */
8b795765 100 uint32_t skip : 6;
9736e55b 101 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 102 uint32_t ptr : 26;
1db8abb1
PB
103};
104
8b795765
MT
105#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
106
03f49957 107/* Size of the L2 (and L3, etc) page tables. */
57271d63 108#define ADDR_SPACE_BITS 64
03f49957 109
026736ce 110#define P_L2_BITS 9
03f49957
PB
111#define P_L2_SIZE (1 << P_L2_BITS)
112
113#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
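/* Editorial note, not part of the original source: as a worked example,
 * assuming 4 KiB target pages (TARGET_PAGE_BITS == 12), this evaluates to
 * ((64 - 12 - 1) / 9) + 1 = 6, i.e. the physical map is a six-level radix
 * tree in which each node resolves P_L2_BITS (9) bits of the page index. */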
114
115typedef PhysPageEntry Node[P_L2_SIZE];
0475d94f 116
53cb28cb
MA
117typedef struct PhysPageMap {
118 unsigned sections_nb;
119 unsigned sections_nb_alloc;
120 unsigned nodes_nb;
121 unsigned nodes_nb_alloc;
122 Node *nodes;
123 MemoryRegionSection *sections;
124} PhysPageMap;
125
1db8abb1
PB
126struct AddressSpaceDispatch {
127 /* This is a multi-level map on the physical address space.
128 * The bottom level has pointers to MemoryRegionSections.
129 */
130 PhysPageEntry phys_map;
53cb28cb 131 PhysPageMap map;
acc9d80b 132 AddressSpace *as;
1db8abb1
PB
133};
134
90260c6c
JK
135#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
136typedef struct subpage_t {
137 MemoryRegion iomem;
acc9d80b 138 AddressSpace *as;
90260c6c
JK
139 hwaddr base;
140 uint16_t sub_section[TARGET_PAGE_SIZE];
141} subpage_t;
142
b41aac4f
LPF
143#define PHYS_SECTION_UNASSIGNED 0
144#define PHYS_SECTION_NOTDIRTY 1
145#define PHYS_SECTION_ROM 2
146#define PHYS_SECTION_WATCH 3
5312bd8b 147
e2eef170 148static void io_mem_init(void);
62152b8a 149static void memory_map_init(void);
09daed84 150static void tcg_commit(MemoryListener *listener);
e2eef170 151
1ec9b909 152static MemoryRegion io_mem_watch;
6658ffb8 153#endif
fd6ce8f6 154
6d9a1304 155#if !defined(CONFIG_USER_ONLY)
d6f2ea22 156
53cb28cb 157static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 158{
53cb28cb
MA
159 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
160 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
161 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
162 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 163 }
f7bf5461
AK
164}
165
53cb28cb 166static uint32_t phys_map_node_alloc(PhysPageMap *map)
f7bf5461
AK
167{
168 unsigned i;
8b795765 169 uint32_t ret;
f7bf5461 170
53cb28cb 171 ret = map->nodes_nb++;
f7bf5461 172 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 173 assert(ret != map->nodes_nb_alloc);
03f49957 174 for (i = 0; i < P_L2_SIZE; ++i) {
53cb28cb
MA
175 map->nodes[ret][i].skip = 1;
176 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 177 }
f7bf5461 178 return ret;
d6f2ea22
AK
179}
180
53cb28cb
MA
181static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
182 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 183 int level)
f7bf5461
AK
184{
185 PhysPageEntry *p;
186 int i;
03f49957 187 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 188
9736e55b 189 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
53cb28cb
MA
190 lp->ptr = phys_map_node_alloc(map);
191 p = map->nodes[lp->ptr];
f7bf5461 192 if (level == 0) {
03f49957 193 for (i = 0; i < P_L2_SIZE; i++) {
9736e55b 194 p[i].skip = 0;
b41aac4f 195 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 196 }
67c4d23c 197 }
f7bf5461 198 } else {
53cb28cb 199 p = map->nodes[lp->ptr];
92e873b9 200 }
03f49957 201 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 202
03f49957 203 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 204 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 205 lp->skip = 0;
c19e8800 206 lp->ptr = leaf;
07f07b31
AK
207 *index += step;
208 *nb -= step;
2999097b 209 } else {
53cb28cb 210 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b
AK
211 }
212 ++lp;
f7bf5461
AK
213 }
214}
215
ac1970fb 216static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 217 hwaddr index, hwaddr nb,
2999097b 218 uint16_t leaf)
f7bf5461 219{
2999097b 220 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 221 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 222
53cb28cb 223 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
224}
225
b35ba30f
MT
 226/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
227 * and update our entry so we can skip it and go directly to the destination.
228 */
229static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
230{
231 unsigned valid_ptr = P_L2_SIZE;
232 int valid = 0;
233 PhysPageEntry *p;
234 int i;
235
236 if (lp->ptr == PHYS_MAP_NODE_NIL) {
237 return;
238 }
239
240 p = nodes[lp->ptr];
241 for (i = 0; i < P_L2_SIZE; i++) {
242 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
243 continue;
244 }
245
246 valid_ptr = i;
247 valid++;
248 if (p[i].skip) {
249 phys_page_compact(&p[i], nodes, compacted);
250 }
251 }
252
253 /* We can only compress if there's only one child. */
254 if (valid != 1) {
255 return;
256 }
257
258 assert(valid_ptr < P_L2_SIZE);
259
260 /* Don't compress if it won't fit in the # of bits we have. */
261 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
262 return;
263 }
264
265 lp->ptr = p[valid_ptr].ptr;
266 if (!p[valid_ptr].skip) {
267 /* If our only child is a leaf, make this a leaf. */
268 /* By design, we should have made this node a leaf to begin with so we
269 * should never reach here.
270 * But since it's so simple to handle this, let's do it just in case we
271 * change this rule.
272 */
273 lp->skip = 0;
274 } else {
275 lp->skip += p[valid_ptr].skip;
276 }
277}
278
279static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
280{
281 DECLARE_BITMAP(compacted, nodes_nb);
282
283 if (d->phys_map.skip) {
53cb28cb 284 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
285 }
286}
287
97115a8d 288static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 289 Node *nodes, MemoryRegionSection *sections)
92e873b9 290{
31ab2b4a 291 PhysPageEntry *p;
97115a8d 292 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 293 int i;
f1f6e3b8 294
9736e55b 295 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 296 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 297 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 298 }
9affd6fc 299 p = nodes[lp.ptr];
03f49957 300 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 301 }
b35ba30f
MT
302
303 if (sections[lp.ptr].size.hi ||
304 range_covers_byte(sections[lp.ptr].offset_within_address_space,
305 sections[lp.ptr].size.lo, addr)) {
306 return &sections[lp.ptr];
307 } else {
308 return &sections[PHYS_SECTION_UNASSIGNED];
309 }
f3705d53
AK
310}
311
e5548617
BS
312bool memory_region_is_unassigned(MemoryRegion *mr)
313{
2a8e7499 314 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 315 && mr != &io_mem_watch;
fd6ce8f6 316}
149f54b5 317
c7086b4a 318static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
319 hwaddr addr,
320 bool resolve_subpage)
9f029603 321{
90260c6c
JK
322 MemoryRegionSection *section;
323 subpage_t *subpage;
324
53cb28cb 325 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
326 if (resolve_subpage && section->mr->subpage) {
327 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 328 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
329 }
330 return section;
9f029603
JK
331}
332
90260c6c 333static MemoryRegionSection *
c7086b4a 334address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 335 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
336{
337 MemoryRegionSection *section;
a87f3954 338 Int128 diff;
149f54b5 339
c7086b4a 340 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
341 /* Compute offset within MemoryRegionSection */
342 addr -= section->offset_within_address_space;
343
344 /* Compute offset within MemoryRegion */
345 *xlat = addr + section->offset_within_region;
346
347 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 348 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
349 return section;
350}
90260c6c 351
a87f3954
PB
352static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
353{
354 if (memory_region_is_ram(mr)) {
355 return !(is_write && mr->readonly);
356 }
357 if (memory_region_is_romd(mr)) {
358 return !is_write;
359 }
360
361 return false;
362}
363
5c8a00ce
PB
364MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
365 hwaddr *xlat, hwaddr *plen,
366 bool is_write)
90260c6c 367{
30951157
AK
368 IOMMUTLBEntry iotlb;
369 MemoryRegionSection *section;
370 MemoryRegion *mr;
371 hwaddr len = *plen;
372
373 for (;;) {
a87f3954 374 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157
AK
375 mr = section->mr;
376
377 if (!mr->iommu_ops) {
378 break;
379 }
380
8d7b8cb9 381 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157
AK
382 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
383 | (addr & iotlb.addr_mask));
384 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
385 if (!(iotlb.perm & (1 << is_write))) {
386 mr = &io_mem_unassigned;
387 break;
388 }
389
390 as = iotlb.target_as;
391 }
392
fe680d0d 393 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954
PB
394 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
395 len = MIN(page, len);
396 }
397
30951157
AK
398 *plen = len;
399 *xlat = addr;
400 return mr;
90260c6c
JK
401}
402
403MemoryRegionSection *
404address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
405 hwaddr *plen)
406{
30951157 407 MemoryRegionSection *section;
c7086b4a 408 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
30951157
AK
409
410 assert(!section->mr->iommu_ops);
411 return section;
90260c6c 412}
5b6dd868 413#endif
fd6ce8f6 414
5b6dd868 415void cpu_exec_init_all(void)
fdbb84d1 416{
5b6dd868 417#if !defined(CONFIG_USER_ONLY)
b2a8658e 418 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
419 memory_map_init();
420 io_mem_init();
fdbb84d1 421#endif
5b6dd868 422}
fdbb84d1 423
b170fce3 424#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
425
426static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 427{
259186a7 428 CPUState *cpu = opaque;
a513fe19 429
5b6dd868
BS
430 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
431 version_id is increased. */
259186a7 432 cpu->interrupt_request &= ~0x01;
c01a71c1 433 tlb_flush(cpu, 1);
5b6dd868
BS
434
435 return 0;
a513fe19 436}
7501267e 437
6c3bff0e
PD
438static int cpu_common_pre_load(void *opaque)
439{
440 CPUState *cpu = opaque;
441
adee6424 442 cpu->exception_index = -1;
6c3bff0e
PD
443
444 return 0;
445}
446
447static bool cpu_common_exception_index_needed(void *opaque)
448{
449 CPUState *cpu = opaque;
450
adee6424 451 return tcg_enabled() && cpu->exception_index != -1;
6c3bff0e
PD
452}
453
454static const VMStateDescription vmstate_cpu_common_exception_index = {
455 .name = "cpu_common/exception_index",
456 .version_id = 1,
457 .minimum_version_id = 1,
458 .fields = (VMStateField[]) {
459 VMSTATE_INT32(exception_index, CPUState),
460 VMSTATE_END_OF_LIST()
461 }
462};
463
1a1562f5 464const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
465 .name = "cpu_common",
466 .version_id = 1,
467 .minimum_version_id = 1,
6c3bff0e 468 .pre_load = cpu_common_pre_load,
5b6dd868 469 .post_load = cpu_common_post_load,
35d08458 470 .fields = (VMStateField[]) {
259186a7
AF
471 VMSTATE_UINT32(halted, CPUState),
472 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 473 VMSTATE_END_OF_LIST()
6c3bff0e
PD
474 },
475 .subsections = (VMStateSubsection[]) {
476 {
477 .vmsd = &vmstate_cpu_common_exception_index,
478 .needed = cpu_common_exception_index_needed,
479 } , {
480 /* empty */
481 }
5b6dd868
BS
482 }
483};
1a1562f5 484
5b6dd868 485#endif
ea041c0e 486
38d8f5c8 487CPUState *qemu_get_cpu(int index)
ea041c0e 488{
bdc44640 489 CPUState *cpu;
ea041c0e 490
bdc44640 491 CPU_FOREACH(cpu) {
55e5c285 492 if (cpu->cpu_index == index) {
bdc44640 493 return cpu;
55e5c285 494 }
ea041c0e 495 }
5b6dd868 496
bdc44640 497 return NULL;
ea041c0e
FB
498}
499
09daed84
EI
500#if !defined(CONFIG_USER_ONLY)
501void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
502{
503 /* We only support one address space per cpu at the moment. */
504 assert(cpu->as == as);
505
506 if (cpu->tcg_as_listener) {
507 memory_listener_unregister(cpu->tcg_as_listener);
508 } else {
509 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
510 }
511 cpu->tcg_as_listener->commit = tcg_commit;
512 memory_listener_register(cpu->tcg_as_listener, as);
513}
514#endif
515
5b6dd868 516void cpu_exec_init(CPUArchState *env)
ea041c0e 517{
5b6dd868 518 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 519 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 520 CPUState *some_cpu;
5b6dd868
BS
521 int cpu_index;
522
523#if defined(CONFIG_USER_ONLY)
524 cpu_list_lock();
525#endif
5b6dd868 526 cpu_index = 0;
bdc44640 527 CPU_FOREACH(some_cpu) {
5b6dd868
BS
528 cpu_index++;
529 }
55e5c285 530 cpu->cpu_index = cpu_index;
1b1ed8dc 531 cpu->numa_node = 0;
f0c3c505 532 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 533 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 534#ifndef CONFIG_USER_ONLY
09daed84 535 cpu->as = &address_space_memory;
5b6dd868
BS
536 cpu->thread_id = qemu_get_thread_id();
537#endif
bdc44640 538 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
539#if defined(CONFIG_USER_ONLY)
540 cpu_list_unlock();
541#endif
e0d47944
AF
542 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
543 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
544 }
5b6dd868 545#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
546 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
547 cpu_save, cpu_load, env);
b170fce3 548 assert(cc->vmsd == NULL);
e0d47944 549 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 550#endif
b170fce3
AF
551 if (cc->vmsd != NULL) {
552 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
553 }
ea041c0e
FB
554}
555
1fddef4b 556#if defined(TARGET_HAS_ICE)
94df27fd 557#if defined(CONFIG_USER_ONLY)
00b941e5 558static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
559{
560 tb_invalidate_phys_page_range(pc, pc + 1, 0);
561}
562#else
00b941e5 563static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 564{
e8262a1b
MF
565 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
566 if (phys != -1) {
09daed84 567 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 568 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 569 }
1e7855a5 570}
c27004ec 571#endif
94df27fd 572#endif /* TARGET_HAS_ICE */
d720b93d 573
c527ee8f 574#if defined(CONFIG_USER_ONLY)
75a34036 575void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
576
577{
578}
579
3ee887e8
PM
580int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
581 int flags)
582{
583 return -ENOSYS;
584}
585
586void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
587{
588}
589
75a34036 590int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
591 int flags, CPUWatchpoint **watchpoint)
592{
593 return -ENOSYS;
594}
595#else
6658ffb8 596/* Add a watchpoint. */
75a34036 597int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 598 int flags, CPUWatchpoint **watchpoint)
6658ffb8 599{
c0ce998e 600 CPUWatchpoint *wp;
6658ffb8 601
05068c0d 602 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 603 if (len == 0 || (addr + len - 1) < addr) {
75a34036
AF
604 error_report("tried to set invalid watchpoint at %"
605 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
606 return -EINVAL;
607 }
7267c094 608 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
609
610 wp->vaddr = addr;
05068c0d 611 wp->len = len;
a1d1bb31
AL
612 wp->flags = flags;
613
2dc9f411 614 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
615 if (flags & BP_GDB) {
616 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
617 } else {
618 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
619 }
6658ffb8 620
31b030d4 621 tlb_flush_page(cpu, addr);
a1d1bb31
AL
622
623 if (watchpoint)
624 *watchpoint = wp;
625 return 0;
6658ffb8
PB
626}
627
a1d1bb31 628/* Remove a specific watchpoint. */
75a34036 629int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 630 int flags)
6658ffb8 631{
a1d1bb31 632 CPUWatchpoint *wp;
6658ffb8 633
ff4700b0 634 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 635 if (addr == wp->vaddr && len == wp->len
6e140f28 636 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 637 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
638 return 0;
639 }
640 }
a1d1bb31 641 return -ENOENT;
6658ffb8
PB
642}
643
a1d1bb31 644/* Remove a specific watchpoint by reference. */
75a34036 645void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 646{
ff4700b0 647 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 648
31b030d4 649 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 650
7267c094 651 g_free(watchpoint);
a1d1bb31
AL
652}
653
654/* Remove all matching watchpoints. */
75a34036 655void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 656{
c0ce998e 657 CPUWatchpoint *wp, *next;
a1d1bb31 658
ff4700b0 659 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
660 if (wp->flags & mask) {
661 cpu_watchpoint_remove_by_ref(cpu, wp);
662 }
c0ce998e 663 }
7d03f82f 664}
05068c0d
PM
665
666/* Return true if this watchpoint address matches the specified
667 * access (ie the address range covered by the watchpoint overlaps
668 * partially or completely with the address range covered by the
669 * access).
670 */
671static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
672 vaddr addr,
673 vaddr len)
674{
675 /* We know the lengths are non-zero, but a little caution is
676 * required to avoid errors in the case where the range ends
677 * exactly at the top of the address space and so addr + len
678 * wraps round to zero.
679 */
680 vaddr wpend = wp->vaddr + wp->len - 1;
681 vaddr addrend = addr + len - 1;
682
683 return !(addr > wpend || wp->vaddr > addrend);
684}
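/* Illustrative example (added commentary, not in the original file): a
 * watchpoint at vaddr 0x1000 with len 4 has wpend 0x1003; an access at
 * addr 0x1002 with len 8 has addrend 0x1009.  Since neither range begins
 * after the other one ends, the ranges overlap and the check returns true. */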
685
c527ee8f 686#endif
7d03f82f 687
a1d1bb31 688/* Add a breakpoint. */
b3310ab3 689int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 690 CPUBreakpoint **breakpoint)
4c3a88a2 691{
1fddef4b 692#if defined(TARGET_HAS_ICE)
c0ce998e 693 CPUBreakpoint *bp;
3b46e624 694
7267c094 695 bp = g_malloc(sizeof(*bp));
4c3a88a2 696
a1d1bb31
AL
697 bp->pc = pc;
698 bp->flags = flags;
699
2dc9f411 700 /* keep all GDB-injected breakpoints in front */
00b941e5 701 if (flags & BP_GDB) {
f0c3c505 702 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 703 } else {
f0c3c505 704 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 705 }
3b46e624 706
f0c3c505 707 breakpoint_invalidate(cpu, pc);
a1d1bb31 708
00b941e5 709 if (breakpoint) {
a1d1bb31 710 *breakpoint = bp;
00b941e5 711 }
4c3a88a2
FB
712 return 0;
713#else
a1d1bb31 714 return -ENOSYS;
4c3a88a2
FB
715#endif
716}
717
a1d1bb31 718/* Remove a specific breakpoint. */
b3310ab3 719int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 720{
7d03f82f 721#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
722 CPUBreakpoint *bp;
723
f0c3c505 724 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 725 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 726 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
727 return 0;
728 }
7d03f82f 729 }
a1d1bb31
AL
730 return -ENOENT;
731#else
732 return -ENOSYS;
7d03f82f
EI
733#endif
734}
735
a1d1bb31 736/* Remove a specific breakpoint by reference. */
b3310ab3 737void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 738{
1fddef4b 739#if defined(TARGET_HAS_ICE)
f0c3c505
AF
740 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
741
742 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 743
7267c094 744 g_free(breakpoint);
a1d1bb31
AL
745#endif
746}
747
748/* Remove all matching breakpoints. */
b3310ab3 749void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31
AL
750{
751#if defined(TARGET_HAS_ICE)
c0ce998e 752 CPUBreakpoint *bp, *next;
a1d1bb31 753
f0c3c505 754 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
755 if (bp->flags & mask) {
756 cpu_breakpoint_remove_by_ref(cpu, bp);
757 }
c0ce998e 758 }
4c3a88a2
FB
759#endif
760}
761
c33a346e
FB
762/* enable or disable single step mode. EXCP_DEBUG is returned by the
763 CPU loop after each instruction */
3825b28f 764void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 765{
1fddef4b 766#if defined(TARGET_HAS_ICE)
ed2803da
AF
767 if (cpu->singlestep_enabled != enabled) {
768 cpu->singlestep_enabled = enabled;
769 if (kvm_enabled()) {
38e478ec 770 kvm_update_guest_debug(cpu, 0);
ed2803da 771 } else {
ccbb4d44 772 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 773 /* XXX: only flush what is necessary */
38e478ec 774 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
775 tb_flush(env);
776 }
c33a346e
FB
777 }
778#endif
779}
780
a47dddd7 781void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
782{
783 va_list ap;
493ae1f0 784 va_list ap2;
7501267e
FB
785
786 va_start(ap, fmt);
493ae1f0 787 va_copy(ap2, ap);
7501267e
FB
788 fprintf(stderr, "qemu: fatal: ");
789 vfprintf(stderr, fmt, ap);
790 fprintf(stderr, "\n");
878096ee 791 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
792 if (qemu_log_enabled()) {
793 qemu_log("qemu: fatal: ");
794 qemu_log_vprintf(fmt, ap2);
795 qemu_log("\n");
a0762859 796 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 797 qemu_log_flush();
93fcfe39 798 qemu_log_close();
924edcae 799 }
493ae1f0 800 va_end(ap2);
f9373291 801 va_end(ap);
fd052bf6
RV
802#if defined(CONFIG_USER_ONLY)
803 {
804 struct sigaction act;
805 sigfillset(&act.sa_mask);
806 act.sa_handler = SIG_DFL;
807 sigaction(SIGABRT, &act, NULL);
808 }
809#endif
7501267e
FB
810 abort();
811}
812
0124311e 813#if !defined(CONFIG_USER_ONLY)
041603fe
PB
814static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
815{
816 RAMBlock *block;
817
818 /* The list is protected by the iothread lock here. */
819 block = ram_list.mru_block;
9b8424d5 820 if (block && addr - block->offset < block->max_length) {
041603fe
PB
821 goto found;
822 }
823 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 824 if (addr - block->offset < block->max_length) {
041603fe
PB
825 goto found;
826 }
827 }
828
829 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
830 abort();
831
832found:
833 ram_list.mru_block = block;
834 return block;
835}
836
a2f4d5be 837static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 838{
041603fe 839 ram_addr_t start1;
a2f4d5be
JQ
840 RAMBlock *block;
841 ram_addr_t end;
842
843 end = TARGET_PAGE_ALIGN(start + length);
844 start &= TARGET_PAGE_MASK;
d24981d3 845
041603fe
PB
846 block = qemu_get_ram_block(start);
847 assert(block == qemu_get_ram_block(end - 1));
1240be24 848 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
041603fe 849 cpu_tlb_reset_dirty_all(start1, length);
d24981d3
JQ
850}
851
5579c7f3 852/* Note: start and end must be within the same ram block. */
a2f4d5be 853void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 854 unsigned client)
1ccde1cb 855{
1ccde1cb
FB
856 if (length == 0)
857 return;
c8d6f66a 858 cpu_physical_memory_clear_dirty_range_type(start, length, client);
f23db169 859
d24981d3 860 if (tcg_enabled()) {
a2f4d5be 861 tlb_reset_dirty_range_all(start, length);
5579c7f3 862 }
1ccde1cb
FB
863}
864
981fdf23 865static void cpu_physical_memory_set_dirty_tracking(bool enable)
74576198
AL
866{
867 in_migration = enable;
74576198
AL
868}
869
bb0e627a 870hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
871 MemoryRegionSection *section,
872 target_ulong vaddr,
873 hwaddr paddr, hwaddr xlat,
874 int prot,
875 target_ulong *address)
e5548617 876{
a8170e5e 877 hwaddr iotlb;
e5548617
BS
878 CPUWatchpoint *wp;
879
cc5bea60 880 if (memory_region_is_ram(section->mr)) {
e5548617
BS
881 /* Normal RAM. */
882 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 883 + xlat;
e5548617 884 if (!section->readonly) {
b41aac4f 885 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 886 } else {
b41aac4f 887 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
888 }
889 } else {
1b3fb98f 890 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 891 iotlb += xlat;
e5548617
BS
892 }
893
894 /* Make accesses to pages with watchpoints go via the
895 watchpoint trap routines. */
ff4700b0 896 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 897 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
898 /* Avoid trapping reads of pages with a write breakpoint. */
899 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 900 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
901 *address |= TLB_MMIO;
902 break;
903 }
904 }
905 }
906
907 return iotlb;
908}
9fa3e853
FB
909#endif /* defined(CONFIG_USER_ONLY) */
910
e2eef170 911#if !defined(CONFIG_USER_ONLY)
8da3ff18 912
c227f099 913static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 914 uint16_t section);
acc9d80b 915static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 916
a2b257d6
IM
917static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
918 qemu_anon_ram_alloc;
91138037
MA
919
920/*
 921 * Set a custom physical guest memory allocator.
922 * Accelerators with unusual needs may need this. Hopefully, we can
923 * get rid of it eventually.
924 */
a2b257d6 925void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
91138037
MA
926{
927 phys_mem_alloc = alloc;
928}
929
53cb28cb
MA
930static uint16_t phys_section_add(PhysPageMap *map,
931 MemoryRegionSection *section)
5312bd8b 932{
68f3f65b
PB
933 /* The physical section number is ORed with a page-aligned
934 * pointer to produce the iotlb entries. Thus it should
935 * never overflow into the page-aligned value.
936 */
53cb28cb 937 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 938
53cb28cb
MA
939 if (map->sections_nb == map->sections_nb_alloc) {
940 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
941 map->sections = g_renew(MemoryRegionSection, map->sections,
942 map->sections_nb_alloc);
5312bd8b 943 }
53cb28cb 944 map->sections[map->sections_nb] = *section;
dfde4e6e 945 memory_region_ref(section->mr);
53cb28cb 946 return map->sections_nb++;
5312bd8b
AK
947}
948
058bc4b5
PB
949static void phys_section_destroy(MemoryRegion *mr)
950{
dfde4e6e
PB
951 memory_region_unref(mr);
952
058bc4b5
PB
953 if (mr->subpage) {
954 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 955 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
956 g_free(subpage);
957 }
958}
959
6092666e 960static void phys_sections_free(PhysPageMap *map)
5312bd8b 961{
9affd6fc
PB
962 while (map->sections_nb > 0) {
963 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
964 phys_section_destroy(section->mr);
965 }
9affd6fc
PB
966 g_free(map->sections);
967 g_free(map->nodes);
5312bd8b
AK
968}
969
ac1970fb 970static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
971{
972 subpage_t *subpage;
a8170e5e 973 hwaddr base = section->offset_within_address_space
0f0cb164 974 & TARGET_PAGE_MASK;
97115a8d 975 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 976 d->map.nodes, d->map.sections);
0f0cb164
AK
977 MemoryRegionSection subsection = {
978 .offset_within_address_space = base,
052e87b0 979 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 980 };
a8170e5e 981 hwaddr start, end;
0f0cb164 982
f3705d53 983 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 984
f3705d53 985 if (!(existing->mr->subpage)) {
acc9d80b 986 subpage = subpage_init(d->as, base);
3be91e86 987 subsection.address_space = d->as;
0f0cb164 988 subsection.mr = &subpage->iomem;
ac1970fb 989 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 990 phys_section_add(&d->map, &subsection));
0f0cb164 991 } else {
f3705d53 992 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
993 }
994 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 995 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
996 subpage_register(subpage, start, end,
997 phys_section_add(&d->map, section));
0f0cb164
AK
998}
999
1000
052e87b0
PB
1001static void register_multipage(AddressSpaceDispatch *d,
1002 MemoryRegionSection *section)
33417e70 1003{
a8170e5e 1004 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 1005 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
1006 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1007 TARGET_PAGE_BITS));
dd81124b 1008
733d5ef5
PB
1009 assert(num_pages);
1010 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
1011}
1012
ac1970fb 1013static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 1014{
89ae337a 1015 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1016 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1017 MemoryRegionSection now = *section, remain = *section;
052e87b0 1018 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1019
733d5ef5
PB
1020 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1021 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1022 - now.offset_within_address_space;
1023
052e87b0 1024 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1025 register_subpage(d, &now);
733d5ef5 1026 } else {
052e87b0 1027 now.size = int128_zero();
733d5ef5 1028 }
052e87b0
PB
1029 while (int128_ne(remain.size, now.size)) {
1030 remain.size = int128_sub(remain.size, now.size);
1031 remain.offset_within_address_space += int128_get64(now.size);
1032 remain.offset_within_region += int128_get64(now.size);
69b67646 1033 now = remain;
052e87b0 1034 if (int128_lt(remain.size, page_size)) {
733d5ef5 1035 register_subpage(d, &now);
88266249 1036 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1037 now.size = page_size;
ac1970fb 1038 register_subpage(d, &now);
69b67646 1039 } else {
052e87b0 1040 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1041 register_multipage(d, &now);
69b67646 1042 }
0f0cb164
AK
1043 }
1044}
1045
62a2744c
SY
1046void qemu_flush_coalesced_mmio_buffer(void)
1047{
1048 if (kvm_enabled())
1049 kvm_flush_coalesced_mmio_buffer();
1050}
1051
b2a8658e
UD
1052void qemu_mutex_lock_ramlist(void)
1053{
1054 qemu_mutex_lock(&ram_list.mutex);
1055}
1056
1057void qemu_mutex_unlock_ramlist(void)
1058{
1059 qemu_mutex_unlock(&ram_list.mutex);
1060}
1061
e1e84ba0 1062#ifdef __linux__
c902760f
MT
1063
1064#include <sys/vfs.h>
1065
1066#define HUGETLBFS_MAGIC 0x958458f6
1067
fc7a5800 1068static long gethugepagesize(const char *path, Error **errp)
c902760f
MT
1069{
1070 struct statfs fs;
1071 int ret;
1072
1073 do {
9742bf26 1074 ret = statfs(path, &fs);
c902760f
MT
1075 } while (ret != 0 && errno == EINTR);
1076
1077 if (ret != 0) {
fc7a5800
HT
1078 error_setg_errno(errp, errno, "failed to get page size of file %s",
1079 path);
9742bf26 1080 return 0;
c902760f
MT
1081 }
1082
1083 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1084 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1085
1086 return fs.f_bsize;
1087}
1088
04b16653
AW
1089static void *file_ram_alloc(RAMBlock *block,
1090 ram_addr_t memory,
7f56e740
PB
1091 const char *path,
1092 Error **errp)
c902760f
MT
1093{
1094 char *filename;
8ca761f6
PF
1095 char *sanitized_name;
1096 char *c;
557529dd 1097 void *area = NULL;
c902760f 1098 int fd;
557529dd 1099 uint64_t hpagesize;
fc7a5800 1100 Error *local_err = NULL;
c902760f 1101
fc7a5800
HT
1102 hpagesize = gethugepagesize(path, &local_err);
1103 if (local_err) {
1104 error_propagate(errp, local_err);
f9a49dfa 1105 goto error;
c902760f 1106 }
a2b257d6 1107 block->mr->align = hpagesize;
c902760f
MT
1108
1109 if (memory < hpagesize) {
557529dd
HT
1110 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1111 "or larger than huge page size 0x%" PRIx64,
1112 memory, hpagesize);
1113 goto error;
c902760f
MT
1114 }
1115
1116 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1117 error_setg(errp,
1118 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1119 goto error;
c902760f
MT
1120 }
1121
8ca761f6 1122 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1123 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1124 for (c = sanitized_name; *c != '\0'; c++) {
1125 if (*c == '/')
1126 *c = '_';
1127 }
1128
1129 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1130 sanitized_name);
1131 g_free(sanitized_name);
c902760f
MT
1132
1133 fd = mkstemp(filename);
1134 if (fd < 0) {
7f56e740
PB
1135 error_setg_errno(errp, errno,
1136 "unable to create backing store for hugepages");
e4ada482 1137 g_free(filename);
f9a49dfa 1138 goto error;
c902760f
MT
1139 }
1140 unlink(filename);
e4ada482 1141 g_free(filename);
c902760f
MT
1142
1143 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1144
1145 /*
1146 * ftruncate is not supported by hugetlbfs in older
1147 * hosts, so don't bother bailing out on errors.
1148 * If anything goes wrong with it under other filesystems,
1149 * mmap will fail.
1150 */
7f56e740 1151 if (ftruncate(fd, memory)) {
9742bf26 1152 perror("ftruncate");
7f56e740 1153 }
c902760f 1154
dbcb8981
PB
1155 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1156 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1157 fd, 0);
c902760f 1158 if (area == MAP_FAILED) {
7f56e740
PB
1159 error_setg_errno(errp, errno,
1160 "unable to map backing store for hugepages");
9742bf26 1161 close(fd);
f9a49dfa 1162 goto error;
c902760f 1163 }
ef36fa14
MT
1164
1165 if (mem_prealloc) {
38183310 1166 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1167 }
1168
04b16653 1169 block->fd = fd;
c902760f 1170 return area;
f9a49dfa
MT
1171
1172error:
1173 if (mem_prealloc) {
e4d9df4f 1174 error_report("%s\n", error_get_pretty(*errp));
f9a49dfa
MT
1175 exit(1);
1176 }
1177 return NULL;
c902760f
MT
1178}
1179#endif
1180
d17b5288 1181static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1182{
1183 RAMBlock *block, *next_block;
3e837b2c 1184 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1185
49cd9ac6
SH
1186 assert(size != 0); /* it would hand out same offset multiple times */
1187
a3161038 1188 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1189 return 0;
1190
a3161038 1191 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1192 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653 1193
62be4e3a 1194 end = block->offset + block->max_length;
04b16653 1195
a3161038 1196 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1197 if (next_block->offset >= end) {
1198 next = MIN(next, next_block->offset);
1199 }
1200 }
1201 if (next - end >= size && next - end < mingap) {
3e837b2c 1202 offset = end;
04b16653
AW
1203 mingap = next - end;
1204 }
1205 }
3e837b2c
AW
1206
1207 if (offset == RAM_ADDR_MAX) {
1208 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1209 (uint64_t)size);
1210 abort();
1211 }
1212
04b16653
AW
1213 return offset;
1214}
1215
652d7ec2 1216ram_addr_t last_ram_offset(void)
d17b5288
AW
1217{
1218 RAMBlock *block;
1219 ram_addr_t last = 0;
1220
a3161038 1221 QTAILQ_FOREACH(block, &ram_list.blocks, next)
62be4e3a 1222 last = MAX(last, block->offset + block->max_length);
d17b5288
AW
1223
1224 return last;
1225}
1226
ddb97f1d
JB
1227static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1228{
1229 int ret;
ddb97f1d
JB
1230
 1231 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
2ff3de68
MA
1232 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1233 "dump-guest-core", true)) {
ddb97f1d
JB
1234 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1235 if (ret) {
1236 perror("qemu_madvise");
1237 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1238 "but dump_guest_core=off specified\n");
1239 }
1240 }
1241}
1242
20cfe881 1243static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1244{
20cfe881 1245 RAMBlock *block;
84b89d78 1246
a3161038 1247 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1248 if (block->offset == addr) {
20cfe881 1249 return block;
c5705a77
AK
1250 }
1251 }
20cfe881
HT
1252
1253 return NULL;
1254}
1255
1256void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1257{
1258 RAMBlock *new_block = find_ram_block(addr);
1259 RAMBlock *block;
1260
c5705a77
AK
1261 assert(new_block);
1262 assert(!new_block->idstr[0]);
84b89d78 1263
09e5ab63
AL
1264 if (dev) {
1265 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1266 if (id) {
1267 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1268 g_free(id);
84b89d78
CM
1269 }
1270 }
1271 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1272
b2a8658e
UD
1273 /* This assumes the iothread lock is taken here too. */
1274 qemu_mutex_lock_ramlist();
a3161038 1275 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1276 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1277 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1278 new_block->idstr);
1279 abort();
1280 }
1281 }
b2a8658e 1282 qemu_mutex_unlock_ramlist();
c5705a77
AK
1283}
1284
20cfe881
HT
1285void qemu_ram_unset_idstr(ram_addr_t addr)
1286{
1287 RAMBlock *block = find_ram_block(addr);
1288
1289 if (block) {
1290 memset(block->idstr, 0, sizeof(block->idstr));
1291 }
1292}
1293
8490fc78
LC
1294static int memory_try_enable_merging(void *addr, size_t len)
1295{
2ff3de68 1296 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1297 /* disabled by the user */
1298 return 0;
1299 }
1300
1301 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1302}
1303
62be4e3a
MT
1304/* Only legal before guest might have detected the memory size: e.g. on
1305 * incoming migration, or right after reset.
1306 *
 1307 * As the memory core doesn't know how memory is accessed, it is up to the
 1308 * resize callback to update device state and/or add assertions to detect
1309 * misuse, if necessary.
1310 */
1311int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1312{
1313 RAMBlock *block = find_ram_block(base);
1314
1315 assert(block);
1316
1317 if (block->used_length == newsize) {
1318 return 0;
1319 }
1320
1321 if (!(block->flags & RAM_RESIZEABLE)) {
1322 error_setg_errno(errp, EINVAL,
1323 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1324 " in != 0x" RAM_ADDR_FMT, block->idstr,
1325 newsize, block->used_length);
1326 return -EINVAL;
1327 }
1328
1329 if (block->max_length < newsize) {
1330 error_setg_errno(errp, EINVAL,
1331 "Length too large: %s: 0x" RAM_ADDR_FMT
1332 " > 0x" RAM_ADDR_FMT, block->idstr,
1333 newsize, block->max_length);
1334 return -EINVAL;
1335 }
1336
1337 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1338 block->used_length = newsize;
1339 cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
1340 memory_region_set_size(block->mr, newsize);
1341 if (block->resized) {
1342 block->resized(block->idstr, newsize, block->host);
1343 }
1344 return 0;
1345}
1346
ef701d7b 1347static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1348{
e1c57ab8 1349 RAMBlock *block;
2152f5ca
JQ
1350 ram_addr_t old_ram_size, new_ram_size;
1351
1352 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1353
b2a8658e
UD
1354 /* This assumes the iothread lock is taken here too. */
1355 qemu_mutex_lock_ramlist();
9b8424d5 1356 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1357
1358 if (!new_block->host) {
1359 if (xen_enabled()) {
9b8424d5
MT
1360 xen_ram_alloc(new_block->offset, new_block->max_length,
1361 new_block->mr);
e1c57ab8 1362 } else {
9b8424d5 1363 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1364 &new_block->mr->align);
39228250 1365 if (!new_block->host) {
ef701d7b
HT
1366 error_setg_errno(errp, errno,
1367 "cannot set up guest memory '%s'",
1368 memory_region_name(new_block->mr));
1369 qemu_mutex_unlock_ramlist();
1370 return -1;
39228250 1371 }
9b8424d5 1372 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1373 }
c902760f 1374 }
94a6b54f 1375
abb26d63
PB
1376 /* Keep the list sorted from biggest to smallest block. */
1377 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 1378 if (block->max_length < new_block->max_length) {
abb26d63
PB
1379 break;
1380 }
1381 }
1382 if (block) {
1383 QTAILQ_INSERT_BEFORE(block, new_block, next);
1384 } else {
1385 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1386 }
0d6d3c87 1387 ram_list.mru_block = NULL;
94a6b54f 1388
f798b07f 1389 ram_list.version++;
b2a8658e 1390 qemu_mutex_unlock_ramlist();
f798b07f 1391
2152f5ca
JQ
1392 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1393
1394 if (new_ram_size > old_ram_size) {
1ab4c8ce
JQ
1395 int i;
1396 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1397 ram_list.dirty_memory[i] =
1398 bitmap_zero_extend(ram_list.dirty_memory[i],
1399 old_ram_size, new_ram_size);
1400 }
2152f5ca 1401 }
9b8424d5
MT
1402 cpu_physical_memory_set_dirty_range(new_block->offset,
1403 new_block->used_length);
94a6b54f 1404
9b8424d5
MT
1405 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1406 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1407 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
ddb97f1d 1408
e1c57ab8 1409 if (kvm_enabled()) {
9b8424d5 1410 kvm_setup_guest_memory(new_block->host, new_block->max_length);
e1c57ab8 1411 }
6f0437e8 1412
94a6b54f
PB
1413 return new_block->offset;
1414}
e9a1ab19 1415
0b183fc8 1416#ifdef __linux__
e1c57ab8 1417ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1418 bool share, const char *mem_path,
7f56e740 1419 Error **errp)
e1c57ab8
PB
1420{
1421 RAMBlock *new_block;
ef701d7b
HT
1422 ram_addr_t addr;
1423 Error *local_err = NULL;
e1c57ab8
PB
1424
1425 if (xen_enabled()) {
7f56e740
PB
1426 error_setg(errp, "-mem-path not supported with Xen");
1427 return -1;
e1c57ab8
PB
1428 }
1429
1430 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1431 /*
1432 * file_ram_alloc() needs to allocate just like
1433 * phys_mem_alloc, but we haven't bothered to provide
1434 * a hook there.
1435 */
7f56e740
PB
1436 error_setg(errp,
1437 "-mem-path not supported with this accelerator");
1438 return -1;
e1c57ab8
PB
1439 }
1440
1441 size = TARGET_PAGE_ALIGN(size);
1442 new_block = g_malloc0(sizeof(*new_block));
1443 new_block->mr = mr;
9b8424d5
MT
1444 new_block->used_length = size;
1445 new_block->max_length = size;
dbcb8981 1446 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1447 new_block->host = file_ram_alloc(new_block, size,
1448 mem_path, errp);
1449 if (!new_block->host) {
1450 g_free(new_block);
1451 return -1;
1452 }
1453
ef701d7b
HT
1454 addr = ram_block_add(new_block, &local_err);
1455 if (local_err) {
1456 g_free(new_block);
1457 error_propagate(errp, local_err);
1458 return -1;
1459 }
1460 return addr;
e1c57ab8 1461}
0b183fc8 1462#endif
e1c57ab8 1463
62be4e3a
MT
1464static
1465ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1466 void (*resized)(const char*,
1467 uint64_t length,
1468 void *host),
1469 void *host, bool resizeable,
ef701d7b 1470 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1471{
1472 RAMBlock *new_block;
ef701d7b
HT
1473 ram_addr_t addr;
1474 Error *local_err = NULL;
e1c57ab8
PB
1475
1476 size = TARGET_PAGE_ALIGN(size);
62be4e3a 1477 max_size = TARGET_PAGE_ALIGN(max_size);
e1c57ab8
PB
1478 new_block = g_malloc0(sizeof(*new_block));
1479 new_block->mr = mr;
62be4e3a 1480 new_block->resized = resized;
9b8424d5
MT
1481 new_block->used_length = size;
1482 new_block->max_length = max_size;
62be4e3a 1483 assert(max_size >= size);
e1c57ab8
PB
1484 new_block->fd = -1;
1485 new_block->host = host;
1486 if (host) {
7bd4f430 1487 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1488 }
62be4e3a
MT
1489 if (resizeable) {
1490 new_block->flags |= RAM_RESIZEABLE;
1491 }
ef701d7b
HT
1492 addr = ram_block_add(new_block, &local_err);
1493 if (local_err) {
1494 g_free(new_block);
1495 error_propagate(errp, local_err);
1496 return -1;
1497 }
1498 return addr;
e1c57ab8
PB
1499}
1500
62be4e3a
MT
1501ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1502 MemoryRegion *mr, Error **errp)
1503{
1504 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1505}
1506
ef701d7b 1507ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1508{
62be4e3a
MT
1509 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1510}
1511
1512ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1513 void (*resized)(const char*,
1514 uint64_t length,
1515 void *host),
1516 MemoryRegion *mr, Error **errp)
1517{
1518 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
1519}
1520
1f2e98b6
AW
1521void qemu_ram_free_from_ptr(ram_addr_t addr)
1522{
1523 RAMBlock *block;
1524
b2a8658e
UD
1525 /* This assumes the iothread lock is taken here too. */
1526 qemu_mutex_lock_ramlist();
a3161038 1527 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1528 if (addr == block->offset) {
a3161038 1529 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1530 ram_list.mru_block = NULL;
f798b07f 1531 ram_list.version++;
7267c094 1532 g_free(block);
b2a8658e 1533 break;
1f2e98b6
AW
1534 }
1535 }
b2a8658e 1536 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1537}
1538
c227f099 1539void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1540{
04b16653
AW
1541 RAMBlock *block;
1542
b2a8658e
UD
1543 /* This assumes the iothread lock is taken here too. */
1544 qemu_mutex_lock_ramlist();
a3161038 1545 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1546 if (addr == block->offset) {
a3161038 1547 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1548 ram_list.mru_block = NULL;
f798b07f 1549 ram_list.version++;
7bd4f430 1550 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1551 ;
dfeaf2ab
MA
1552 } else if (xen_enabled()) {
1553 xen_invalidate_map_cache_entry(block->host);
089f3f76 1554#ifndef _WIN32
3435f395 1555 } else if (block->fd >= 0) {
9b8424d5 1556 munmap(block->host, block->max_length);
3435f395 1557 close(block->fd);
089f3f76 1558#endif
04b16653 1559 } else {
9b8424d5 1560 qemu_anon_ram_free(block->host, block->max_length);
04b16653 1561 }
7267c094 1562 g_free(block);
b2a8658e 1563 break;
04b16653
AW
1564 }
1565 }
b2a8658e 1566 qemu_mutex_unlock_ramlist();
04b16653 1567
e9a1ab19
FB
1568}
1569
cd19cfa2
HY
1570#ifndef _WIN32
1571void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1572{
1573 RAMBlock *block;
1574 ram_addr_t offset;
1575 int flags;
1576 void *area, *vaddr;
1577
a3161038 1578 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2 1579 offset = addr - block->offset;
9b8424d5 1580 if (offset < block->max_length) {
1240be24 1581 vaddr = ramblock_ptr(block, offset);
7bd4f430 1582 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1583 ;
dfeaf2ab
MA
1584 } else if (xen_enabled()) {
1585 abort();
cd19cfa2
HY
1586 } else {
1587 flags = MAP_FIXED;
1588 munmap(vaddr, length);
3435f395 1589 if (block->fd >= 0) {
dbcb8981
PB
1590 flags |= (block->flags & RAM_SHARED ?
1591 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1592 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1593 flags, block->fd, offset);
cd19cfa2 1594 } else {
2eb9fbaa
MA
1595 /*
1596 * Remap needs to match alloc. Accelerators that
1597 * set phys_mem_alloc never remap. If they did,
1598 * we'd need a remap hook here.
1599 */
1600 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1601
cd19cfa2
HY
1602 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1603 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1604 flags, -1, 0);
cd19cfa2
HY
1605 }
1606 if (area != vaddr) {
f15fbc4b
AP
1607 fprintf(stderr, "Could not remap addr: "
1608 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1609 length, addr);
1610 exit(1);
1611 }
8490fc78 1612 memory_try_enable_merging(vaddr, length);
ddb97f1d 1613 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1614 }
1615 return;
1616 }
1617 }
1618}
1619#endif /* !_WIN32 */
1620
a35ba7be
PB
1621int qemu_get_ram_fd(ram_addr_t addr)
1622{
1623 RAMBlock *block = qemu_get_ram_block(addr);
1624
1625 return block->fd;
1626}
1627
3fd74b84
DM
1628void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1629{
1630 RAMBlock *block = qemu_get_ram_block(addr);
1631
1240be24 1632 return ramblock_ptr(block, 0);
3fd74b84
DM
1633}
1634
1b5ec234
PB
1635/* Return a host pointer to ram allocated with qemu_ram_alloc.
1636 With the exception of the softmmu code in this file, this should
1637 only be used for local memory (e.g. video ram) that the device owns,
1638 and knows it isn't going to access beyond the end of the block.
1639
1640 It should not be used for general purpose DMA.
1641 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1642 */
1643void *qemu_get_ram_ptr(ram_addr_t addr)
1644{
1645 RAMBlock *block = qemu_get_ram_block(addr);
1646
0d6d3c87
PB
1647 if (xen_enabled()) {
1648 /* We need to check if the requested address is in the RAM
1649 * because we don't want to map the entire memory in QEMU.
1650 * In that case just map until the end of the page.
1651 */
1652 if (block->offset == 0) {
1653 return xen_map_cache(addr, 0, 0);
1654 } else if (block->host == NULL) {
1655 block->host =
9b8424d5 1656 xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87
PB
1657 }
1658 }
1240be24 1659 return ramblock_ptr(block, addr - block->offset);
dc828ca1
PB
1660}
1661
38bee5dc
SS
1662/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1663 * but takes a size argument */
cb85f7ab 1664static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1665{
8ab934f9
SS
1666 if (*size == 0) {
1667 return NULL;
1668 }
868bb33f 1669 if (xen_enabled()) {
e41d7c69 1670 return xen_map_cache(addr, *size, 1);
868bb33f 1671 } else {
38bee5dc
SS
1672 RAMBlock *block;
1673
a3161038 1674 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5
MT
1675 if (addr - block->offset < block->max_length) {
1676 if (addr - block->offset + *size > block->max_length)
1677 *size = block->max_length - addr + block->offset;
1240be24 1678 return ramblock_ptr(block, addr - block->offset);
38bee5dc
SS
1679 }
1680 }
1681
1682 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1683 abort();
38bee5dc
SS
1684 }
1685}
1686
7443b437
PB
1687/* Some of the softmmu routines need to translate from a host pointer
1688 (typically a TLB entry) back to a ram offset. */
1b5ec234 1689MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1690{
94a6b54f
PB
1691 RAMBlock *block;
1692 uint8_t *host = ptr;
1693
868bb33f 1694 if (xen_enabled()) {
e41d7c69 1695 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1696 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1697 }
1698
23887b79 1699 block = ram_list.mru_block;
9b8424d5 1700 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1701 goto found;
1702 }
1703
a3161038 1704 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 1705 /* This case happens when the block is not mapped. */
1706 if (block->host == NULL) {
1707 continue;
1708 }
9b8424d5 1709 if (host - block->host < block->max_length) {
23887b79 1710 goto found;
f471a17e 1711 }
94a6b54f 1712 }
432d268c 1713
1b5ec234 1714 return NULL;
23887b79
PB
1715
1716found:
1717 *ram_addr = block->offset + (host - block->host);
1b5ec234 1718 return block->mr;
e890261f 1719}
f471a17e 1720
a8170e5e 1721static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1722 uint64_t val, unsigned size)
9fa3e853 1723{
52159192 1724 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1725 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1726 }
0e0df1e2
AK
1727 switch (size) {
1728 case 1:
1729 stb_p(qemu_get_ram_ptr(ram_addr), val);
1730 break;
1731 case 2:
1732 stw_p(qemu_get_ram_ptr(ram_addr), val);
1733 break;
1734 case 4:
1735 stl_p(qemu_get_ram_ptr(ram_addr), val);
1736 break;
1737 default:
1738 abort();
3a7d929e 1739 }
6886867e 1740 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
f23db169
FB
1741 /* we remove the notdirty callback only if the code has been
1742 flushed */
a2cd8c85 1743 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1744 CPUArchState *env = current_cpu->env_ptr;
93afeade 1745 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1746 }
9fa3e853
FB
1747}
1748
b018ddf6
PB
1749static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1750 unsigned size, bool is_write)
1751{
1752 return is_write;
1753}
1754
0e0df1e2 1755static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1756 .write = notdirty_mem_write,
b018ddf6 1757 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1758 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1759};
1760
0f459d16 1761/* Generate a debug exception if a watchpoint has been hit. */
05068c0d 1762static void check_watchpoint(int offset, int len, int flags)
0f459d16 1763{
93afeade
AF
1764 CPUState *cpu = current_cpu;
1765 CPUArchState *env = cpu->env_ptr;
06d55cc1 1766 target_ulong pc, cs_base;
0f459d16 1767 target_ulong vaddr;
a1d1bb31 1768 CPUWatchpoint *wp;
06d55cc1 1769 int cpu_flags;
0f459d16 1770
ff4700b0 1771 if (cpu->watchpoint_hit) {
06d55cc1
AL
1772 /* We re-entered the check after replacing the TB. Now raise
1773          * the debug interrupt so that it will trigger after the
1774 * current instruction. */
93afeade 1775 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1776 return;
1777 }
93afeade 1778 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1779 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1780 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1781 && (wp->flags & flags)) {
08225676
PM
1782 if (flags == BP_MEM_READ) {
1783 wp->flags |= BP_WATCHPOINT_HIT_READ;
1784 } else {
1785 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1786 }
1787 wp->hitaddr = vaddr;
ff4700b0
AF
1788 if (!cpu->watchpoint_hit) {
1789 cpu->watchpoint_hit = wp;
239c51a5 1790 tb_check_watchpoint(cpu);
6e140f28 1791 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1792 cpu->exception_index = EXCP_DEBUG;
5638d180 1793 cpu_loop_exit(cpu);
6e140f28
AL
1794 } else {
1795 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1796 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1797 cpu_resume_from_signal(cpu, NULL);
6e140f28 1798 }
06d55cc1 1799 }
6e140f28
AL
1800 } else {
1801 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1802 }
1803 }
1804}
1805
6658ffb8
PB
1806/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1807 so these check for a hit then pass through to the normal out-of-line
1808 phys routines. */
a8170e5e 1809static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1810 unsigned size)
6658ffb8 1811{
05068c0d 1812 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
1ec9b909 1813 switch (size) {
2c17449b 1814 case 1: return ldub_phys(&address_space_memory, addr);
41701aa4 1815 case 2: return lduw_phys(&address_space_memory, addr);
fdfba1a2 1816 case 4: return ldl_phys(&address_space_memory, addr);
1ec9b909
AK
1817 default: abort();
1818 }
6658ffb8
PB
1819}
1820
a8170e5e 1821static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1822 uint64_t val, unsigned size)
6658ffb8 1823{
05068c0d 1824 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
1ec9b909 1825 switch (size) {
67364150 1826 case 1:
db3be60d 1827 stb_phys(&address_space_memory, addr, val);
67364150
MF
1828 break;
1829 case 2:
5ce5944d 1830 stw_phys(&address_space_memory, addr, val);
67364150
MF
1831 break;
1832 case 4:
ab1da857 1833 stl_phys(&address_space_memory, addr, val);
67364150 1834 break;
1ec9b909
AK
1835 default: abort();
1836 }
6658ffb8
PB
1837}
1838
1ec9b909
AK
1839static const MemoryRegionOps watch_mem_ops = {
1840 .read = watch_mem_read,
1841 .write = watch_mem_write,
1842 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1843};
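/* Sketch: the routines above only see accesses to pages that carry a
 * watchpoint.  A debugger front end arms one roughly like this (prototype
 * assumed from qom/cpu.h; the address and length are arbitrary examples): */
static void example_arm_watchpoint(CPUState *cpu)
{
    CPUWatchpoint *wp;

    /* Trap 4-byte writes to guest virtual address 0x1000; the TLB then
     * routes accesses to that page through io_mem_watch, which lands in
     * watch_mem_read/watch_mem_write above. */
    cpu_watchpoint_insert(cpu, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp);
}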
6658ffb8 1844
a8170e5e 1845static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1846 unsigned len)
db7b5426 1847{
acc9d80b 1848 subpage_t *subpage = opaque;
ff6cff75 1849 uint8_t buf[8];
791af8c8 1850
db7b5426 1851#if defined(DEBUG_SUBPAGE)
016e9d62 1852 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1853 subpage, len, addr);
db7b5426 1854#endif
acc9d80b
JK
1855 address_space_read(subpage->as, addr + subpage->base, buf, len);
1856 switch (len) {
1857 case 1:
1858 return ldub_p(buf);
1859 case 2:
1860 return lduw_p(buf);
1861 case 4:
1862 return ldl_p(buf);
ff6cff75
PB
1863 case 8:
1864 return ldq_p(buf);
acc9d80b
JK
1865 default:
1866 abort();
1867 }
db7b5426
BS
1868}
1869
a8170e5e 1870static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1871 uint64_t value, unsigned len)
db7b5426 1872{
acc9d80b 1873 subpage_t *subpage = opaque;
ff6cff75 1874 uint8_t buf[8];
acc9d80b 1875
db7b5426 1876#if defined(DEBUG_SUBPAGE)
016e9d62 1877 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1878 " value %"PRIx64"\n",
1879 __func__, subpage, len, addr, value);
db7b5426 1880#endif
acc9d80b
JK
1881 switch (len) {
1882 case 1:
1883 stb_p(buf, value);
1884 break;
1885 case 2:
1886 stw_p(buf, value);
1887 break;
1888 case 4:
1889 stl_p(buf, value);
1890 break;
ff6cff75
PB
1891 case 8:
1892 stq_p(buf, value);
1893 break;
acc9d80b
JK
1894 default:
1895 abort();
1896 }
1897 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1898}
1899
c353e4cc 1900static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 1901 unsigned len, bool is_write)
c353e4cc 1902{
acc9d80b 1903 subpage_t *subpage = opaque;
c353e4cc 1904#if defined(DEBUG_SUBPAGE)
016e9d62 1905 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 1906 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1907#endif
1908
acc9d80b 1909 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 1910 len, is_write);
c353e4cc
PB
1911}
1912
70c68e44
AK
1913static const MemoryRegionOps subpage_ops = {
1914 .read = subpage_read,
1915 .write = subpage_write,
ff6cff75
PB
1916 .impl.min_access_size = 1,
1917 .impl.max_access_size = 8,
1918 .valid.min_access_size = 1,
1919 .valid.max_access_size = 8,
c353e4cc 1920 .valid.accepts = subpage_accepts,
70c68e44 1921 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1922};
1923
c227f099 1924static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1925 uint16_t section)
db7b5426
BS
1926{
1927 int idx, eidx;
1928
1929 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1930 return -1;
1931 idx = SUBPAGE_IDX(start);
1932 eidx = SUBPAGE_IDX(end);
1933#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1934 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1935 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1936#endif
db7b5426 1937 for (; idx <= eidx; idx++) {
5312bd8b 1938 mmio->sub_section[idx] = section;
db7b5426
BS
1939 }
1940
1941 return 0;
1942}
1943
acc9d80b 1944static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1945{
c227f099 1946 subpage_t *mmio;
db7b5426 1947
7267c094 1948 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1949
acc9d80b 1950 mmio->as = as;
1eec614b 1951 mmio->base = base;
2c9b15ca 1952 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 1953 NULL, TARGET_PAGE_SIZE);
b3b00c78 1954 mmio->iomem.subpage = true;
db7b5426 1955#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1956 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1957 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1958#endif
b41aac4f 1959 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1960
1961 return mmio;
1962}
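/* Sketch: how one target page can be split between sections.  The section
 * indices below are invented; real callers use values returned by
 * phys_section_add() via register_subpage(). */
static void example_subpage_split(AddressSpace *as)
{
    subpage_t *sp = subpage_init(as, 0 /* page-aligned base */);

    /* First half of the page dispatches to one section, the rest to another. */
    subpage_register(sp, 0, TARGET_PAGE_SIZE / 2 - 1, 1);
    subpage_register(sp, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1, 2);
}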
1963
a656e22f
PC
1964static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1965 MemoryRegion *mr)
5312bd8b 1966{
a656e22f 1967 assert(as);
5312bd8b 1968 MemoryRegionSection section = {
a656e22f 1969 .address_space = as,
5312bd8b
AK
1970 .mr = mr,
1971 .offset_within_address_space = 0,
1972 .offset_within_region = 0,
052e87b0 1973 .size = int128_2_64(),
5312bd8b
AK
1974 };
1975
53cb28cb 1976 return phys_section_add(map, &section);
5312bd8b
AK
1977}
1978
77717094 1979MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1980{
77717094 1981 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1982}
1983
e9179ce1
AK
1984static void io_mem_init(void)
1985{
1f6245e5 1986 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 1987 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 1988 NULL, UINT64_MAX);
2c9b15ca 1989 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 1990 NULL, UINT64_MAX);
2c9b15ca 1991 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 1992 NULL, UINT64_MAX);
e9179ce1
AK
1993}
1994
ac1970fb 1995static void mem_begin(MemoryListener *listener)
00752703
PB
1996{
1997 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1998 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1999 uint16_t n;
2000
a656e22f 2001 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 2002 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 2003 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 2004 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 2005 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 2006 assert(n == PHYS_SECTION_ROM);
a656e22f 2007 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 2008 assert(n == PHYS_SECTION_WATCH);
00752703 2009
9736e55b 2010 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
2011 d->as = as;
2012 as->next_dispatch = d;
2013}
2014
2015static void mem_commit(MemoryListener *listener)
ac1970fb 2016{
89ae337a 2017 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2018 AddressSpaceDispatch *cur = as->dispatch;
2019 AddressSpaceDispatch *next = as->next_dispatch;
2020
53cb28cb 2021 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2022
0475d94f 2023 as->dispatch = next;
b41aac4f 2024
53cb28cb
MA
2025 if (cur) {
2026 phys_sections_free(&cur->map);
2027 g_free(cur);
2028 }
9affd6fc
PB
2029}
2030
1d71148e 2031static void tcg_commit(MemoryListener *listener)
50c1e149 2032{
182735ef 2033 CPUState *cpu;
117712c3
AK
2034
2035 /* since each CPU stores ram addresses in its TLB cache, we must
2036 reset the modified entries */
2037 /* XXX: slow ! */
bdc44640 2038 CPU_FOREACH(cpu) {
33bde2e1
EI
2039         /* FIXME: Disentangle the circular cpu.h header dependencies so
2040            we can directly get the right CPU from the listener. */
2041 if (cpu->tcg_as_listener != listener) {
2042 continue;
2043 }
00c8cb0a 2044 tlb_flush(cpu, 1);
117712c3 2045 }
50c1e149
AK
2046}
2047
93632747
AK
2048static void core_log_global_start(MemoryListener *listener)
2049{
981fdf23 2050 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
2051}
2052
2053static void core_log_global_stop(MemoryListener *listener)
2054{
981fdf23 2055 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
2056}
2057
93632747 2058static MemoryListener core_memory_listener = {
93632747
AK
2059 .log_global_start = core_log_global_start,
2060 .log_global_stop = core_log_global_stop,
ac1970fb 2061 .priority = 1,
93632747
AK
2062};
2063
ac1970fb
AK
2064void address_space_init_dispatch(AddressSpace *as)
2065{
00752703 2066 as->dispatch = NULL;
89ae337a 2067 as->dispatch_listener = (MemoryListener) {
ac1970fb 2068 .begin = mem_begin,
00752703 2069 .commit = mem_commit,
ac1970fb
AK
2070 .region_add = mem_add,
2071 .region_nop = mem_add,
2072 .priority = 0,
2073 };
89ae337a 2074 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2075}
2076
83f3c251
AK
2077void address_space_destroy_dispatch(AddressSpace *as)
2078{
2079 AddressSpaceDispatch *d = as->dispatch;
2080
89ae337a 2081 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
2082 g_free(d);
2083 as->dispatch = NULL;
2084}
2085
62152b8a
AK
2086static void memory_map_init(void)
2087{
7267c094 2088 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2089
57271d63 2090 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2091 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2092
7267c094 2093 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2094 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2095 65536);
7dca8043 2096 address_space_init(&address_space_io, system_io, "I/O");
93632747 2097
f6790af6 2098 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
2099}
2100
2101MemoryRegion *get_system_memory(void)
2102{
2103 return system_memory;
2104}
2105
309cb471
AK
2106MemoryRegion *get_system_io(void)
2107{
2108 return system_io;
2109}
2110
e2eef170
PB
2111#endif /* !defined(CONFIG_USER_ONLY) */
2112
13eb76e0
FB
2113/* physical memory access (slow version, mainly for debug) */
2114#if defined(CONFIG_USER_ONLY)
f17ec444 2115int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2116 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2117{
2118 int l, flags;
2119 target_ulong page;
53a5960a 2120 void * p;
13eb76e0
FB
2121
2122 while (len > 0) {
2123 page = addr & TARGET_PAGE_MASK;
2124 l = (page + TARGET_PAGE_SIZE) - addr;
2125 if (l > len)
2126 l = len;
2127 flags = page_get_flags(page);
2128 if (!(flags & PAGE_VALID))
a68fe89c 2129 return -1;
13eb76e0
FB
2130 if (is_write) {
2131 if (!(flags & PAGE_WRITE))
a68fe89c 2132 return -1;
579a97f7 2133 /* XXX: this code should not depend on lock_user */
72fb7daa 2134 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2135 return -1;
72fb7daa
AJ
2136 memcpy(p, buf, l);
2137 unlock_user(p, addr, l);
13eb76e0
FB
2138 } else {
2139 if (!(flags & PAGE_READ))
a68fe89c 2140 return -1;
579a97f7 2141 /* XXX: this code should not depend on lock_user */
72fb7daa 2142 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2143 return -1;
72fb7daa 2144 memcpy(buf, p, l);
5b257578 2145 unlock_user(p, addr, 0);
13eb76e0
FB
2146 }
2147 len -= l;
2148 buf += l;
2149 addr += l;
2150 }
a68fe89c 2151 return 0;
13eb76e0 2152}
8df1cd07 2153
13eb76e0 2154#else
51d7a9eb 2155
a8170e5e
AK
2156static void invalidate_and_set_dirty(hwaddr addr,
2157 hwaddr length)
51d7a9eb 2158{
f874bf90
PM
2159 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2160 tb_invalidate_phys_range(addr, addr + length, 0);
6886867e 2161 cpu_physical_memory_set_dirty_range_nocode(addr, length);
51d7a9eb 2162 }
e226939d 2163 xen_modified_memory(addr, length);
51d7a9eb
AP
2164}
2165
23326164 2166static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2167{
e1622f4b 2168 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2169
2170 /* Regions are assumed to support 1-4 byte accesses unless
2171 otherwise specified. */
23326164
RH
2172 if (access_size_max == 0) {
2173 access_size_max = 4;
2174 }
2175
2176 /* Bound the maximum access by the alignment of the address. */
2177 if (!mr->ops->impl.unaligned) {
2178 unsigned align_size_max = addr & -addr;
2179 if (align_size_max != 0 && align_size_max < access_size_max) {
2180 access_size_max = align_size_max;
2181 }
82f2563f 2182 }
23326164
RH
2183
2184 /* Don't attempt accesses larger than the maximum. */
2185 if (l > access_size_max) {
2186 l = access_size_max;
82f2563f 2187 }
098178f2
PB
2188 if (l & (l - 1)) {
2189 l = 1 << (qemu_fls(l) - 1);
2190 }
23326164
RH
2191
2192 return l;
82f2563f
PB
2193}
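/* Worked example: with valid.max_access_size == 4 and impl.unaligned unset,
 * a request of l == 7 at an address ending in 0x6 is handled as follows:
 * access_size_max starts at 4, the alignment test (0x6 & -0x6 == 2) lowers
 * it to 2, l is capped to 2, and the power-of-two rounding leaves it at 2,
 * so the caller's loop issues a 2-byte access and comes back for the rest. */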
2194
fd8aaa76 2195bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2196 int len, bool is_write)
13eb76e0 2197{
149f54b5 2198 hwaddr l;
13eb76e0 2199 uint8_t *ptr;
791af8c8 2200 uint64_t val;
149f54b5 2201 hwaddr addr1;
5c8a00ce 2202 MemoryRegion *mr;
fd8aaa76 2203 bool error = false;
3b46e624 2204
13eb76e0 2205 while (len > 0) {
149f54b5 2206 l = len;
5c8a00ce 2207 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2208
13eb76e0 2209 if (is_write) {
5c8a00ce
PB
2210 if (!memory_access_is_direct(mr, is_write)) {
2211 l = memory_access_size(mr, l, addr1);
4917cf44 2212 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2213 potential bugs */
23326164
RH
2214 switch (l) {
2215 case 8:
2216 /* 64 bit write access */
2217 val = ldq_p(buf);
2218 error |= io_mem_write(mr, addr1, val, 8);
2219 break;
2220 case 4:
1c213d19 2221 /* 32 bit write access */
c27004ec 2222 val = ldl_p(buf);
5c8a00ce 2223 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2224 break;
2225 case 2:
1c213d19 2226 /* 16 bit write access */
c27004ec 2227 val = lduw_p(buf);
5c8a00ce 2228 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2229 break;
2230 case 1:
1c213d19 2231 /* 8 bit write access */
c27004ec 2232 val = ldub_p(buf);
5c8a00ce 2233 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2234 break;
2235 default:
2236 abort();
13eb76e0 2237 }
2bbfa05d 2238 } else {
5c8a00ce 2239 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2240 /* RAM case */
5579c7f3 2241 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2242 memcpy(ptr, buf, l);
51d7a9eb 2243 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2244 }
2245 } else {
5c8a00ce 2246 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2247 /* I/O case */
5c8a00ce 2248 l = memory_access_size(mr, l, addr1);
23326164
RH
2249 switch (l) {
2250 case 8:
2251 /* 64 bit read access */
2252 error |= io_mem_read(mr, addr1, &val, 8);
2253 stq_p(buf, val);
2254 break;
2255 case 4:
13eb76e0 2256 /* 32 bit read access */
5c8a00ce 2257 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2258 stl_p(buf, val);
23326164
RH
2259 break;
2260 case 2:
13eb76e0 2261 /* 16 bit read access */
5c8a00ce 2262 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2263 stw_p(buf, val);
23326164
RH
2264 break;
2265 case 1:
1c213d19 2266 /* 8 bit read access */
5c8a00ce 2267 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2268 stb_p(buf, val);
23326164
RH
2269 break;
2270 default:
2271 abort();
13eb76e0
FB
2272 }
2273 } else {
2274 /* RAM case */
5c8a00ce 2275 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2276 memcpy(buf, ptr, l);
13eb76e0
FB
2277 }
2278 }
2279 len -= l;
2280 buf += l;
2281 addr += l;
2282 }
fd8aaa76
PB
2283
2284 return error;
13eb76e0 2285}
8df1cd07 2286
fd8aaa76 2287bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2288 const uint8_t *buf, int len)
2289{
fd8aaa76 2290 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2291}
2292
fd8aaa76 2293bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2294{
fd8aaa76 2295 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2296}
2297
2298
a8170e5e 2299void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2300 int len, int is_write)
2301{
fd8aaa76 2302 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2303}
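/* Usage sketch: a device model pulling a descriptor out of guest memory and
 * writing back a status byte.  The guest physical addresses are examples. */
static void example_dma_access(void)
{
    uint32_t desc;
    uint8_t status = 1;

    /* Read 4 bytes from guest physical address 0x10000... */
    address_space_read(&address_space_memory, 0x10000,
                       (uint8_t *)&desc, sizeof(desc));
    /* ...and write one status byte just after it. */
    address_space_write(&address_space_memory, 0x10004, &status, 1);
}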
2304
582b55a9
AG
2305enum write_rom_type {
2306 WRITE_DATA,
2307 FLUSH_CACHE,
2308};
2309
2a221651 2310static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2311 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2312{
149f54b5 2313 hwaddr l;
d0ecd2aa 2314 uint8_t *ptr;
149f54b5 2315 hwaddr addr1;
5c8a00ce 2316 MemoryRegion *mr;
3b46e624 2317
d0ecd2aa 2318 while (len > 0) {
149f54b5 2319 l = len;
2a221651 2320 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2321
5c8a00ce
PB
2322 if (!(memory_region_is_ram(mr) ||
2323 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2324 /* do nothing */
2325 } else {
5c8a00ce 2326 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2327 /* ROM/RAM case */
5579c7f3 2328 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2329 switch (type) {
2330 case WRITE_DATA:
2331 memcpy(ptr, buf, l);
2332 invalidate_and_set_dirty(addr1, l);
2333 break;
2334 case FLUSH_CACHE:
2335 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2336 break;
2337 }
d0ecd2aa
FB
2338 }
2339 len -= l;
2340 buf += l;
2341 addr += l;
2342 }
2343}
2344
582b55a9 2345/* used for ROM loading : can write in RAM and ROM */
2a221651 2346void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2347 const uint8_t *buf, int len)
2348{
2a221651 2349 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2350}
2351
2352void cpu_flush_icache_range(hwaddr start, int len)
2353{
2354 /*
2355 * This function should do the same thing as an icache flush that was
2356 * triggered from within the guest. For TCG we are always cache coherent,
2357 * so there is no need to flush anything. For KVM / Xen we need to flush
2358 * the host's instruction cache at least.
2359 */
2360 if (tcg_enabled()) {
2361 return;
2362 }
2363
2a221651
EI
2364 cpu_physical_memory_write_rom_internal(&address_space_memory,
2365 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2366}
2367
6d16c2f8 2368typedef struct {
d3e71559 2369 MemoryRegion *mr;
6d16c2f8 2370 void *buffer;
a8170e5e
AK
2371 hwaddr addr;
2372 hwaddr len;
6d16c2f8
AL
2373} BounceBuffer;
2374
2375static BounceBuffer bounce;
2376
ba223c29
AL
2377typedef struct MapClient {
2378 void *opaque;
2379 void (*callback)(void *opaque);
72cf2d4f 2380 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2381} MapClient;
2382
72cf2d4f
BS
2383static QLIST_HEAD(map_client_list, MapClient) map_client_list
2384 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2385
2386void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2387{
7267c094 2388 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2389
2390 client->opaque = opaque;
2391 client->callback = callback;
72cf2d4f 2392 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2393 return client;
2394}
2395
8b9c99d9 2396static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2397{
2398 MapClient *client = (MapClient *)_client;
2399
72cf2d4f 2400 QLIST_REMOVE(client, link);
7267c094 2401 g_free(client);
ba223c29
AL
2402}
2403
2404static void cpu_notify_map_clients(void)
2405{
2406 MapClient *client;
2407
72cf2d4f
BS
2408 while (!QLIST_EMPTY(&map_client_list)) {
2409 client = QLIST_FIRST(&map_client_list);
ba223c29 2410 client->callback(client->opaque);
34d5e948 2411 cpu_unregister_map_client(client);
ba223c29
AL
2412 }
2413}
2414
51644ab7
PB
2415bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2416{
5c8a00ce 2417 MemoryRegion *mr;
51644ab7
PB
2418 hwaddr l, xlat;
2419
2420 while (len > 0) {
2421 l = len;
5c8a00ce
PB
2422 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2423 if (!memory_access_is_direct(mr, is_write)) {
2424 l = memory_access_size(mr, l, addr);
2425 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2426 return false;
2427 }
2428 }
2429
2430 len -= l;
2431 addr += l;
2432 }
2433 return true;
2434}
2435
6d16c2f8
AL
2436/* Map a physical memory region into a host virtual address.
2437 * May map a subset of the requested range, given by and returned in *plen.
2438 * May return NULL if resources needed to perform the mapping are exhausted.
2439 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2440 * Use cpu_register_map_client() to know when retrying the map operation is
2441 * likely to succeed.
6d16c2f8 2442 */
ac1970fb 2443void *address_space_map(AddressSpace *as,
a8170e5e
AK
2444 hwaddr addr,
2445 hwaddr *plen,
ac1970fb 2446 bool is_write)
6d16c2f8 2447{
a8170e5e 2448 hwaddr len = *plen;
e3127ae0
PB
2449 hwaddr done = 0;
2450 hwaddr l, xlat, base;
2451 MemoryRegion *mr, *this_mr;
2452 ram_addr_t raddr;
6d16c2f8 2453
e3127ae0
PB
2454 if (len == 0) {
2455 return NULL;
2456 }
38bee5dc 2457
e3127ae0
PB
2458 l = len;
2459 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2460 if (!memory_access_is_direct(mr, is_write)) {
2461 if (bounce.buffer) {
2462 return NULL;
6d16c2f8 2463 }
e85d9db5
KW
2464 /* Avoid unbounded allocations */
2465 l = MIN(l, TARGET_PAGE_SIZE);
2466 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2467 bounce.addr = addr;
2468 bounce.len = l;
d3e71559
PB
2469
2470 memory_region_ref(mr);
2471 bounce.mr = mr;
e3127ae0
PB
2472 if (!is_write) {
2473 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2474 }
6d16c2f8 2475
e3127ae0
PB
2476 *plen = l;
2477 return bounce.buffer;
2478 }
2479
2480 base = xlat;
2481 raddr = memory_region_get_ram_addr(mr);
2482
2483 for (;;) {
6d16c2f8
AL
2484 len -= l;
2485 addr += l;
e3127ae0
PB
2486 done += l;
2487 if (len == 0) {
2488 break;
2489 }
2490
2491 l = len;
2492 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2493 if (this_mr != mr || xlat != base + done) {
2494 break;
2495 }
6d16c2f8 2496 }
e3127ae0 2497
d3e71559 2498 memory_region_ref(mr);
e3127ae0
PB
2499 *plen = done;
2500 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2501}
2502
ac1970fb 2503/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2504 * Will also mark the memory as dirty if is_write == 1. access_len gives
2505 * the amount of memory that was actually read or written by the caller.
2506 */
a8170e5e
AK
2507void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2508 int is_write, hwaddr access_len)
6d16c2f8
AL
2509{
2510 if (buffer != bounce.buffer) {
d3e71559
PB
2511 MemoryRegion *mr;
2512 ram_addr_t addr1;
2513
2514 mr = qemu_ram_addr_from_host(buffer, &addr1);
2515 assert(mr != NULL);
6d16c2f8 2516 if (is_write) {
6886867e 2517 invalidate_and_set_dirty(addr1, access_len);
6d16c2f8 2518 }
868bb33f 2519 if (xen_enabled()) {
e41d7c69 2520 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2521 }
d3e71559 2522 memory_region_unref(mr);
6d16c2f8
AL
2523 return;
2524 }
2525 if (is_write) {
ac1970fb 2526 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2527 }
f8a83245 2528 qemu_vfree(bounce.buffer);
6d16c2f8 2529 bounce.buffer = NULL;
d3e71559 2530 memory_region_unref(bounce.mr);
ba223c29 2531 cpu_notify_map_clients();
6d16c2f8 2532}
d0ecd2aa 2533
a8170e5e
AK
2534void *cpu_physical_memory_map(hwaddr addr,
2535 hwaddr *plen,
ac1970fb
AK
2536 int is_write)
2537{
2538 return address_space_map(&address_space_memory, addr, plen, is_write);
2539}
2540
a8170e5e
AK
2541void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2542 int is_write, hwaddr access_len)
ac1970fb
AK
2543{
2544 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2545}
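/* Sketch of the zero-copy path: map, touch the memory directly, unmap.  If
 * the mapping comes back shorter than requested the caller must loop; if it
 * comes back NULL (bounce buffer busy), cpu_register_map_client() tells the
 * caller when a retry is worthwhile.  Address and length are examples. */
static void example_map_unmap(AddressSpace *as)
{
    hwaddr addr = 0x20000;
    hwaddr len = 512;
    void *p = address_space_map(as, addr, &len, true /* is_write */);

    if (p) {
        memset(p, 0, len);                     /* len may now be < 512 */
        address_space_unmap(as, p, len, true, len);
    }
}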
2546
8df1cd07 2547/* warning: addr must be aligned */
fdfba1a2 2548static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2549 enum device_endian endian)
8df1cd07 2550{
8df1cd07 2551 uint8_t *ptr;
791af8c8 2552 uint64_t val;
5c8a00ce 2553 MemoryRegion *mr;
149f54b5
PB
2554 hwaddr l = 4;
2555 hwaddr addr1;
8df1cd07 2556
fdfba1a2 2557 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2558 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2559 /* I/O case */
5c8a00ce 2560 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2561#if defined(TARGET_WORDS_BIGENDIAN)
2562 if (endian == DEVICE_LITTLE_ENDIAN) {
2563 val = bswap32(val);
2564 }
2565#else
2566 if (endian == DEVICE_BIG_ENDIAN) {
2567 val = bswap32(val);
2568 }
2569#endif
8df1cd07
FB
2570 } else {
2571 /* RAM case */
5c8a00ce 2572 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2573 & TARGET_PAGE_MASK)
149f54b5 2574 + addr1);
1e78bcc1
AG
2575 switch (endian) {
2576 case DEVICE_LITTLE_ENDIAN:
2577 val = ldl_le_p(ptr);
2578 break;
2579 case DEVICE_BIG_ENDIAN:
2580 val = ldl_be_p(ptr);
2581 break;
2582 default:
2583 val = ldl_p(ptr);
2584 break;
2585 }
8df1cd07
FB
2586 }
2587 return val;
2588}
2589
fdfba1a2 2590uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2591{
fdfba1a2 2592 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2593}
2594
fdfba1a2 2595uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2596{
fdfba1a2 2597 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2598}
2599
fdfba1a2 2600uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2601{
fdfba1a2 2602 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2603}
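/* Sketch: choosing an accessor by the device's declared endianness.  The
 * _le_ variant below always interprets the bytes as little-endian and
 * returns a host-order value, whereas ldl_phys() applies the target's
 * native byte order.  The register address is an arbitrary example. */
static uint32_t example_read_le_register(AddressSpace *as, hwaddr reg_addr)
{
    return ldl_le_phys(as, reg_addr);
}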
2604
84b7b8e7 2605/* warning: addr must be aligned */
2c17449b 2606static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2607 enum device_endian endian)
84b7b8e7 2608{
84b7b8e7
FB
2609 uint8_t *ptr;
2610 uint64_t val;
5c8a00ce 2611 MemoryRegion *mr;
149f54b5
PB
2612 hwaddr l = 8;
2613 hwaddr addr1;
84b7b8e7 2614
2c17449b 2615 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2616 false);
2617 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2618 /* I/O case */
5c8a00ce 2619 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2620#if defined(TARGET_WORDS_BIGENDIAN)
2621 if (endian == DEVICE_LITTLE_ENDIAN) {
2622 val = bswap64(val);
2623 }
2624#else
2625 if (endian == DEVICE_BIG_ENDIAN) {
2626 val = bswap64(val);
2627 }
84b7b8e7
FB
2628#endif
2629 } else {
2630 /* RAM case */
5c8a00ce 2631 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2632 & TARGET_PAGE_MASK)
149f54b5 2633 + addr1);
1e78bcc1
AG
2634 switch (endian) {
2635 case DEVICE_LITTLE_ENDIAN:
2636 val = ldq_le_p(ptr);
2637 break;
2638 case DEVICE_BIG_ENDIAN:
2639 val = ldq_be_p(ptr);
2640 break;
2641 default:
2642 val = ldq_p(ptr);
2643 break;
2644 }
84b7b8e7
FB
2645 }
2646 return val;
2647}
2648
2c17449b 2649uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2650{
2c17449b 2651 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2652}
2653
2c17449b 2654uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2655{
2c17449b 2656 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2657}
2658
2c17449b 2659uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2660{
2c17449b 2661 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2662}
2663
aab33094 2664/* XXX: optimize */
2c17449b 2665uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2666{
2667 uint8_t val;
2c17449b 2668 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2669 return val;
2670}
2671
733f0b02 2672/* warning: addr must be aligned */
41701aa4 2673static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2674 enum device_endian endian)
aab33094 2675{
733f0b02
MT
2676 uint8_t *ptr;
2677 uint64_t val;
5c8a00ce 2678 MemoryRegion *mr;
149f54b5
PB
2679 hwaddr l = 2;
2680 hwaddr addr1;
733f0b02 2681
41701aa4 2682 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2683 false);
2684 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2685 /* I/O case */
5c8a00ce 2686 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2687#if defined(TARGET_WORDS_BIGENDIAN)
2688 if (endian == DEVICE_LITTLE_ENDIAN) {
2689 val = bswap16(val);
2690 }
2691#else
2692 if (endian == DEVICE_BIG_ENDIAN) {
2693 val = bswap16(val);
2694 }
2695#endif
733f0b02
MT
2696 } else {
2697 /* RAM case */
5c8a00ce 2698 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2699 & TARGET_PAGE_MASK)
149f54b5 2700 + addr1);
1e78bcc1
AG
2701 switch (endian) {
2702 case DEVICE_LITTLE_ENDIAN:
2703 val = lduw_le_p(ptr);
2704 break;
2705 case DEVICE_BIG_ENDIAN:
2706 val = lduw_be_p(ptr);
2707 break;
2708 default:
2709 val = lduw_p(ptr);
2710 break;
2711 }
733f0b02
MT
2712 }
2713 return val;
aab33094
FB
2714}
2715
41701aa4 2716uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2717{
41701aa4 2718 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2719}
2720
41701aa4 2721uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2722{
41701aa4 2723 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2724}
2725
41701aa4 2726uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2727{
41701aa4 2728 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2729}
2730
8df1cd07
FB
2731/* warning: addr must be aligned. The ram page is not marked as dirty
2732 and the code inside is not invalidated. It is useful if the dirty
2733 bits are used to track modified PTEs */
2198a121 2734void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2735{
8df1cd07 2736 uint8_t *ptr;
5c8a00ce 2737 MemoryRegion *mr;
149f54b5
PB
2738 hwaddr l = 4;
2739 hwaddr addr1;
8df1cd07 2740
2198a121 2741 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2742 true);
2743 if (l < 4 || !memory_access_is_direct(mr, true)) {
2744 io_mem_write(mr, addr1, val, 4);
8df1cd07 2745 } else {
5c8a00ce 2746 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2747 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2748 stl_p(ptr, val);
74576198
AL
2749
2750 if (unlikely(in_migration)) {
a2cd8c85 2751 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2752 /* invalidate code */
2753 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2754 /* set dirty bit */
6886867e 2755 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
74576198
AL
2756 }
2757 }
8df1cd07
FB
2758 }
2759}
2760
2761/* warning: addr must be aligned */
ab1da857
EI
2762static inline void stl_phys_internal(AddressSpace *as,
2763 hwaddr addr, uint32_t val,
1e78bcc1 2764 enum device_endian endian)
8df1cd07 2765{
8df1cd07 2766 uint8_t *ptr;
5c8a00ce 2767 MemoryRegion *mr;
149f54b5
PB
2768 hwaddr l = 4;
2769 hwaddr addr1;
8df1cd07 2770
ab1da857 2771 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2772 true);
2773 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2774#if defined(TARGET_WORDS_BIGENDIAN)
2775 if (endian == DEVICE_LITTLE_ENDIAN) {
2776 val = bswap32(val);
2777 }
2778#else
2779 if (endian == DEVICE_BIG_ENDIAN) {
2780 val = bswap32(val);
2781 }
2782#endif
5c8a00ce 2783 io_mem_write(mr, addr1, val, 4);
8df1cd07 2784 } else {
8df1cd07 2785 /* RAM case */
5c8a00ce 2786 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2787 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2788 switch (endian) {
2789 case DEVICE_LITTLE_ENDIAN:
2790 stl_le_p(ptr, val);
2791 break;
2792 case DEVICE_BIG_ENDIAN:
2793 stl_be_p(ptr, val);
2794 break;
2795 default:
2796 stl_p(ptr, val);
2797 break;
2798 }
51d7a9eb 2799 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2800 }
2801}
2802
ab1da857 2803void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2804{
ab1da857 2805 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2806}
2807
ab1da857 2808void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2809{
ab1da857 2810 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2811}
2812
ab1da857 2813void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2814{
ab1da857 2815 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2816}
2817
aab33094 2818/* XXX: optimize */
db3be60d 2819void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2820{
2821 uint8_t v = val;
db3be60d 2822 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2823}
2824
733f0b02 2825/* warning: addr must be aligned */
5ce5944d
EI
2826static inline void stw_phys_internal(AddressSpace *as,
2827 hwaddr addr, uint32_t val,
1e78bcc1 2828 enum device_endian endian)
aab33094 2829{
733f0b02 2830 uint8_t *ptr;
5c8a00ce 2831 MemoryRegion *mr;
149f54b5
PB
2832 hwaddr l = 2;
2833 hwaddr addr1;
733f0b02 2834
5ce5944d 2835 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2836 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2837#if defined(TARGET_WORDS_BIGENDIAN)
2838 if (endian == DEVICE_LITTLE_ENDIAN) {
2839 val = bswap16(val);
2840 }
2841#else
2842 if (endian == DEVICE_BIG_ENDIAN) {
2843 val = bswap16(val);
2844 }
2845#endif
5c8a00ce 2846 io_mem_write(mr, addr1, val, 2);
733f0b02 2847 } else {
733f0b02 2848 /* RAM case */
5c8a00ce 2849 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2850 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2851 switch (endian) {
2852 case DEVICE_LITTLE_ENDIAN:
2853 stw_le_p(ptr, val);
2854 break;
2855 case DEVICE_BIG_ENDIAN:
2856 stw_be_p(ptr, val);
2857 break;
2858 default:
2859 stw_p(ptr, val);
2860 break;
2861 }
51d7a9eb 2862 invalidate_and_set_dirty(addr1, 2);
733f0b02 2863 }
aab33094
FB
2864}
2865
5ce5944d 2866void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2867{
5ce5944d 2868 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2869}
2870
5ce5944d 2871void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2872{
5ce5944d 2873 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2874}
2875
5ce5944d 2876void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2877{
5ce5944d 2878 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2879}
2880
aab33094 2881/* XXX: optimize */
f606604f 2882void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2883{
2884 val = tswap64(val);
f606604f 2885 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2886}
2887
f606604f 2888void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2889{
2890 val = cpu_to_le64(val);
f606604f 2891 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2892}
2893
f606604f 2894void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2895{
2896 val = cpu_to_be64(val);
f606604f 2897 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2898}
2899
5e2972fd 2900/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2901int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2902 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2903{
2904 int l;
a8170e5e 2905 hwaddr phys_addr;
9b3c35e0 2906 target_ulong page;
13eb76e0
FB
2907
2908 while (len > 0) {
2909 page = addr & TARGET_PAGE_MASK;
f17ec444 2910 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2911 /* if no physical page mapped, return an error */
2912 if (phys_addr == -1)
2913 return -1;
2914 l = (page + TARGET_PAGE_SIZE) - addr;
2915 if (l > len)
2916 l = len;
5e2972fd 2917 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2918 if (is_write) {
2919 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2920 } else {
2921 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2922 }
13eb76e0
FB
2923 len -= l;
2924 buf += l;
2925 addr += l;
2926 }
2927 return 0;
2928}
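/* Sketch: the gdb stub is the main user of this helper; reading four bytes
 * of guest virtual memory from a given CPU looks roughly like this. */
static int example_debug_read(CPUState *cpu, target_ulong vaddr, uint32_t *out)
{
    return cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)out, sizeof(*out), 0);
}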
a68fe89c 2929#endif
13eb76e0 2930
8e4a424b
BS
2931/*
2932 * A helper function for the _utterly broken_ virtio device model to find out if
2933 * it's running on a big endian machine. Don't do this at home kids!
2934 */
98ed8ecf
GK
2935bool target_words_bigendian(void);
2936bool target_words_bigendian(void)
8e4a424b
BS
2937{
2938#if defined(TARGET_WORDS_BIGENDIAN)
2939 return true;
2940#else
2941 return false;
2942#endif
2943}
2944
76f35538 2945#ifndef CONFIG_USER_ONLY
a8170e5e 2946bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2947{
5c8a00ce 2948 MemoryRegion *mr;
149f54b5 2949 hwaddr l = 1;
76f35538 2950
5c8a00ce
PB
2951 mr = address_space_translate(&address_space_memory,
2952 phys_addr, &phys_addr, &l, false);
76f35538 2953
5c8a00ce
PB
2954 return !(memory_region_is_ram(mr) ||
2955 memory_region_is_romd(mr));
76f35538 2956}
bd2fa51f
MH
2957
2958void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2959{
2960 RAMBlock *block;
2961
2962 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 2963 func(block->host, block->offset, block->used_length, opaque);
bd2fa51f
MH
2964 }
2965}
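/* Sketch: callers such as vhost walk every RAM block through a callback.
 * Summing the block lengths here is just an example use of the iterator. */
static void example_count_ram_block(void *host, ram_addr_t offset,
                                    ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}

static void example_sum_ram_blocks(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_count_ram_block, &total);
}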
ec3f8c99 2966#endif