git.proxmox.com Git - mirror_qemu.git/blame - exec.c
Commit  Line  Data
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004 4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c 22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
cc9e98cb 29#include "hw/qdev.h"
1de7afc9 30#include "qemu/osdep.h"
9c17d615 31#include "sysemu/kvm.h"
2ff3de68 32#include "sysemu/sysemu.h"
0d09e41a 33#include "hw/xen/xen.h"
1de7afc9 34#include "qemu/timer.h"
35#include "qemu/config-file.h"
75a34036 36#include "qemu/error-report.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a 40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
220c3ebd 52#include "exec/ram_addr.h"
67d95c15 53
b35ba30f 54#include "qemu/range.h"
55
db7b5426 56//#define DEBUG_SUBPAGE
1196be37 57
e2eef170 58#if !defined(CONFIG_USER_ONLY)
981fdf23 59static bool in_migration;
94a6b54f 60
a3161038 61RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a 62
63static MemoryRegion *system_memory;
309cb471 64static MemoryRegion *system_io;
62152b8a 65
f6790af6 66AddressSpace address_space_io;
67AddressSpace address_space_memory;
2673a5da 68
0844e007 69MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 70static MemoryRegion io_mem_unassigned;
0e0df1e2 71
7bd4f430 72/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
73#define RAM_PREALLOC (1 << 0)
74
dbcb8981 75/* RAM is mmap-ed with MAP_SHARED */
76#define RAM_SHARED (1 << 1)
77
62be4e3a 78/* Only a portion of RAM (used_length) is actually used, and migrated.
79 * This used_length size can change across reboots.
80 */
81#define RAM_RESIZEABLE (1 << 2)
82
e2eef170 83#endif
9fa3e853 84
bdc44640 85struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601 86/* current CPU in the current thread. It is only valid inside
87 cpu_exec() */
4917cf44 88DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 89/* 0 = Do not count executed instructions.
bf20dc07 90 1 = Precise instruction counting.
2e70f6ef 91 2 = Adaptive rate instruction counting. */
5708fc66 92int use_icount;
6a00d601 93
e2eef170 94#if !defined(CONFIG_USER_ONLY)
4346ae3e 95
1db8abb1 96typedef struct PhysPageEntry PhysPageEntry;
97
98struct PhysPageEntry {
9736e55b 99 /* How many bits to skip to the next level (in units of P_L2_SIZE). 0 for a leaf. */
8b795765 100 uint32_t skip : 6;
9736e55b 101 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 102 uint32_t ptr : 26;
1db8abb1 103};
104
8b795765 105#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
106
03f49957 107/* Size of the L2 (and L3, etc) page tables. */
57271d63 108#define ADDR_SPACE_BITS 64
03f49957 109
026736ce 110#define P_L2_BITS 9
03f49957 111#define P_L2_SIZE (1 << P_L2_BITS)
112
113#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
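/* Illustrative arithmetic added for this annotated listing (not part of the
 * original source): assuming 4 KiB target pages (TARGET_PAGE_BITS == 12),
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6 levels, each indexed by
 * P_L2_BITS == 9 bits, i.e. 512 PhysPageEntry slots per node.
 */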
114
115typedef PhysPageEntry Node[P_L2_SIZE];
0475d94f 116
53cb28cb 117typedef struct PhysPageMap {
118 unsigned sections_nb;
119 unsigned sections_nb_alloc;
120 unsigned nodes_nb;
121 unsigned nodes_nb_alloc;
122 Node *nodes;
123 MemoryRegionSection *sections;
124} PhysPageMap;
125
1db8abb1 126struct AddressSpaceDispatch {
127 /* This is a multi-level map on the physical address space.
128 * The bottom level has pointers to MemoryRegionSections.
129 */
130 PhysPageEntry phys_map;
53cb28cb 131 PhysPageMap map;
acc9d80b 132 AddressSpace *as;
1db8abb1 133};
134
90260c6c 135#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
136typedef struct subpage_t {
137 MemoryRegion iomem;
acc9d80b 138 AddressSpace *as;
90260c6c 139 hwaddr base;
140 uint16_t sub_section[TARGET_PAGE_SIZE];
141} subpage_t;
142
b41aac4f 143#define PHYS_SECTION_UNASSIGNED 0
144#define PHYS_SECTION_NOTDIRTY 1
145#define PHYS_SECTION_ROM 2
146#define PHYS_SECTION_WATCH 3
5312bd8b 147
e2eef170 148static void io_mem_init(void);
62152b8a 149static void memory_map_init(void);
09daed84 150static void tcg_commit(MemoryListener *listener);
e2eef170 151
1ec9b909 152static MemoryRegion io_mem_watch;
6658ffb8 153#endif
fd6ce8f6 154
6d9a1304 155#if !defined(CONFIG_USER_ONLY)
d6f2ea22 156
53cb28cb 157static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 158{
53cb28cb 159 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
160 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
161 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
162 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 163 }
f7bf5461 164}
165
53cb28cb 166static uint32_t phys_map_node_alloc(PhysPageMap *map)
f7bf5461 167{
168 unsigned i;
8b795765 169 uint32_t ret;
f7bf5461 170
53cb28cb 171 ret = map->nodes_nb++;
f7bf5461 172 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 173 assert(ret != map->nodes_nb_alloc);
03f49957 174 for (i = 0; i < P_L2_SIZE; ++i) {
53cb28cb 175 map->nodes[ret][i].skip = 1;
176 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 177 }
f7bf5461 178 return ret;
d6f2ea22 179}
180
53cb28cb 181static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
182 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 183 int level)
f7bf5461 184{
185 PhysPageEntry *p;
186 int i;
03f49957 187 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 188
9736e55b 189 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
53cb28cb 190 lp->ptr = phys_map_node_alloc(map);
191 p = map->nodes[lp->ptr];
f7bf5461 192 if (level == 0) {
03f49957 193 for (i = 0; i < P_L2_SIZE; i++) {
9736e55b 194 p[i].skip = 0;
b41aac4f 195 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 196 }
67c4d23c 197 }
f7bf5461 198 } else {
53cb28cb 199 p = map->nodes[lp->ptr];
92e873b9 200 }
03f49957 201 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 202
03f49957 203 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 204 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 205 lp->skip = 0;
c19e8800 206 lp->ptr = leaf;
07f07b31 207 *index += step;
208 *nb -= step;
2999097b 209 } else {
53cb28cb 210 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b 211 }
212 ++lp;
f7bf5461 213 }
214}
215
ac1970fb 216static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 217 hwaddr index, hwaddr nb,
2999097b 218 uint16_t leaf)
f7bf5461 219{
2999097b 220 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 221 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 222
53cb28cb 223 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9 224}
225
b35ba30f 226/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
227 * and update our entry so we can skip it and go directly to the destination.
228 */
229static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
230{
231 unsigned valid_ptr = P_L2_SIZE;
232 int valid = 0;
233 PhysPageEntry *p;
234 int i;
235
236 if (lp->ptr == PHYS_MAP_NODE_NIL) {
237 return;
238 }
239
240 p = nodes[lp->ptr];
241 for (i = 0; i < P_L2_SIZE; i++) {
242 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
243 continue;
244 }
245
246 valid_ptr = i;
247 valid++;
248 if (p[i].skip) {
249 phys_page_compact(&p[i], nodes, compacted);
250 }
251 }
252
253 /* We can only compress if there's only one child. */
254 if (valid != 1) {
255 return;
256 }
257
258 assert(valid_ptr < P_L2_SIZE);
259
260 /* Don't compress if it won't fit in the # of bits we have. */
261 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
262 return;
263 }
264
265 lp->ptr = p[valid_ptr].ptr;
266 if (!p[valid_ptr].skip) {
267 /* If our only child is a leaf, make this a leaf. */
268 /* By design, we should have made this node a leaf to begin with so we
269 * should never reach here.
270 * But since it's so simple to handle this, let's do it just in case we
271 * change this rule.
272 */
273 lp->skip = 0;
274 } else {
275 lp->skip += p[valid_ptr].skip;
276 }
277}
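/* Illustrative example added for this annotated listing (not in the original
 * source): if a node's only child is itself a skip-2 node, the parent entry
 * absorbs it and becomes skip-3 (1 + 2), so a later phys_page_find() descends
 * three levels in one step instead of walking two intermediate nodes.
 */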
278
279static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
280{
281 DECLARE_BITMAP(compacted, nodes_nb);
282
283 if (d->phys_map.skip) {
53cb28cb 284 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f 285 }
286}
287
97115a8d 288static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 289 Node *nodes, MemoryRegionSection *sections)
92e873b9 290{
31ab2b4a 291 PhysPageEntry *p;
97115a8d 292 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 293 int i;
f1f6e3b8 294
9736e55b 295 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 296 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 297 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 298 }
9affd6fc 299 p = nodes[lp.ptr];
03f49957 300 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 301 }
b35ba30f 302
303 if (sections[lp.ptr].size.hi ||
304 range_covers_byte(sections[lp.ptr].offset_within_address_space,
305 sections[lp.ptr].size.lo, addr)) {
306 return &sections[lp.ptr];
307 } else {
308 return &sections[PHYS_SECTION_UNASSIGNED];
309 }
f3705d53 310}
311
e5548617 312bool memory_region_is_unassigned(MemoryRegion *mr)
313{
2a8e7499 314 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 315 && mr != &io_mem_watch;
fd6ce8f6 316}
149f54b5 317
c7086b4a 318static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c 319 hwaddr addr,
320 bool resolve_subpage)
9f029603 321{
90260c6c 322 MemoryRegionSection *section;
323 subpage_t *subpage;
324
53cb28cb 325 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c 329 }
327 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 328 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
329 }
330 return section;
9f029603 331}
332
90260c6c 333static MemoryRegionSection *
c7086b4a 334address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 335 hwaddr *plen, bool resolve_subpage)
149f54b5 336{
337 MemoryRegionSection *section;
a87f3954 338 Int128 diff;
149f54b5 339
c7086b4a 340 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5 341 /* Compute offset within MemoryRegionSection */
342 addr -= section->offset_within_address_space;
343
344 /* Compute offset within MemoryRegion */
345 *xlat = addr + section->offset_within_region;
346
347 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 348 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5 349 return section;
350}
90260c6c 351
a87f3954 352static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
353{
354 if (memory_region_is_ram(mr)) {
355 return !(is_write && mr->readonly);
356 }
357 if (memory_region_is_romd(mr)) {
358 return !is_write;
359 }
360
361 return false;
362}
363
5c8a00ce 364MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
365 hwaddr *xlat, hwaddr *plen,
366 bool is_write)
90260c6c 367{
30951157 368 IOMMUTLBEntry iotlb;
369 MemoryRegionSection *section;
370 MemoryRegion *mr;
371 hwaddr len = *plen;
372
373 for (;;) {
a87f3954 374 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157 375 mr = section->mr;
376
377 if (!mr->iommu_ops) {
378 break;
379 }
380
8d7b8cb9 381 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157 382 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
383 | (addr & iotlb.addr_mask));
384 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
385 if (!(iotlb.perm & (1 << is_write))) {
386 mr = &io_mem_unassigned;
387 break;
388 }
389
390 as = iotlb.target_as;
391 }
392
fe680d0d 393 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954 394 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
395 len = MIN(page, len);
396 }
397
30951157 398 *plen = len;
399 *xlat = addr;
400 return mr;
90260c6c 401}
402
403MemoryRegionSection *
404address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
405 hwaddr *plen)
406{
30951157 407 MemoryRegionSection *section;
c7086b4a 408 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
30951157 409
410 assert(!section->mr->iommu_ops);
411 return section;
90260c6c 412}
5b6dd868 413#endif
fd6ce8f6 414
5b6dd868 415void cpu_exec_init_all(void)
fdbb84d1 416{
5b6dd868 417#if !defined(CONFIG_USER_ONLY)
b2a8658e 418 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
419 memory_map_init();
420 io_mem_init();
fdbb84d1 421#endif
5b6dd868 422}
fdbb84d1 423
b170fce3 424#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
425
426static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 427{
259186a7 428 CPUState *cpu = opaque;
a513fe19 429
5b6dd868
BS
430 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
431 version_id is increased. */
259186a7 432 cpu->interrupt_request &= ~0x01;
c01a71c1 433 tlb_flush(cpu, 1);
5b6dd868
BS
434
435 return 0;
a513fe19 436}
7501267e 437
6c3bff0e
PD
438static int cpu_common_pre_load(void *opaque)
439{
440 CPUState *cpu = opaque;
441
adee6424 442 cpu->exception_index = -1;
6c3bff0e
PD
443
444 return 0;
445}
446
447static bool cpu_common_exception_index_needed(void *opaque)
448{
449 CPUState *cpu = opaque;
450
adee6424 451 return tcg_enabled() && cpu->exception_index != -1;
6c3bff0e
PD
452}
453
454static const VMStateDescription vmstate_cpu_common_exception_index = {
455 .name = "cpu_common/exception_index",
456 .version_id = 1,
457 .minimum_version_id = 1,
458 .fields = (VMStateField[]) {
459 VMSTATE_INT32(exception_index, CPUState),
460 VMSTATE_END_OF_LIST()
461 }
462};
463
1a1562f5 464const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
465 .name = "cpu_common",
466 .version_id = 1,
467 .minimum_version_id = 1,
6c3bff0e 468 .pre_load = cpu_common_pre_load,
5b6dd868 469 .post_load = cpu_common_post_load,
35d08458 470 .fields = (VMStateField[]) {
259186a7
AF
471 VMSTATE_UINT32(halted, CPUState),
472 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 473 VMSTATE_END_OF_LIST()
6c3bff0e
PD
474 },
475 .subsections = (VMStateSubsection[]) {
476 {
477 .vmsd = &vmstate_cpu_common_exception_index,
478 .needed = cpu_common_exception_index_needed,
479 } , {
480 /* empty */
481 }
5b6dd868
BS
482 }
483};
1a1562f5 484
5b6dd868 485#endif
ea041c0e 486
38d8f5c8 487CPUState *qemu_get_cpu(int index)
ea041c0e 488{
bdc44640 489 CPUState *cpu;
ea041c0e 490
bdc44640 491 CPU_FOREACH(cpu) {
55e5c285 492 if (cpu->cpu_index == index) {
bdc44640 493 return cpu;
55e5c285 494 }
ea041c0e 495 }
5b6dd868 496
bdc44640 497 return NULL;
ea041c0e
FB
498}
499
09daed84
EI
500#if !defined(CONFIG_USER_ONLY)
501void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
502{
503 /* We only support one address space per cpu at the moment. */
504 assert(cpu->as == as);
505
506 if (cpu->tcg_as_listener) {
507 memory_listener_unregister(cpu->tcg_as_listener);
508 } else {
509 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
510 }
511 cpu->tcg_as_listener->commit = tcg_commit;
512 memory_listener_register(cpu->tcg_as_listener, as);
513}
514#endif
515
5b6dd868 516void cpu_exec_init(CPUArchState *env)
ea041c0e 517{
5b6dd868 518 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 519 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 520 CPUState *some_cpu;
5b6dd868
BS
521 int cpu_index;
522
523#if defined(CONFIG_USER_ONLY)
524 cpu_list_lock();
525#endif
5b6dd868 526 cpu_index = 0;
bdc44640 527 CPU_FOREACH(some_cpu) {
5b6dd868
BS
528 cpu_index++;
529 }
55e5c285 530 cpu->cpu_index = cpu_index;
1b1ed8dc 531 cpu->numa_node = 0;
f0c3c505 532 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 533 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 534#ifndef CONFIG_USER_ONLY
09daed84 535 cpu->as = &address_space_memory;
5b6dd868
BS
536 cpu->thread_id = qemu_get_thread_id();
537#endif
bdc44640 538 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
539#if defined(CONFIG_USER_ONLY)
540 cpu_list_unlock();
541#endif
e0d47944
AF
542 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
543 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
544 }
5b6dd868 545#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
546 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
547 cpu_save, cpu_load, env);
b170fce3 548 assert(cc->vmsd == NULL);
e0d47944 549 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 550#endif
b170fce3
AF
551 if (cc->vmsd != NULL) {
552 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
553 }
ea041c0e
FB
554}
555
94df27fd 556#if defined(CONFIG_USER_ONLY)
00b941e5 557static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
558{
559 tb_invalidate_phys_page_range(pc, pc + 1, 0);
560}
561#else
00b941e5 562static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 563{
e8262a1b
MF
564 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
565 if (phys != -1) {
09daed84 566 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 567 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 568 }
1e7855a5 569}
c27004ec 570#endif
d720b93d 571
c527ee8f 572#if defined(CONFIG_USER_ONLY)
75a34036 573void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
574
575{
576}
577
3ee887e8
PM
578int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
579 int flags)
580{
581 return -ENOSYS;
582}
583
584void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
585{
586}
587
75a34036 588int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
589 int flags, CPUWatchpoint **watchpoint)
590{
591 return -ENOSYS;
592}
593#else
6658ffb8 594/* Add a watchpoint. */
75a34036 595int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 596 int flags, CPUWatchpoint **watchpoint)
6658ffb8 597{
c0ce998e 598 CPUWatchpoint *wp;
6658ffb8 599
05068c0d 600 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 601 if (len == 0 || (addr + len - 1) < addr) {
75a34036
AF
602 error_report("tried to set invalid watchpoint at %"
603 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
604 return -EINVAL;
605 }
7267c094 606 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
607
608 wp->vaddr = addr;
05068c0d 609 wp->len = len;
a1d1bb31
AL
610 wp->flags = flags;
611
2dc9f411 612 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
613 if (flags & BP_GDB) {
614 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
615 } else {
616 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
617 }
6658ffb8 618
31b030d4 619 tlb_flush_page(cpu, addr);
a1d1bb31
AL
620
621 if (watchpoint)
622 *watchpoint = wp;
623 return 0;
6658ffb8
PB
624}
625
a1d1bb31 626/* Remove a specific watchpoint. */
75a34036 627int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 628 int flags)
6658ffb8 629{
a1d1bb31 630 CPUWatchpoint *wp;
6658ffb8 631
ff4700b0 632 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 633 if (addr == wp->vaddr && len == wp->len
6e140f28 634 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 635 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
636 return 0;
637 }
638 }
a1d1bb31 639 return -ENOENT;
6658ffb8
PB
640}
641
a1d1bb31 642/* Remove a specific watchpoint by reference. */
75a34036 643void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 644{
ff4700b0 645 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 646
31b030d4 647 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 648
7267c094 649 g_free(watchpoint);
a1d1bb31
AL
650}
651
652/* Remove all matching watchpoints. */
75a34036 653void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 654{
c0ce998e 655 CPUWatchpoint *wp, *next;
a1d1bb31 656
ff4700b0 657 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
658 if (wp->flags & mask) {
659 cpu_watchpoint_remove_by_ref(cpu, wp);
660 }
c0ce998e 661 }
7d03f82f 662}
05068c0d
PM
663
664/* Return true if this watchpoint address matches the specified
665 * access (ie the address range covered by the watchpoint overlaps
666 * partially or completely with the address range covered by the
667 * access).
668 */
669static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
670 vaddr addr,
671 vaddr len)
672{
673 /* We know the lengths are non-zero, but a little caution is
674 * required to avoid errors in the case where the range ends
675 * exactly at the top of the address space and so addr + len
676 * wraps round to zero.
677 */
678 vaddr wpend = wp->vaddr + wp->len - 1;
679 vaddr addrend = addr + len - 1;
680
681 return !(addr > wpend || wp->vaddr > addrend);
682}
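/* Worked example added for this annotated listing (not in the original
 * source): a watchpoint at vaddr 0x1000 with len 4 covers [0x1000, 0x1003],
 * so an access at addr 0x1002 with len 2 covers [0x1002, 0x1003] and matches,
 * while an access starting at 0x1004 does not (addr > wpend).
 */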
683
c527ee8f 684#endif
7d03f82f 685
a1d1bb31 686/* Add a breakpoint. */
b3310ab3 687int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 688 CPUBreakpoint **breakpoint)
4c3a88a2 689{
c0ce998e 690 CPUBreakpoint *bp;
3b46e624 691
7267c094 692 bp = g_malloc(sizeof(*bp));
4c3a88a2 693
a1d1bb31
AL
694 bp->pc = pc;
695 bp->flags = flags;
696
2dc9f411 697 /* keep all GDB-injected breakpoints in front */
00b941e5 698 if (flags & BP_GDB) {
f0c3c505 699 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 700 } else {
f0c3c505 701 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 702 }
3b46e624 703
f0c3c505 704 breakpoint_invalidate(cpu, pc);
a1d1bb31 705
00b941e5 706 if (breakpoint) {
a1d1bb31 707 *breakpoint = bp;
00b941e5 708 }
4c3a88a2 709 return 0;
4c3a88a2
FB
710}
711
a1d1bb31 712/* Remove a specific breakpoint. */
b3310ab3 713int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 714{
a1d1bb31
AL
715 CPUBreakpoint *bp;
716
f0c3c505 717 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 718 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 719 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
720 return 0;
721 }
7d03f82f 722 }
a1d1bb31 723 return -ENOENT;
7d03f82f
EI
724}
725
a1d1bb31 726/* Remove a specific breakpoint by reference. */
b3310ab3 727void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 728{
f0c3c505
AF
729 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
730
731 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 732
7267c094 733 g_free(breakpoint);
a1d1bb31
AL
734}
735
736/* Remove all matching breakpoints. */
b3310ab3 737void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 738{
c0ce998e 739 CPUBreakpoint *bp, *next;
a1d1bb31 740
f0c3c505 741 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
742 if (bp->flags & mask) {
743 cpu_breakpoint_remove_by_ref(cpu, bp);
744 }
c0ce998e 745 }
4c3a88a2
FB
746}
747
c33a346e
FB
748/* enable or disable single step mode. EXCP_DEBUG is returned by the
749 CPU loop after each instruction */
3825b28f 750void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 751{
ed2803da
AF
752 if (cpu->singlestep_enabled != enabled) {
753 cpu->singlestep_enabled = enabled;
754 if (kvm_enabled()) {
38e478ec 755 kvm_update_guest_debug(cpu, 0);
ed2803da 756 } else {
ccbb4d44 757 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 758 /* XXX: only flush what is necessary */
38e478ec 759 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
760 tb_flush(env);
761 }
c33a346e 762 }
c33a346e
FB
763}
764
a47dddd7 765void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
766{
767 va_list ap;
493ae1f0 768 va_list ap2;
7501267e
FB
769
770 va_start(ap, fmt);
493ae1f0 771 va_copy(ap2, ap);
7501267e
FB
772 fprintf(stderr, "qemu: fatal: ");
773 vfprintf(stderr, fmt, ap);
774 fprintf(stderr, "\n");
878096ee 775 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
776 if (qemu_log_enabled()) {
777 qemu_log("qemu: fatal: ");
778 qemu_log_vprintf(fmt, ap2);
779 qemu_log("\n");
a0762859 780 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 781 qemu_log_flush();
93fcfe39 782 qemu_log_close();
924edcae 783 }
493ae1f0 784 va_end(ap2);
f9373291 785 va_end(ap);
fd052bf6
RV
786#if defined(CONFIG_USER_ONLY)
787 {
788 struct sigaction act;
789 sigfillset(&act.sa_mask);
790 act.sa_handler = SIG_DFL;
791 sigaction(SIGABRT, &act, NULL);
792 }
793#endif
7501267e
FB
794 abort();
795}
796
0124311e 797#if !defined(CONFIG_USER_ONLY)
041603fe
PB
798static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
799{
800 RAMBlock *block;
801
802 /* The list is protected by the iothread lock here. */
803 block = ram_list.mru_block;
9b8424d5 804 if (block && addr - block->offset < block->max_length) {
041603fe
PB
805 goto found;
806 }
807 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 808 if (addr - block->offset < block->max_length) {
041603fe
PB
809 goto found;
810 }
811 }
812
813 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
814 abort();
815
816found:
817 ram_list.mru_block = block;
818 return block;
819}
820
a2f4d5be 821static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 822{
041603fe 823 ram_addr_t start1;
a2f4d5be
JQ
824 RAMBlock *block;
825 ram_addr_t end;
826
827 end = TARGET_PAGE_ALIGN(start + length);
828 start &= TARGET_PAGE_MASK;
d24981d3 829
041603fe
PB
830 block = qemu_get_ram_block(start);
831 assert(block == qemu_get_ram_block(end - 1));
1240be24 832 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
041603fe 833 cpu_tlb_reset_dirty_all(start1, length);
d24981d3
JQ
834}
835
5579c7f3 836/* Note: start and end must be within the same ram block. */
a2f4d5be 837void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 838 unsigned client)
1ccde1cb 839{
1ccde1cb
FB
840 if (length == 0)
841 return;
c8d6f66a 842 cpu_physical_memory_clear_dirty_range_type(start, length, client);
f23db169 843
d24981d3 844 if (tcg_enabled()) {
a2f4d5be 845 tlb_reset_dirty_range_all(start, length);
5579c7f3 846 }
1ccde1cb
FB
847}
848
981fdf23 849static void cpu_physical_memory_set_dirty_tracking(bool enable)
74576198
AL
850{
851 in_migration = enable;
74576198
AL
852}
853
bb0e627a 854hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
855 MemoryRegionSection *section,
856 target_ulong vaddr,
857 hwaddr paddr, hwaddr xlat,
858 int prot,
859 target_ulong *address)
e5548617 860{
a8170e5e 861 hwaddr iotlb;
e5548617
BS
862 CPUWatchpoint *wp;
863
cc5bea60 864 if (memory_region_is_ram(section->mr)) {
e5548617
BS
865 /* Normal RAM. */
866 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 867 + xlat;
e5548617 868 if (!section->readonly) {
b41aac4f 869 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 870 } else {
b41aac4f 871 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
872 }
873 } else {
1b3fb98f 874 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 875 iotlb += xlat;
e5548617
BS
876 }
877
878 /* Make accesses to pages with watchpoints go via the
879 watchpoint trap routines. */
ff4700b0 880 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 881 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
882 /* Avoid trapping reads of pages with a write breakpoint. */
883 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 884 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
885 *address |= TLB_MMIO;
886 break;
887 }
888 }
889 }
890
891 return iotlb;
892}
9fa3e853
FB
893#endif /* defined(CONFIG_USER_ONLY) */
894
e2eef170 895#if !defined(CONFIG_USER_ONLY)
8da3ff18 896
c227f099 897static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 898 uint16_t section);
acc9d80b 899static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 900
a2b257d6
IM
901static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
902 qemu_anon_ram_alloc;
91138037
MA
903
904/*
 905 * Set a custom physical guest memory allocator.
906 * Accelerators with unusual needs may need this. Hopefully, we can
907 * get rid of it eventually.
908 */
a2b257d6 909void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
91138037
MA
910{
911 phys_mem_alloc = alloc;
912}
913
53cb28cb
MA
914static uint16_t phys_section_add(PhysPageMap *map,
915 MemoryRegionSection *section)
5312bd8b 916{
68f3f65b
PB
917 /* The physical section number is ORed with a page-aligned
918 * pointer to produce the iotlb entries. Thus it should
919 * never overflow into the page-aligned value.
920 */
53cb28cb 921 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 922
53cb28cb
MA
923 if (map->sections_nb == map->sections_nb_alloc) {
924 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
925 map->sections = g_renew(MemoryRegionSection, map->sections,
926 map->sections_nb_alloc);
5312bd8b 927 }
53cb28cb 928 map->sections[map->sections_nb] = *section;
dfde4e6e 929 memory_region_ref(section->mr);
53cb28cb 930 return map->sections_nb++;
5312bd8b
AK
931}
932
058bc4b5
PB
933static void phys_section_destroy(MemoryRegion *mr)
934{
dfde4e6e
PB
935 memory_region_unref(mr);
936
058bc4b5
PB
937 if (mr->subpage) {
938 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 939 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
940 g_free(subpage);
941 }
942}
943
6092666e 944static void phys_sections_free(PhysPageMap *map)
5312bd8b 945{
9affd6fc
PB
946 while (map->sections_nb > 0) {
947 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
948 phys_section_destroy(section->mr);
949 }
9affd6fc
PB
950 g_free(map->sections);
951 g_free(map->nodes);
5312bd8b
AK
952}
953
ac1970fb 954static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
955{
956 subpage_t *subpage;
a8170e5e 957 hwaddr base = section->offset_within_address_space
0f0cb164 958 & TARGET_PAGE_MASK;
97115a8d 959 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 960 d->map.nodes, d->map.sections);
0f0cb164
AK
961 MemoryRegionSection subsection = {
962 .offset_within_address_space = base,
052e87b0 963 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 964 };
a8170e5e 965 hwaddr start, end;
0f0cb164 966
f3705d53 967 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 968
f3705d53 969 if (!(existing->mr->subpage)) {
acc9d80b 970 subpage = subpage_init(d->as, base);
3be91e86 971 subsection.address_space = d->as;
0f0cb164 972 subsection.mr = &subpage->iomem;
ac1970fb 973 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 974 phys_section_add(&d->map, &subsection));
0f0cb164 975 } else {
f3705d53 976 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
977 }
978 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 979 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
980 subpage_register(subpage, start, end,
981 phys_section_add(&d->map, section));
0f0cb164
AK
982}
983
984
052e87b0
PB
985static void register_multipage(AddressSpaceDispatch *d,
986 MemoryRegionSection *section)
33417e70 987{
a8170e5e 988 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 989 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
990 uint64_t num_pages = int128_get64(int128_rshift(section->size,
991 TARGET_PAGE_BITS));
dd81124b 992
733d5ef5
PB
993 assert(num_pages);
994 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
995}
996
ac1970fb 997static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 998{
89ae337a 999 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1000 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1001 MemoryRegionSection now = *section, remain = *section;
052e87b0 1002 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1003
733d5ef5
PB
1004 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1005 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1006 - now.offset_within_address_space;
1007
052e87b0 1008 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1009 register_subpage(d, &now);
733d5ef5 1010 } else {
052e87b0 1011 now.size = int128_zero();
733d5ef5 1012 }
052e87b0
PB
1013 while (int128_ne(remain.size, now.size)) {
1014 remain.size = int128_sub(remain.size, now.size);
1015 remain.offset_within_address_space += int128_get64(now.size);
1016 remain.offset_within_region += int128_get64(now.size);
69b67646 1017 now = remain;
052e87b0 1018 if (int128_lt(remain.size, page_size)) {
733d5ef5 1019 register_subpage(d, &now);
88266249 1020 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1021 now.size = page_size;
ac1970fb 1022 register_subpage(d, &now);
69b67646 1023 } else {
052e87b0 1024 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1025 register_multipage(d, &now);
69b67646 1026 }
0f0cb164
AK
1027 }
1028}
1029
62a2744c
SY
1030void qemu_flush_coalesced_mmio_buffer(void)
1031{
1032 if (kvm_enabled())
1033 kvm_flush_coalesced_mmio_buffer();
1034}
1035
b2a8658e
UD
1036void qemu_mutex_lock_ramlist(void)
1037{
1038 qemu_mutex_lock(&ram_list.mutex);
1039}
1040
1041void qemu_mutex_unlock_ramlist(void)
1042{
1043 qemu_mutex_unlock(&ram_list.mutex);
1044}
1045
e1e84ba0 1046#ifdef __linux__
c902760f
MT
1047
1048#include <sys/vfs.h>
1049
1050#define HUGETLBFS_MAGIC 0x958458f6
1051
fc7a5800 1052static long gethugepagesize(const char *path, Error **errp)
c902760f
MT
1053{
1054 struct statfs fs;
1055 int ret;
1056
1057 do {
9742bf26 1058 ret = statfs(path, &fs);
c902760f
MT
1059 } while (ret != 0 && errno == EINTR);
1060
1061 if (ret != 0) {
fc7a5800
HT
1062 error_setg_errno(errp, errno, "failed to get page size of file %s",
1063 path);
9742bf26 1064 return 0;
c902760f
MT
1065 }
1066
1067 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1068 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1069
1070 return fs.f_bsize;
1071}
1072
04b16653
AW
1073static void *file_ram_alloc(RAMBlock *block,
1074 ram_addr_t memory,
7f56e740
PB
1075 const char *path,
1076 Error **errp)
c902760f
MT
1077{
1078 char *filename;
8ca761f6
PF
1079 char *sanitized_name;
1080 char *c;
557529dd 1081 void *area = NULL;
c902760f 1082 int fd;
557529dd 1083 uint64_t hpagesize;
fc7a5800 1084 Error *local_err = NULL;
c902760f 1085
fc7a5800
HT
1086 hpagesize = gethugepagesize(path, &local_err);
1087 if (local_err) {
1088 error_propagate(errp, local_err);
f9a49dfa 1089 goto error;
c902760f 1090 }
a2b257d6 1091 block->mr->align = hpagesize;
c902760f
MT
1092
1093 if (memory < hpagesize) {
557529dd
HT
1094 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1095 "or larger than huge page size 0x%" PRIx64,
1096 memory, hpagesize);
1097 goto error;
c902760f
MT
1098 }
1099
1100 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1101 error_setg(errp,
1102 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1103 goto error;
c902760f
MT
1104 }
1105
8ca761f6 1106 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1107 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1108 for (c = sanitized_name; *c != '\0'; c++) {
1109 if (*c == '/')
1110 *c = '_';
1111 }
1112
1113 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1114 sanitized_name);
1115 g_free(sanitized_name);
c902760f
MT
1116
1117 fd = mkstemp(filename);
1118 if (fd < 0) {
7f56e740
PB
1119 error_setg_errno(errp, errno,
1120 "unable to create backing store for hugepages");
e4ada482 1121 g_free(filename);
f9a49dfa 1122 goto error;
c902760f
MT
1123 }
1124 unlink(filename);
e4ada482 1125 g_free(filename);
c902760f
MT
1126
1127 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1128
1129 /*
1130 * ftruncate is not supported by hugetlbfs in older
1131 * hosts, so don't bother bailing out on errors.
1132 * If anything goes wrong with it under other filesystems,
1133 * mmap will fail.
1134 */
7f56e740 1135 if (ftruncate(fd, memory)) {
9742bf26 1136 perror("ftruncate");
7f56e740 1137 }
c902760f 1138
dbcb8981
PB
1139 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1140 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1141 fd, 0);
c902760f 1142 if (area == MAP_FAILED) {
7f56e740
PB
1143 error_setg_errno(errp, errno,
1144 "unable to map backing store for hugepages");
9742bf26 1145 close(fd);
f9a49dfa 1146 goto error;
c902760f 1147 }
ef36fa14
MT
1148
1149 if (mem_prealloc) {
38183310 1150 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1151 }
1152
04b16653 1153 block->fd = fd;
c902760f 1154 return area;
f9a49dfa
MT
1155
1156error:
1157 if (mem_prealloc) {
e4d9df4f 1158 error_report("%s\n", error_get_pretty(*errp));
f9a49dfa
MT
1159 exit(1);
1160 }
1161 return NULL;
c902760f
MT
1162}
1163#endif
1164
d17b5288 1165static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1166{
1167 RAMBlock *block, *next_block;
3e837b2c 1168 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1169
49cd9ac6
SH
1170 assert(size != 0); /* it would hand out same offset multiple times */
1171
a3161038 1172 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1173 return 0;
1174
a3161038 1175 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1176 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653 1177
62be4e3a 1178 end = block->offset + block->max_length;
04b16653 1179
a3161038 1180 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1181 if (next_block->offset >= end) {
1182 next = MIN(next, next_block->offset);
1183 }
1184 }
1185 if (next - end >= size && next - end < mingap) {
3e837b2c 1186 offset = end;
04b16653
AW
1187 mingap = next - end;
1188 }
1189 }
3e837b2c
AW
1190
1191 if (offset == RAM_ADDR_MAX) {
1192 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1193 (uint64_t)size);
1194 abort();
1195 }
1196
04b16653
AW
1197 return offset;
1198}
1199
652d7ec2 1200ram_addr_t last_ram_offset(void)
d17b5288
AW
1201{
1202 RAMBlock *block;
1203 ram_addr_t last = 0;
1204
a3161038 1205 QTAILQ_FOREACH(block, &ram_list.blocks, next)
62be4e3a 1206 last = MAX(last, block->offset + block->max_length);
d17b5288
AW
1207
1208 return last;
1209}
1210
ddb97f1d
JB
1211static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1212{
1213 int ret;
ddb97f1d
JB
1214
1215 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2ff3de68
MA
1216 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1217 "dump-guest-core", true)) {
ddb97f1d
JB
1218 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1219 if (ret) {
1220 perror("qemu_madvise");
1221 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1222 "but dump_guest_core=off specified\n");
1223 }
1224 }
1225}
1226
20cfe881 1227static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1228{
20cfe881 1229 RAMBlock *block;
84b89d78 1230
a3161038 1231 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1232 if (block->offset == addr) {
20cfe881 1233 return block;
c5705a77
AK
1234 }
1235 }
20cfe881
HT
1236
1237 return NULL;
1238}
1239
1240void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1241{
1242 RAMBlock *new_block = find_ram_block(addr);
1243 RAMBlock *block;
1244
c5705a77
AK
1245 assert(new_block);
1246 assert(!new_block->idstr[0]);
84b89d78 1247
09e5ab63
AL
1248 if (dev) {
1249 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1250 if (id) {
1251 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1252 g_free(id);
84b89d78
CM
1253 }
1254 }
1255 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1256
b2a8658e
UD
1257 /* This assumes the iothread lock is taken here too. */
1258 qemu_mutex_lock_ramlist();
a3161038 1259 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1260 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1261 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1262 new_block->idstr);
1263 abort();
1264 }
1265 }
b2a8658e 1266 qemu_mutex_unlock_ramlist();
c5705a77
AK
1267}
1268
20cfe881
HT
1269void qemu_ram_unset_idstr(ram_addr_t addr)
1270{
1271 RAMBlock *block = find_ram_block(addr);
1272
1273 if (block) {
1274 memset(block->idstr, 0, sizeof(block->idstr));
1275 }
1276}
1277
8490fc78
LC
1278static int memory_try_enable_merging(void *addr, size_t len)
1279{
2ff3de68 1280 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1281 /* disabled by the user */
1282 return 0;
1283 }
1284
1285 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1286}
1287
62be4e3a
MT
1288/* Only legal before guest might have detected the memory size: e.g. on
1289 * incoming migration, or right after reset.
1290 *
1291 * As the memory core doesn't know how memory is accessed, it is up to
1292 * resize callback to update device state and/or add assertions to detect
1293 * misuse, if necessary.
1294 */
1295int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1296{
1297 RAMBlock *block = find_ram_block(base);
1298
1299 assert(block);
1300
1301 if (block->used_length == newsize) {
1302 return 0;
1303 }
1304
1305 if (!(block->flags & RAM_RESIZEABLE)) {
1306 error_setg_errno(errp, EINVAL,
1307 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1308 " in != 0x" RAM_ADDR_FMT, block->idstr,
1309 newsize, block->used_length);
1310 return -EINVAL;
1311 }
1312
1313 if (block->max_length < newsize) {
1314 error_setg_errno(errp, EINVAL,
1315 "Length too large: %s: 0x" RAM_ADDR_FMT
1316 " > 0x" RAM_ADDR_FMT, block->idstr,
1317 newsize, block->max_length);
1318 return -EINVAL;
1319 }
1320
1321 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1322 block->used_length = newsize;
1323 cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
1324 memory_region_set_size(block->mr, newsize);
1325 if (block->resized) {
1326 block->resized(block->idstr, newsize, block->host);
1327 }
1328 return 0;
1329}
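/* Usage sketch added for this annotated listing (illustrative only, not part
 * of the original file; block_offset, new_size and err are hypothetical
 * names): a block created with qemu_ram_alloc_resizeable() may later be
 * resized, e.g. qemu_ram_resize(block_offset, new_size, &err), provided
 * new_size does not exceed the max_length fixed at allocation time.
 */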
1330
ef701d7b 1331static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1332{
e1c57ab8 1333 RAMBlock *block;
2152f5ca
JQ
1334 ram_addr_t old_ram_size, new_ram_size;
1335
1336 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1337
b2a8658e
UD
1338 /* This assumes the iothread lock is taken here too. */
1339 qemu_mutex_lock_ramlist();
9b8424d5 1340 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1341
1342 if (!new_block->host) {
1343 if (xen_enabled()) {
9b8424d5
MT
1344 xen_ram_alloc(new_block->offset, new_block->max_length,
1345 new_block->mr);
e1c57ab8 1346 } else {
9b8424d5 1347 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1348 &new_block->mr->align);
39228250 1349 if (!new_block->host) {
ef701d7b
HT
1350 error_setg_errno(errp, errno,
1351 "cannot set up guest memory '%s'",
1352 memory_region_name(new_block->mr));
1353 qemu_mutex_unlock_ramlist();
1354 return -1;
39228250 1355 }
9b8424d5 1356 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1357 }
c902760f 1358 }
94a6b54f 1359
abb26d63
PB
1360 /* Keep the list sorted from biggest to smallest block. */
1361 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 1362 if (block->max_length < new_block->max_length) {
abb26d63
PB
1363 break;
1364 }
1365 }
1366 if (block) {
1367 QTAILQ_INSERT_BEFORE(block, new_block, next);
1368 } else {
1369 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1370 }
0d6d3c87 1371 ram_list.mru_block = NULL;
94a6b54f 1372
f798b07f 1373 ram_list.version++;
b2a8658e 1374 qemu_mutex_unlock_ramlist();
f798b07f 1375
2152f5ca
JQ
1376 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1377
1378 if (new_ram_size > old_ram_size) {
1ab4c8ce
JQ
1379 int i;
1380 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1381 ram_list.dirty_memory[i] =
1382 bitmap_zero_extend(ram_list.dirty_memory[i],
1383 old_ram_size, new_ram_size);
1384 }
2152f5ca 1385 }
9b8424d5
MT
1386 cpu_physical_memory_set_dirty_range(new_block->offset,
1387 new_block->used_length);
94a6b54f 1388
a904c911
PB
1389 if (new_block->host) {
1390 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1391 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1392 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1393 if (kvm_enabled()) {
1394 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1395 }
e1c57ab8 1396 }
6f0437e8 1397
94a6b54f
PB
1398 return new_block->offset;
1399}
e9a1ab19 1400
0b183fc8 1401#ifdef __linux__
e1c57ab8 1402ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1403 bool share, const char *mem_path,
7f56e740 1404 Error **errp)
e1c57ab8
PB
1405{
1406 RAMBlock *new_block;
ef701d7b
HT
1407 ram_addr_t addr;
1408 Error *local_err = NULL;
e1c57ab8
PB
1409
1410 if (xen_enabled()) {
7f56e740
PB
1411 error_setg(errp, "-mem-path not supported with Xen");
1412 return -1;
e1c57ab8
PB
1413 }
1414
1415 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1416 /*
1417 * file_ram_alloc() needs to allocate just like
1418 * phys_mem_alloc, but we haven't bothered to provide
1419 * a hook there.
1420 */
7f56e740
PB
1421 error_setg(errp,
1422 "-mem-path not supported with this accelerator");
1423 return -1;
e1c57ab8
PB
1424 }
1425
1426 size = TARGET_PAGE_ALIGN(size);
1427 new_block = g_malloc0(sizeof(*new_block));
1428 new_block->mr = mr;
9b8424d5
MT
1429 new_block->used_length = size;
1430 new_block->max_length = size;
dbcb8981 1431 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1432 new_block->host = file_ram_alloc(new_block, size,
1433 mem_path, errp);
1434 if (!new_block->host) {
1435 g_free(new_block);
1436 return -1;
1437 }
1438
ef701d7b
HT
1439 addr = ram_block_add(new_block, &local_err);
1440 if (local_err) {
1441 g_free(new_block);
1442 error_propagate(errp, local_err);
1443 return -1;
1444 }
1445 return addr;
e1c57ab8 1446}
0b183fc8 1447#endif
e1c57ab8 1448
62be4e3a
MT
1449static
1450ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1451 void (*resized)(const char*,
1452 uint64_t length,
1453 void *host),
1454 void *host, bool resizeable,
ef701d7b 1455 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1456{
1457 RAMBlock *new_block;
ef701d7b
HT
1458 ram_addr_t addr;
1459 Error *local_err = NULL;
e1c57ab8
PB
1460
1461 size = TARGET_PAGE_ALIGN(size);
62be4e3a 1462 max_size = TARGET_PAGE_ALIGN(max_size);
e1c57ab8
PB
1463 new_block = g_malloc0(sizeof(*new_block));
1464 new_block->mr = mr;
62be4e3a 1465 new_block->resized = resized;
9b8424d5
MT
1466 new_block->used_length = size;
1467 new_block->max_length = max_size;
62be4e3a 1468 assert(max_size >= size);
e1c57ab8
PB
1469 new_block->fd = -1;
1470 new_block->host = host;
1471 if (host) {
7bd4f430 1472 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1473 }
62be4e3a
MT
1474 if (resizeable) {
1475 new_block->flags |= RAM_RESIZEABLE;
1476 }
ef701d7b
HT
1477 addr = ram_block_add(new_block, &local_err);
1478 if (local_err) {
1479 g_free(new_block);
1480 error_propagate(errp, local_err);
1481 return -1;
1482 }
1483 return addr;
e1c57ab8
PB
1484}
1485
62be4e3a
MT
1486ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1487 MemoryRegion *mr, Error **errp)
1488{
1489 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1490}
1491
ef701d7b 1492ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1493{
62be4e3a
MT
1494 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1495}
1496
1497ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1498 void (*resized)(const char*,
1499 uint64_t length,
1500 void *host),
1501 MemoryRegion *mr, Error **errp)
1502{
1503 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
1504}
1505
1f2e98b6
AW
1506void qemu_ram_free_from_ptr(ram_addr_t addr)
1507{
1508 RAMBlock *block;
1509
b2a8658e
UD
1510 /* This assumes the iothread lock is taken here too. */
1511 qemu_mutex_lock_ramlist();
a3161038 1512 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1513 if (addr == block->offset) {
a3161038 1514 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1515 ram_list.mru_block = NULL;
f798b07f 1516 ram_list.version++;
7267c094 1517 g_free(block);
b2a8658e 1518 break;
1f2e98b6
AW
1519 }
1520 }
b2a8658e 1521 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1522}
1523
c227f099 1524void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1525{
04b16653
AW
1526 RAMBlock *block;
1527
b2a8658e
UD
1528 /* This assumes the iothread lock is taken here too. */
1529 qemu_mutex_lock_ramlist();
a3161038 1530 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1531 if (addr == block->offset) {
a3161038 1532 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1533 ram_list.mru_block = NULL;
f798b07f 1534 ram_list.version++;
7bd4f430 1535 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1536 ;
dfeaf2ab
MA
1537 } else if (xen_enabled()) {
1538 xen_invalidate_map_cache_entry(block->host);
089f3f76 1539#ifndef _WIN32
3435f395 1540 } else if (block->fd >= 0) {
9b8424d5 1541 munmap(block->host, block->max_length);
3435f395 1542 close(block->fd);
089f3f76 1543#endif
04b16653 1544 } else {
9b8424d5 1545 qemu_anon_ram_free(block->host, block->max_length);
04b16653 1546 }
7267c094 1547 g_free(block);
b2a8658e 1548 break;
04b16653
AW
1549 }
1550 }
b2a8658e 1551 qemu_mutex_unlock_ramlist();
04b16653 1552
e9a1ab19
FB
1553}
1554
cd19cfa2
HY
1555#ifndef _WIN32
1556void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1557{
1558 RAMBlock *block;
1559 ram_addr_t offset;
1560 int flags;
1561 void *area, *vaddr;
1562
a3161038 1563 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2 1564 offset = addr - block->offset;
9b8424d5 1565 if (offset < block->max_length) {
1240be24 1566 vaddr = ramblock_ptr(block, offset);
7bd4f430 1567 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1568 ;
dfeaf2ab
MA
1569 } else if (xen_enabled()) {
1570 abort();
cd19cfa2
HY
1571 } else {
1572 flags = MAP_FIXED;
1573 munmap(vaddr, length);
3435f395 1574 if (block->fd >= 0) {
dbcb8981
PB
1575 flags |= (block->flags & RAM_SHARED ?
1576 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1577 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1578 flags, block->fd, offset);
cd19cfa2 1579 } else {
2eb9fbaa
MA
1580 /*
1581 * Remap needs to match alloc. Accelerators that
1582 * set phys_mem_alloc never remap. If they did,
1583 * we'd need a remap hook here.
1584 */
1585 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1586
cd19cfa2
HY
1587 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1588 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1589 flags, -1, 0);
cd19cfa2
HY
1590 }
1591 if (area != vaddr) {
f15fbc4b
AP
1592 fprintf(stderr, "Could not remap addr: "
1593 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1594 length, addr);
1595 exit(1);
1596 }
8490fc78 1597 memory_try_enable_merging(vaddr, length);
ddb97f1d 1598 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1599 }
1600 return;
1601 }
1602 }
1603}
1604#endif /* !_WIN32 */
1605
a35ba7be
PB
1606int qemu_get_ram_fd(ram_addr_t addr)
1607{
1608 RAMBlock *block = qemu_get_ram_block(addr);
1609
1610 return block->fd;
1611}
1612
3fd74b84
DM
1613void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1614{
1615 RAMBlock *block = qemu_get_ram_block(addr);
1616
1240be24 1617 return ramblock_ptr(block, 0);
3fd74b84
DM
1618}
1619
1b5ec234
PB
1620/* Return a host pointer to ram allocated with qemu_ram_alloc.
1621 With the exception of the softmmu code in this file, this should
1622 only be used for local memory (e.g. video ram) that the device owns,
1623 and knows it isn't going to access beyond the end of the block.
1624
1625 It should not be used for general purpose DMA.
1626 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1627 */
1628void *qemu_get_ram_ptr(ram_addr_t addr)
1629{
1630 RAMBlock *block = qemu_get_ram_block(addr);
1631
0d6d3c87
PB
1632 if (xen_enabled()) {
1633 /* We need to check if the requested address is in the RAM
1634 * because we don't want to map the entire memory in QEMU.
1635 * In that case just map until the end of the page.
1636 */
1637 if (block->offset == 0) {
1638 return xen_map_cache(addr, 0, 0);
1639 } else if (block->host == NULL) {
1640 block->host =
9b8424d5 1641 xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87
PB
1642 }
1643 }
1240be24 1644 return ramblock_ptr(block, addr - block->offset);
dc828ca1
PB
1645}
1646
38bee5dc
SS
1647/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1648 * but takes a size argument */
cb85f7ab 1649static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1650{
8ab934f9
SS
1651 if (*size == 0) {
1652 return NULL;
1653 }
868bb33f 1654 if (xen_enabled()) {
e41d7c69 1655 return xen_map_cache(addr, *size, 1);
868bb33f 1656 } else {
38bee5dc
SS
1657 RAMBlock *block;
1658
a3161038 1659 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5
MT
1660 if (addr - block->offset < block->max_length) {
1661 if (addr - block->offset + *size > block->max_length)
1662 *size = block->max_length - addr + block->offset;
1240be24 1663 return ramblock_ptr(block, addr - block->offset);
38bee5dc
SS
1664 }
1665 }
1666
1667 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1668 abort();
38bee5dc
SS
1669 }
1670}
1671
7443b437
PB
1672/* Some of the softmmu routines need to translate from a host pointer
1673 (typically a TLB entry) back to a ram offset. */
1b5ec234 1674MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1675{
94a6b54f
PB
1676 RAMBlock *block;
1677 uint8_t *host = ptr;
1678
868bb33f 1679 if (xen_enabled()) {
e41d7c69 1680 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1681 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1682 }
1683
23887b79 1684 block = ram_list.mru_block;
9b8424d5 1685 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1686 goto found;
1687 }
1688
a3161038 1689 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
1690 /* This case occurs when the block is not mapped. */
1691 if (block->host == NULL) {
1692 continue;
1693 }
9b8424d5 1694 if (host - block->host < block->max_length) {
23887b79 1695 goto found;
f471a17e 1696 }
94a6b54f 1697 }
432d268c 1698
1b5ec234 1699 return NULL;
23887b79
PB
1700
1701found:
1702 *ram_addr = block->offset + (host - block->host);
1b5ec234 1703 return block->mr;
e890261f 1704}
f471a17e 1705
a8170e5e 1706static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1707 uint64_t val, unsigned size)
9fa3e853 1708{
52159192 1709 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1710 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1711 }
0e0df1e2
AK
1712 switch (size) {
1713 case 1:
1714 stb_p(qemu_get_ram_ptr(ram_addr), val);
1715 break;
1716 case 2:
1717 stw_p(qemu_get_ram_ptr(ram_addr), val);
1718 break;
1719 case 4:
1720 stl_p(qemu_get_ram_ptr(ram_addr), val);
1721 break;
1722 default:
1723 abort();
3a7d929e 1724 }
6886867e 1725 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
f23db169
FB
1726 /* we remove the notdirty callback only if the code has been
1727 flushed */
a2cd8c85 1728 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1729 CPUArchState *env = current_cpu->env_ptr;
93afeade 1730 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1731 }
9fa3e853
FB
1732}
1733
b018ddf6
PB
1734static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1735 unsigned size, bool is_write)
1736{
1737 return is_write;
1738}
1739
0e0df1e2 1740static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1741 .write = notdirty_mem_write,
b018ddf6 1742 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1743 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1744};
1745
0f459d16 1746/* Generate a debug exception if a watchpoint has been hit. */
05068c0d 1747static void check_watchpoint(int offset, int len, int flags)
0f459d16 1748{
93afeade
AF
1749 CPUState *cpu = current_cpu;
1750 CPUArchState *env = cpu->env_ptr;
06d55cc1 1751 target_ulong pc, cs_base;
0f459d16 1752 target_ulong vaddr;
a1d1bb31 1753 CPUWatchpoint *wp;
06d55cc1 1754 int cpu_flags;
0f459d16 1755
ff4700b0 1756 if (cpu->watchpoint_hit) {
06d55cc1
AL
1757 /* We re-entered the check after replacing the TB. Now raise
1758 * the debug interrupt so that it will trigger after the
1759 * current instruction. */
93afeade 1760 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1761 return;
1762 }
93afeade 1763 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1764 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1765 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1766 && (wp->flags & flags)) {
08225676
PM
1767 if (flags == BP_MEM_READ) {
1768 wp->flags |= BP_WATCHPOINT_HIT_READ;
1769 } else {
1770 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1771 }
1772 wp->hitaddr = vaddr;
ff4700b0
AF
1773 if (!cpu->watchpoint_hit) {
1774 cpu->watchpoint_hit = wp;
239c51a5 1775 tb_check_watchpoint(cpu);
6e140f28 1776 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1777 cpu->exception_index = EXCP_DEBUG;
5638d180 1778 cpu_loop_exit(cpu);
6e140f28
AL
1779 } else {
1780 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1781 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1782 cpu_resume_from_signal(cpu, NULL);
6e140f28 1783 }
06d55cc1 1784 }
6e140f28
AL
1785 } else {
1786 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1787 }
1788 }
1789}
1790
6658ffb8
PB
1791/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1792 so these check for a hit then pass through to the normal out-of-line
1793 phys routines. */
a8170e5e 1794static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1795 unsigned size)
6658ffb8 1796{
05068c0d 1797 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
1ec9b909 1798 switch (size) {
2c17449b 1799 case 1: return ldub_phys(&address_space_memory, addr);
41701aa4 1800 case 2: return lduw_phys(&address_space_memory, addr);
fdfba1a2 1801 case 4: return ldl_phys(&address_space_memory, addr);
1ec9b909
AK
1802 default: abort();
1803 }
6658ffb8
PB
1804}
1805
a8170e5e 1806static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1807 uint64_t val, unsigned size)
6658ffb8 1808{
05068c0d 1809 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
1ec9b909 1810 switch (size) {
67364150 1811 case 1:
db3be60d 1812 stb_phys(&address_space_memory, addr, val);
67364150
MF
1813 break;
1814 case 2:
5ce5944d 1815 stw_phys(&address_space_memory, addr, val);
67364150
MF
1816 break;
1817 case 4:
ab1da857 1818 stl_phys(&address_space_memory, addr, val);
67364150 1819 break;
1ec9b909
AK
1820 default: abort();
1821 }
6658ffb8
PB
1822}
1823
1ec9b909
AK
1824static const MemoryRegionOps watch_mem_ops = {
1825 .read = watch_mem_read,
1826 .write = watch_mem_write,
1827 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1828};
6658ffb8 1829
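/*
 * Illustrative sketch, not from the original file: how a debug front end
 * would arm the machinery above. cpu_watchpoint_insert() is assumed to take
 * (cpu, addr, len, flags, &wp); once the watchpoint is registered, guest
 * accesses to the watched page are routed through watch_mem_read() and
 * watch_mem_write() via the TLB.
 */
static int example_watch_guest_writes(CPUState *cpu, vaddr addr)
{
    CPUWatchpoint *wp;

    /* trap 4-byte guest writes and stop before the access completes */
    return cpu_watchpoint_insert(cpu, addr, 4,
                                 BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS, &wp);
}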
a8170e5e 1830static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1831 unsigned len)
db7b5426 1832{
acc9d80b 1833 subpage_t *subpage = opaque;
ff6cff75 1834 uint8_t buf[8];
791af8c8 1835
db7b5426 1836#if defined(DEBUG_SUBPAGE)
016e9d62 1837 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1838 subpage, len, addr);
db7b5426 1839#endif
acc9d80b
JK
1840 address_space_read(subpage->as, addr + subpage->base, buf, len);
1841 switch (len) {
1842 case 1:
1843 return ldub_p(buf);
1844 case 2:
1845 return lduw_p(buf);
1846 case 4:
1847 return ldl_p(buf);
ff6cff75
PB
1848 case 8:
1849 return ldq_p(buf);
acc9d80b
JK
1850 default:
1851 abort();
1852 }
db7b5426
BS
1853}
1854
a8170e5e 1855static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1856 uint64_t value, unsigned len)
db7b5426 1857{
acc9d80b 1858 subpage_t *subpage = opaque;
ff6cff75 1859 uint8_t buf[8];
acc9d80b 1860
db7b5426 1861#if defined(DEBUG_SUBPAGE)
016e9d62 1862 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1863 " value %"PRIx64"\n",
1864 __func__, subpage, len, addr, value);
db7b5426 1865#endif
acc9d80b
JK
1866 switch (len) {
1867 case 1:
1868 stb_p(buf, value);
1869 break;
1870 case 2:
1871 stw_p(buf, value);
1872 break;
1873 case 4:
1874 stl_p(buf, value);
1875 break;
ff6cff75
PB
1876 case 8:
1877 stq_p(buf, value);
1878 break;
acc9d80b
JK
1879 default:
1880 abort();
1881 }
1882 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1883}
1884
c353e4cc 1885static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 1886 unsigned len, bool is_write)
c353e4cc 1887{
acc9d80b 1888 subpage_t *subpage = opaque;
c353e4cc 1889#if defined(DEBUG_SUBPAGE)
016e9d62 1890 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 1891 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1892#endif
1893
acc9d80b 1894 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 1895 len, is_write);
c353e4cc
PB
1896}
1897
70c68e44
AK
1898static const MemoryRegionOps subpage_ops = {
1899 .read = subpage_read,
1900 .write = subpage_write,
ff6cff75
PB
1901 .impl.min_access_size = 1,
1902 .impl.max_access_size = 8,
1903 .valid.min_access_size = 1,
1904 .valid.max_access_size = 8,
c353e4cc 1905 .valid.accepts = subpage_accepts,
70c68e44 1906 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1907};
1908
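/*
 * Illustrative sketch, not from the original file: the same MemoryRegionOps
 * pattern used by subpage_ops, as a device model would use it. The register
 * behaviour, region size and 0x10000000 base address are made up for the
 * example; memory_region_add_subregion() is assumed to be the usual way of
 * mapping the region into the system address space.
 */
static uint64_t example_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;   /* every register reads as zero in this sketch */
}

static void example_mmio_write(void *opaque, hwaddr addr,
                               uint64_t val, unsigned size)
{
    /* a real device would decode 'addr' and update its state here */
}

static const MemoryRegionOps example_mmio_ops = {
    .read = example_mmio_read,
    .write = example_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void example_map_mmio(MemoryRegion *mmio)
{
    memory_region_init_io(mmio, NULL, &example_mmio_ops, NULL,
                          "example-mmio", 0x1000);
    memory_region_add_subregion(get_system_memory(), 0x10000000, mmio);
}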
c227f099 1909static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1910 uint16_t section)
db7b5426
BS
1911{
1912 int idx, eidx;
1913
1914 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1915 return -1;
1916 idx = SUBPAGE_IDX(start);
1917 eidx = SUBPAGE_IDX(end);
1918#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1919 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1920 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1921#endif
db7b5426 1922 for (; idx <= eidx; idx++) {
5312bd8b 1923 mmio->sub_section[idx] = section;
db7b5426
BS
1924 }
1925
1926 return 0;
1927}
1928
acc9d80b 1929static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1930{
c227f099 1931 subpage_t *mmio;
db7b5426 1932
7267c094 1933 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1934
acc9d80b 1935 mmio->as = as;
1eec614b 1936 mmio->base = base;
2c9b15ca 1937 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 1938 NULL, TARGET_PAGE_SIZE);
b3b00c78 1939 mmio->iomem.subpage = true;
db7b5426 1940#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1941 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1942 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1943#endif
b41aac4f 1944 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1945
1946 return mmio;
1947}
1948
a656e22f
PC
1949static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1950 MemoryRegion *mr)
5312bd8b 1951{
a656e22f 1952 assert(as);
5312bd8b 1953 MemoryRegionSection section = {
a656e22f 1954 .address_space = as,
5312bd8b
AK
1955 .mr = mr,
1956 .offset_within_address_space = 0,
1957 .offset_within_region = 0,
052e87b0 1958 .size = int128_2_64(),
5312bd8b
AK
1959 };
1960
53cb28cb 1961 return phys_section_add(map, &section);
5312bd8b
AK
1962}
1963
77717094 1964MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1965{
77717094 1966 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1967}
1968
e9179ce1
AK
1969static void io_mem_init(void)
1970{
1f6245e5 1971 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 1972 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 1973 NULL, UINT64_MAX);
2c9b15ca 1974 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 1975 NULL, UINT64_MAX);
2c9b15ca 1976 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 1977 NULL, UINT64_MAX);
e9179ce1
AK
1978}
1979
ac1970fb 1980static void mem_begin(MemoryListener *listener)
00752703
PB
1981{
1982 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1983 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1984 uint16_t n;
1985
a656e22f 1986 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1987 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1988 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1989 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1990 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1991 assert(n == PHYS_SECTION_ROM);
a656e22f 1992 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1993 assert(n == PHYS_SECTION_WATCH);
00752703 1994
9736e55b 1995 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1996 d->as = as;
1997 as->next_dispatch = d;
1998}
1999
2000static void mem_commit(MemoryListener *listener)
ac1970fb 2001{
89ae337a 2002 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2003 AddressSpaceDispatch *cur = as->dispatch;
2004 AddressSpaceDispatch *next = as->next_dispatch;
2005
53cb28cb 2006 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2007
0475d94f 2008 as->dispatch = next;
b41aac4f 2009
53cb28cb
MA
2010 if (cur) {
2011 phys_sections_free(&cur->map);
2012 g_free(cur);
2013 }
9affd6fc
PB
2014}
2015
1d71148e 2016static void tcg_commit(MemoryListener *listener)
50c1e149 2017{
182735ef 2018 CPUState *cpu;
117712c3
AK
2019
2020 /* since each CPU stores ram addresses in its TLB cache, we must
2021 reset the modified entries */
2022 /* XXX: slow ! */
bdc44640 2023 CPU_FOREACH(cpu) {
33bde2e1
EI
2024 /* FIXME: Disentangle the cpu.h circular file dependencies so we can
2025 directly get the right CPU from listener. */
2026 if (cpu->tcg_as_listener != listener) {
2027 continue;
2028 }
00c8cb0a 2029 tlb_flush(cpu, 1);
117712c3 2030 }
50c1e149
AK
2031}
2032
93632747
AK
2033static void core_log_global_start(MemoryListener *listener)
2034{
981fdf23 2035 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
2036}
2037
2038static void core_log_global_stop(MemoryListener *listener)
2039{
981fdf23 2040 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
2041}
2042
93632747 2043static MemoryListener core_memory_listener = {
93632747
AK
2044 .log_global_start = core_log_global_start,
2045 .log_global_stop = core_log_global_stop,
ac1970fb 2046 .priority = 1,
93632747
AK
2047};
2048
ac1970fb
AK
2049void address_space_init_dispatch(AddressSpace *as)
2050{
00752703 2051 as->dispatch = NULL;
89ae337a 2052 as->dispatch_listener = (MemoryListener) {
ac1970fb 2053 .begin = mem_begin,
00752703 2054 .commit = mem_commit,
ac1970fb
AK
2055 .region_add = mem_add,
2056 .region_nop = mem_add,
2057 .priority = 0,
2058 };
89ae337a 2059 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2060}
2061
6e48e8f9
PB
2062void address_space_unregister(AddressSpace *as)
2063{
2064 memory_listener_unregister(&as->dispatch_listener);
2065}
2066
83f3c251
AK
2067void address_space_destroy_dispatch(AddressSpace *as)
2068{
2069 AddressSpaceDispatch *d = as->dispatch;
2070
83f3c251
AK
2071 g_free(d);
2072 as->dispatch = NULL;
2073}
2074
62152b8a
AK
2075static void memory_map_init(void)
2076{
7267c094 2077 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2078
57271d63 2079 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2080 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2081
7267c094 2082 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2083 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2084 65536);
7dca8043 2085 address_space_init(&address_space_io, system_io, "I/O");
93632747 2086
f6790af6 2087 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
2088}
2089
2090MemoryRegion *get_system_memory(void)
2091{
2092 return system_memory;
2093}
2094
309cb471
AK
2095MemoryRegion *get_system_io(void)
2096{
2097 return system_io;
2098}
2099
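/*
 * Illustrative sketch, not from the original file: a port I/O write issued
 * through the I/O address space set up in memory_map_init(); the port
 * number passed by the caller is an arbitrary assumption.
 */
static void example_outb(uint32_t port, uint8_t value)
{
    address_space_rw(&address_space_io, port, &value, 1, true);
}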
e2eef170
PB
2100#endif /* !defined(CONFIG_USER_ONLY) */
2101
13eb76e0
FB
2102/* physical memory access (slow version, mainly for debug) */
2103#if defined(CONFIG_USER_ONLY)
f17ec444 2104int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2105 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2106{
2107 int l, flags;
2108 target_ulong page;
53a5960a 2109 void * p;
13eb76e0
FB
2110
2111 while (len > 0) {
2112 page = addr & TARGET_PAGE_MASK;
2113 l = (page + TARGET_PAGE_SIZE) - addr;
2114 if (l > len)
2115 l = len;
2116 flags = page_get_flags(page);
2117 if (!(flags & PAGE_VALID))
a68fe89c 2118 return -1;
13eb76e0
FB
2119 if (is_write) {
2120 if (!(flags & PAGE_WRITE))
a68fe89c 2121 return -1;
579a97f7 2122 /* XXX: this code should not depend on lock_user */
72fb7daa 2123 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2124 return -1;
72fb7daa
AJ
2125 memcpy(p, buf, l);
2126 unlock_user(p, addr, l);
13eb76e0
FB
2127 } else {
2128 if (!(flags & PAGE_READ))
a68fe89c 2129 return -1;
579a97f7 2130 /* XXX: this code should not depend on lock_user */
72fb7daa 2131 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2132 return -1;
72fb7daa 2133 memcpy(buf, p, l);
5b257578 2134 unlock_user(p, addr, 0);
13eb76e0
FB
2135 }
2136 len -= l;
2137 buf += l;
2138 addr += l;
2139 }
a68fe89c 2140 return 0;
13eb76e0 2141}
8df1cd07 2142
13eb76e0 2143#else
51d7a9eb 2144
a8170e5e
AK
2145static void invalidate_and_set_dirty(hwaddr addr,
2146 hwaddr length)
51d7a9eb 2147{
f874bf90
PM
2148 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2149 tb_invalidate_phys_range(addr, addr + length, 0);
6886867e 2150 cpu_physical_memory_set_dirty_range_nocode(addr, length);
51d7a9eb 2151 }
e226939d 2152 xen_modified_memory(addr, length);
51d7a9eb
AP
2153}
2154
23326164 2155static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2156{
e1622f4b 2157 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2158
2159 /* Regions are assumed to support 1-4 byte accesses unless
2160 otherwise specified. */
23326164
RH
2161 if (access_size_max == 0) {
2162 access_size_max = 4;
2163 }
2164
2165 /* Bound the maximum access by the alignment of the address. */
2166 if (!mr->ops->impl.unaligned) {
2167 unsigned align_size_max = addr & -addr;
2168 if (align_size_max != 0 && align_size_max < access_size_max) {
2169 access_size_max = align_size_max;
2170 }
82f2563f 2171 }
23326164
RH
2172
2173 /* Don't attempt accesses larger than the maximum. */
2174 if (l > access_size_max) {
2175 l = access_size_max;
82f2563f 2176 }
098178f2
PB
2177 if (l & (l - 1)) {
2178 l = 1 << (qemu_fls(l) - 1);
2179 }
23326164
RH
2180
2181 return l;
82f2563f
PB
2182}
2183
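/*
 * Worked example for memory_access_size(): for a region with
 * valid.max_access_size == 4 and no unaligned support, an 8-byte request at
 * address 0x1006 is first capped to 4 bytes, then to 2 bytes by the
 * alignment test (0x1006 & -0x1006 == 2), so address_space_rw() below
 * issues a 2-byte access and loops for the remainder.
 */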
fd8aaa76 2184bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2185 int len, bool is_write)
13eb76e0 2186{
149f54b5 2187 hwaddr l;
13eb76e0 2188 uint8_t *ptr;
791af8c8 2189 uint64_t val;
149f54b5 2190 hwaddr addr1;
5c8a00ce 2191 MemoryRegion *mr;
fd8aaa76 2192 bool error = false;
3b46e624 2193
13eb76e0 2194 while (len > 0) {
149f54b5 2195 l = len;
5c8a00ce 2196 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2197
13eb76e0 2198 if (is_write) {
5c8a00ce
PB
2199 if (!memory_access_is_direct(mr, is_write)) {
2200 l = memory_access_size(mr, l, addr1);
4917cf44 2201 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2202 potential bugs */
23326164
RH
2203 switch (l) {
2204 case 8:
2205 /* 64 bit write access */
2206 val = ldq_p(buf);
2207 error |= io_mem_write(mr, addr1, val, 8);
2208 break;
2209 case 4:
1c213d19 2210 /* 32 bit write access */
c27004ec 2211 val = ldl_p(buf);
5c8a00ce 2212 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2213 break;
2214 case 2:
1c213d19 2215 /* 16 bit write access */
c27004ec 2216 val = lduw_p(buf);
5c8a00ce 2217 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2218 break;
2219 case 1:
1c213d19 2220 /* 8 bit write access */
c27004ec 2221 val = ldub_p(buf);
5c8a00ce 2222 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2223 break;
2224 default:
2225 abort();
13eb76e0 2226 }
2bbfa05d 2227 } else {
5c8a00ce 2228 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2229 /* RAM case */
5579c7f3 2230 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2231 memcpy(ptr, buf, l);
51d7a9eb 2232 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2233 }
2234 } else {
5c8a00ce 2235 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2236 /* I/O case */
5c8a00ce 2237 l = memory_access_size(mr, l, addr1);
23326164
RH
2238 switch (l) {
2239 case 8:
2240 /* 64 bit read access */
2241 error |= io_mem_read(mr, addr1, &val, 8);
2242 stq_p(buf, val);
2243 break;
2244 case 4:
13eb76e0 2245 /* 32 bit read access */
5c8a00ce 2246 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2247 stl_p(buf, val);
23326164
RH
2248 break;
2249 case 2:
13eb76e0 2250 /* 16 bit read access */
5c8a00ce 2251 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2252 stw_p(buf, val);
23326164
RH
2253 break;
2254 case 1:
1c213d19 2255 /* 8 bit read access */
5c8a00ce 2256 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2257 stb_p(buf, val);
23326164
RH
2258 break;
2259 default:
2260 abort();
13eb76e0
FB
2261 }
2262 } else {
2263 /* RAM case */
5c8a00ce 2264 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2265 memcpy(buf, ptr, l);
13eb76e0
FB
2266 }
2267 }
2268 len -= l;
2269 buf += l;
2270 addr += l;
2271 }
fd8aaa76
PB
2272
2273 return error;
13eb76e0 2274}
8df1cd07 2275
fd8aaa76 2276bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2277 const uint8_t *buf, int len)
2278{
fd8aaa76 2279 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2280}
2281
fd8aaa76 2282bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2283{
fd8aaa76 2284 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2285}
2286
2287
a8170e5e 2288void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2289 int len, int is_write)
2290{
fd8aaa76 2291 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2292}
2293
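/*
 * Illustrative sketch, not from the original file: copying a buffer to and
 * from guest physical memory with the helpers above. The 0x40000000 guest
 * address is arbitrary and assumed to be RAM-backed.
 */
static void example_guest_copy(void)
{
    uint8_t buf[16] = { 0 };
    hwaddr gpa = 0x40000000;

    address_space_write(&address_space_memory, gpa, buf, sizeof(buf));
    address_space_read(&address_space_memory, gpa, buf, sizeof(buf));
    /* convenience wrapper for the system address space */
    cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0 /* read */);
}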
582b55a9
AG
2294enum write_rom_type {
2295 WRITE_DATA,
2296 FLUSH_CACHE,
2297};
2298
2a221651 2299static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2300 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2301{
149f54b5 2302 hwaddr l;
d0ecd2aa 2303 uint8_t *ptr;
149f54b5 2304 hwaddr addr1;
5c8a00ce 2305 MemoryRegion *mr;
3b46e624 2306
d0ecd2aa 2307 while (len > 0) {
149f54b5 2308 l = len;
2a221651 2309 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2310
5c8a00ce
PB
2311 if (!(memory_region_is_ram(mr) ||
2312 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2313 /* do nothing */
2314 } else {
5c8a00ce 2315 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2316 /* ROM/RAM case */
5579c7f3 2317 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2318 switch (type) {
2319 case WRITE_DATA:
2320 memcpy(ptr, buf, l);
2321 invalidate_and_set_dirty(addr1, l);
2322 break;
2323 case FLUSH_CACHE:
2324 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2325 break;
2326 }
d0ecd2aa
FB
2327 }
2328 len -= l;
2329 buf += l;
2330 addr += l;
2331 }
2332}
2333
582b55a9 2334/* used for ROM loading : can write in RAM and ROM */
2a221651 2335void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2336 const uint8_t *buf, int len)
2337{
2a221651 2338 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2339}
2340
2341void cpu_flush_icache_range(hwaddr start, int len)
2342{
2343 /*
2344 * This function should do the same thing as an icache flush that was
2345 * triggered from within the guest. For TCG we are always cache coherent,
2346 * so there is no need to flush anything. For KVM / Xen we need to flush
2347 * the host's instruction cache at least.
2348 */
2349 if (tcg_enabled()) {
2350 return;
2351 }
2352
2a221651
EI
2353 cpu_physical_memory_write_rom_internal(&address_space_memory,
2354 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2355}
2356
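/*
 * Illustrative sketch, not from the original file: how a loader would place
 * a firmware blob and keep the host instruction cache consistent, mirroring
 * the ROM-loading path above. The load address is an assumption.
 */
static void example_load_firmware(const uint8_t *blob, int size)
{
    hwaddr load_addr = 0xfffc0000;   /* hypothetical ROM location */

    cpu_physical_memory_write_rom(&address_space_memory, load_addr,
                                  blob, size);
    cpu_flush_icache_range(load_addr, size);
}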
6d16c2f8 2357typedef struct {
d3e71559 2358 MemoryRegion *mr;
6d16c2f8 2359 void *buffer;
a8170e5e
AK
2360 hwaddr addr;
2361 hwaddr len;
6d16c2f8
AL
2362} BounceBuffer;
2363
2364static BounceBuffer bounce;
2365
ba223c29
AL
2366typedef struct MapClient {
2367 void *opaque;
2368 void (*callback)(void *opaque);
72cf2d4f 2369 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2370} MapClient;
2371
72cf2d4f
BS
2372static QLIST_HEAD(map_client_list, MapClient) map_client_list
2373 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2374
2375void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2376{
7267c094 2377 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2378
2379 client->opaque = opaque;
2380 client->callback = callback;
72cf2d4f 2381 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2382 return client;
2383}
2384
8b9c99d9 2385static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2386{
2387 MapClient *client = (MapClient *)_client;
2388
72cf2d4f 2389 QLIST_REMOVE(client, link);
7267c094 2390 g_free(client);
ba223c29
AL
2391}
2392
2393static void cpu_notify_map_clients(void)
2394{
2395 MapClient *client;
2396
72cf2d4f
BS
2397 while (!QLIST_EMPTY(&map_client_list)) {
2398 client = QLIST_FIRST(&map_client_list);
ba223c29 2399 client->callback(client->opaque);
34d5e948 2400 cpu_unregister_map_client(client);
ba223c29
AL
2401 }
2402}
2403
51644ab7
PB
2404bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2405{
5c8a00ce 2406 MemoryRegion *mr;
51644ab7
PB
2407 hwaddr l, xlat;
2408
2409 while (len > 0) {
2410 l = len;
5c8a00ce
PB
2411 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2412 if (!memory_access_is_direct(mr, is_write)) {
2413 l = memory_access_size(mr, l, addr);
2414 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2415 return false;
2416 }
2417 }
2418
2419 len -= l;
2420 addr += l;
2421 }
2422 return true;
2423}
2424
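/*
 * Illustrative sketch, not from the original file: probing a guest physical
 * range before touching it, as device emulation does when validating DMA
 * descriptor addresses; direction and length are up to the caller.
 */
static bool example_range_is_readable(hwaddr gpa, int len)
{
    return address_space_access_valid(&address_space_memory, gpa, len, false);
}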
6d16c2f8
AL
2425/* Map a physical memory region into a host virtual address.
2426 * May map a subset of the requested range, given by and returned in *plen.
2427 * May return NULL if resources needed to perform the mapping are exhausted.
2428 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2429 * Use cpu_register_map_client() to know when retrying the map operation is
2430 * likely to succeed.
6d16c2f8 2431 */
ac1970fb 2432void *address_space_map(AddressSpace *as,
a8170e5e
AK
2433 hwaddr addr,
2434 hwaddr *plen,
ac1970fb 2435 bool is_write)
6d16c2f8 2436{
a8170e5e 2437 hwaddr len = *plen;
e3127ae0
PB
2438 hwaddr done = 0;
2439 hwaddr l, xlat, base;
2440 MemoryRegion *mr, *this_mr;
2441 ram_addr_t raddr;
6d16c2f8 2442
e3127ae0
PB
2443 if (len == 0) {
2444 return NULL;
2445 }
38bee5dc 2446
e3127ae0
PB
2447 l = len;
2448 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2449 if (!memory_access_is_direct(mr, is_write)) {
2450 if (bounce.buffer) {
2451 return NULL;
6d16c2f8 2452 }
e85d9db5
KW
2453 /* Avoid unbounded allocations */
2454 l = MIN(l, TARGET_PAGE_SIZE);
2455 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2456 bounce.addr = addr;
2457 bounce.len = l;
d3e71559
PB
2458
2459 memory_region_ref(mr);
2460 bounce.mr = mr;
e3127ae0
PB
2461 if (!is_write) {
2462 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2463 }
6d16c2f8 2464
e3127ae0
PB
2465 *plen = l;
2466 return bounce.buffer;
2467 }
2468
2469 base = xlat;
2470 raddr = memory_region_get_ram_addr(mr);
2471
2472 for (;;) {
6d16c2f8
AL
2473 len -= l;
2474 addr += l;
e3127ae0
PB
2475 done += l;
2476 if (len == 0) {
2477 break;
2478 }
2479
2480 l = len;
2481 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2482 if (this_mr != mr || xlat != base + done) {
2483 break;
2484 }
6d16c2f8 2485 }
e3127ae0 2486
d3e71559 2487 memory_region_ref(mr);
e3127ae0
PB
2488 *plen = done;
2489 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2490}
2491
ac1970fb 2492/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2493 * Will also mark the memory as dirty if is_write == 1. access_len gives
2494 * the amount of memory that was actually read or written by the caller.
2495 */
a8170e5e
AK
2496void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2497 int is_write, hwaddr access_len)
6d16c2f8
AL
2498{
2499 if (buffer != bounce.buffer) {
d3e71559
PB
2500 MemoryRegion *mr;
2501 ram_addr_t addr1;
2502
2503 mr = qemu_ram_addr_from_host(buffer, &addr1);
2504 assert(mr != NULL);
6d16c2f8 2505 if (is_write) {
6886867e 2506 invalidate_and_set_dirty(addr1, access_len);
6d16c2f8 2507 }
868bb33f 2508 if (xen_enabled()) {
e41d7c69 2509 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2510 }
d3e71559 2511 memory_region_unref(mr);
6d16c2f8
AL
2512 return;
2513 }
2514 if (is_write) {
ac1970fb 2515 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2516 }
f8a83245 2517 qemu_vfree(bounce.buffer);
6d16c2f8 2518 bounce.buffer = NULL;
d3e71559 2519 memory_region_unref(bounce.mr);
ba223c29 2520 cpu_notify_map_clients();
6d16c2f8 2521}
d0ecd2aa 2522
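/*
 * Illustrative sketch, not from the original file: the intended calling
 * pattern for address_space_map()/address_space_unmap(), including the
 * map-client retry path taken when the single bounce buffer is busy.
 * example_restart_transfer() stands in for the caller's own retry hook.
 */
static void example_restart_transfer(void *opaque)
{
    /* the device would re-issue its DMA request from here */
}

static void example_dma_write(AddressSpace *as, hwaddr addr,
                              const uint8_t *data, hwaddr len)
{
    hwaddr plen = len;
    uint8_t *host = address_space_map(as, addr, &plen, true);

    if (!host) {
        /* bounce buffer in use: retry once a mapping is released */
        cpu_register_map_client(NULL, example_restart_transfer);
        return;
    }
    memcpy(host, data, plen);                 /* plen may be < len */
    address_space_unmap(as, host, plen, true, plen);
}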
a8170e5e
AK
2523void *cpu_physical_memory_map(hwaddr addr,
2524 hwaddr *plen,
ac1970fb
AK
2525 int is_write)
2526{
2527 return address_space_map(&address_space_memory, addr, plen, is_write);
2528}
2529
a8170e5e
AK
2530void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2531 int is_write, hwaddr access_len)
ac1970fb
AK
2532{
2533 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2534}
2535
8df1cd07 2536/* warning: addr must be aligned */
fdfba1a2 2537static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2538 enum device_endian endian)
8df1cd07 2539{
8df1cd07 2540 uint8_t *ptr;
791af8c8 2541 uint64_t val;
5c8a00ce 2542 MemoryRegion *mr;
149f54b5
PB
2543 hwaddr l = 4;
2544 hwaddr addr1;
8df1cd07 2545
fdfba1a2 2546 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2547 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2548 /* I/O case */
5c8a00ce 2549 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2550#if defined(TARGET_WORDS_BIGENDIAN)
2551 if (endian == DEVICE_LITTLE_ENDIAN) {
2552 val = bswap32(val);
2553 }
2554#else
2555 if (endian == DEVICE_BIG_ENDIAN) {
2556 val = bswap32(val);
2557 }
2558#endif
8df1cd07
FB
2559 } else {
2560 /* RAM case */
5c8a00ce 2561 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2562 & TARGET_PAGE_MASK)
149f54b5 2563 + addr1);
1e78bcc1
AG
2564 switch (endian) {
2565 case DEVICE_LITTLE_ENDIAN:
2566 val = ldl_le_p(ptr);
2567 break;
2568 case DEVICE_BIG_ENDIAN:
2569 val = ldl_be_p(ptr);
2570 break;
2571 default:
2572 val = ldl_p(ptr);
2573 break;
2574 }
8df1cd07
FB
2575 }
2576 return val;
2577}
2578
fdfba1a2 2579uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2580{
fdfba1a2 2581 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2582}
2583
fdfba1a2 2584uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2585{
fdfba1a2 2586 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2587}
2588
fdfba1a2 2589uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2590{
fdfba1a2 2591 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2592}
2593
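/*
 * Illustrative sketch, not from the original file: using the
 * fixed-endianness helpers to access a little-endian descriptor in guest
 * memory regardless of the target's byte order; the descriptor address and
 * flag bit are assumptions.
 */
static uint32_t example_read_le_flags(hwaddr desc_gpa)
{
    uint32_t flags = ldl_le_phys(&address_space_memory, desc_gpa);

    stl_le_phys(&address_space_memory, desc_gpa + 4, flags | 1);
    return flags;
}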
84b7b8e7 2594/* warning: addr must be aligned */
2c17449b 2595static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2596 enum device_endian endian)
84b7b8e7 2597{
84b7b8e7
FB
2598 uint8_t *ptr;
2599 uint64_t val;
5c8a00ce 2600 MemoryRegion *mr;
149f54b5
PB
2601 hwaddr l = 8;
2602 hwaddr addr1;
84b7b8e7 2603
2c17449b 2604 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2605 false);
2606 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2607 /* I/O case */
5c8a00ce 2608 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2609#if defined(TARGET_WORDS_BIGENDIAN)
2610 if (endian == DEVICE_LITTLE_ENDIAN) {
2611 val = bswap64(val);
2612 }
2613#else
2614 if (endian == DEVICE_BIG_ENDIAN) {
2615 val = bswap64(val);
2616 }
84b7b8e7
FB
2617#endif
2618 } else {
2619 /* RAM case */
5c8a00ce 2620 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2621 & TARGET_PAGE_MASK)
149f54b5 2622 + addr1);
1e78bcc1
AG
2623 switch (endian) {
2624 case DEVICE_LITTLE_ENDIAN:
2625 val = ldq_le_p(ptr);
2626 break;
2627 case DEVICE_BIG_ENDIAN:
2628 val = ldq_be_p(ptr);
2629 break;
2630 default:
2631 val = ldq_p(ptr);
2632 break;
2633 }
84b7b8e7
FB
2634 }
2635 return val;
2636}
2637
2c17449b 2638uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2639{
2c17449b 2640 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2641}
2642
2c17449b 2643uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2644{
2c17449b 2645 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2646}
2647
2c17449b 2648uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2649{
2c17449b 2650 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2651}
2652
aab33094 2653/* XXX: optimize */
2c17449b 2654uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2655{
2656 uint8_t val;
2c17449b 2657 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2658 return val;
2659}
2660
733f0b02 2661/* warning: addr must be aligned */
41701aa4 2662static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2663 enum device_endian endian)
aab33094 2664{
733f0b02
MT
2665 uint8_t *ptr;
2666 uint64_t val;
5c8a00ce 2667 MemoryRegion *mr;
149f54b5
PB
2668 hwaddr l = 2;
2669 hwaddr addr1;
733f0b02 2670
41701aa4 2671 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2672 false);
2673 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2674 /* I/O case */
5c8a00ce 2675 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2676#if defined(TARGET_WORDS_BIGENDIAN)
2677 if (endian == DEVICE_LITTLE_ENDIAN) {
2678 val = bswap16(val);
2679 }
2680#else
2681 if (endian == DEVICE_BIG_ENDIAN) {
2682 val = bswap16(val);
2683 }
2684#endif
733f0b02
MT
2685 } else {
2686 /* RAM case */
5c8a00ce 2687 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2688 & TARGET_PAGE_MASK)
149f54b5 2689 + addr1);
1e78bcc1
AG
2690 switch (endian) {
2691 case DEVICE_LITTLE_ENDIAN:
2692 val = lduw_le_p(ptr);
2693 break;
2694 case DEVICE_BIG_ENDIAN:
2695 val = lduw_be_p(ptr);
2696 break;
2697 default:
2698 val = lduw_p(ptr);
2699 break;
2700 }
733f0b02
MT
2701 }
2702 return val;
aab33094
FB
2703}
2704
41701aa4 2705uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2706{
41701aa4 2707 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2708}
2709
41701aa4 2710uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2711{
41701aa4 2712 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2713}
2714
41701aa4 2715uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2716{
41701aa4 2717 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2718}
2719
8df1cd07
FB
2720/* warning: addr must be aligned. The ram page is not masked as dirty
2721 and the code inside is not invalidated. It is useful if the dirty
2722 bits are used to track modified PTEs */
2198a121 2723void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2724{
8df1cd07 2725 uint8_t *ptr;
5c8a00ce 2726 MemoryRegion *mr;
149f54b5
PB
2727 hwaddr l = 4;
2728 hwaddr addr1;
8df1cd07 2729
2198a121 2730 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2731 true);
2732 if (l < 4 || !memory_access_is_direct(mr, true)) {
2733 io_mem_write(mr, addr1, val, 4);
8df1cd07 2734 } else {
5c8a00ce 2735 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2736 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2737 stl_p(ptr, val);
74576198
AL
2738
2739 if (unlikely(in_migration)) {
a2cd8c85 2740 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2741 /* invalidate code */
2742 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2743 /* set dirty bit */
6886867e 2744 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
74576198
AL
2745 }
2746 }
8df1cd07
FB
2747 }
2748}
2749
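/*
 * Illustrative sketch, not from the original file: updating a guest
 * page-table entry with stl_phys_notdirty() so that the write does not mark
 * the page dirty for self-modifying-code tracking; the PTE address and the
 * 0x20 "accessed" bit are assumptions for the example.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    stl_phys_notdirty(as, pte_addr, pte | 0x20);
}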
2750/* warning: addr must be aligned */
ab1da857
EI
2751static inline void stl_phys_internal(AddressSpace *as,
2752 hwaddr addr, uint32_t val,
1e78bcc1 2753 enum device_endian endian)
8df1cd07 2754{
8df1cd07 2755 uint8_t *ptr;
5c8a00ce 2756 MemoryRegion *mr;
149f54b5
PB
2757 hwaddr l = 4;
2758 hwaddr addr1;
8df1cd07 2759
ab1da857 2760 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2761 true);
2762 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2763#if defined(TARGET_WORDS_BIGENDIAN)
2764 if (endian == DEVICE_LITTLE_ENDIAN) {
2765 val = bswap32(val);
2766 }
2767#else
2768 if (endian == DEVICE_BIG_ENDIAN) {
2769 val = bswap32(val);
2770 }
2771#endif
5c8a00ce 2772 io_mem_write(mr, addr1, val, 4);
8df1cd07 2773 } else {
8df1cd07 2774 /* RAM case */
5c8a00ce 2775 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2776 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2777 switch (endian) {
2778 case DEVICE_LITTLE_ENDIAN:
2779 stl_le_p(ptr, val);
2780 break;
2781 case DEVICE_BIG_ENDIAN:
2782 stl_be_p(ptr, val);
2783 break;
2784 default:
2785 stl_p(ptr, val);
2786 break;
2787 }
51d7a9eb 2788 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2789 }
2790}
2791
ab1da857 2792void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2793{
ab1da857 2794 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2795}
2796
ab1da857 2797void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2798{
ab1da857 2799 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2800}
2801
ab1da857 2802void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2803{
ab1da857 2804 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2805}
2806
aab33094 2807/* XXX: optimize */
db3be60d 2808void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2809{
2810 uint8_t v = val;
db3be60d 2811 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2812}
2813
733f0b02 2814/* warning: addr must be aligned */
5ce5944d
EI
2815static inline void stw_phys_internal(AddressSpace *as,
2816 hwaddr addr, uint32_t val,
1e78bcc1 2817 enum device_endian endian)
aab33094 2818{
733f0b02 2819 uint8_t *ptr;
5c8a00ce 2820 MemoryRegion *mr;
149f54b5
PB
2821 hwaddr l = 2;
2822 hwaddr addr1;
733f0b02 2823
5ce5944d 2824 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2825 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2826#if defined(TARGET_WORDS_BIGENDIAN)
2827 if (endian == DEVICE_LITTLE_ENDIAN) {
2828 val = bswap16(val);
2829 }
2830#else
2831 if (endian == DEVICE_BIG_ENDIAN) {
2832 val = bswap16(val);
2833 }
2834#endif
5c8a00ce 2835 io_mem_write(mr, addr1, val, 2);
733f0b02 2836 } else {
733f0b02 2837 /* RAM case */
5c8a00ce 2838 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2839 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2840 switch (endian) {
2841 case DEVICE_LITTLE_ENDIAN:
2842 stw_le_p(ptr, val);
2843 break;
2844 case DEVICE_BIG_ENDIAN:
2845 stw_be_p(ptr, val);
2846 break;
2847 default:
2848 stw_p(ptr, val);
2849 break;
2850 }
51d7a9eb 2851 invalidate_and_set_dirty(addr1, 2);
733f0b02 2852 }
aab33094
FB
2853}
2854
5ce5944d 2855void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2856{
5ce5944d 2857 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2858}
2859
5ce5944d 2860void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2861{
5ce5944d 2862 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2863}
2864
5ce5944d 2865void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2866{
5ce5944d 2867 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2868}
2869
aab33094 2870/* XXX: optimize */
f606604f 2871void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2872{
2873 val = tswap64(val);
f606604f 2874 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2875}
2876
f606604f 2877void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2878{
2879 val = cpu_to_le64(val);
f606604f 2880 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2881}
2882
f606604f 2883void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2884{
2885 val = cpu_to_be64(val);
f606604f 2886 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2887}
2888
5e2972fd 2889/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2890int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2891 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2892{
2893 int l;
a8170e5e 2894 hwaddr phys_addr;
9b3c35e0 2895 target_ulong page;
13eb76e0
FB
2896
2897 while (len > 0) {
2898 page = addr & TARGET_PAGE_MASK;
f17ec444 2899 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2900 /* if no physical page mapped, return an error */
2901 if (phys_addr == -1)
2902 return -1;
2903 l = (page + TARGET_PAGE_SIZE) - addr;
2904 if (l > len)
2905 l = len;
5e2972fd 2906 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2907 if (is_write) {
2908 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2909 } else {
2910 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2911 }
13eb76e0
FB
2912 len -= l;
2913 buf += l;
2914 addr += l;
2915 }
2916 return 0;
2917}
a68fe89c 2918#endif
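/*
 * Illustrative sketch, not from the original file: how a debugger stub
 * reads guest virtual memory through cpu_memory_rw_debug(), which is
 * provided by both the user-mode and softmmu variants above.
 */
static int example_read_guest_virt(CPUState *cpu, target_ulong addr,
                                   uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(cpu, addr, buf, len, 0 /* read */);
}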
13eb76e0 2919
8e4a424b
BS
2920/*
2921 * A helper function for the _utterly broken_ virtio device model to find out if
2922 * it's running on a big endian machine. Don't do this at home kids!
2923 */
98ed8ecf
GK
2924bool target_words_bigendian(void);
2925bool target_words_bigendian(void)
8e4a424b
BS
2926{
2927#if defined(TARGET_WORDS_BIGENDIAN)
2928 return true;
2929#else
2930 return false;
2931#endif
2932}
2933
76f35538 2934#ifndef CONFIG_USER_ONLY
a8170e5e 2935bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2936{
5c8a00ce 2937 MemoryRegion*mr;
149f54b5 2938 hwaddr l = 1;
76f35538 2939
5c8a00ce
PB
2940 mr = address_space_translate(&address_space_memory,
2941 phys_addr, &phys_addr, &l, false);
76f35538 2942
5c8a00ce
PB
2943 return !(memory_region_is_ram(mr) ||
2944 memory_region_is_romd(mr));
76f35538 2945}
bd2fa51f
MH
2946
2947void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2948{
2949 RAMBlock *block;
2950
2951 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 2952 func(block->host, block->offset, block->used_length, opaque);
bd2fa51f
MH
2953 }
2954}
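/*
 * Illustrative sketch, not from the original file: a RAMBlockIterFunc
 * callback that sums the used size of every RAM block, in the style of the
 * migration code that walks guest RAM; the callback signature is inferred
 * from the call made by qemu_ram_foreach_block() above.
 */
static void example_count_ram(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    *(uint64_t *)opaque += length;
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_count_ram, &total);
    return total;
}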
ec3f8c99 2955#endif