54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c
FB
22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
cc9e98cb 29#include "hw/qdev.h"
1de7afc9 30#include "qemu/osdep.h"
9c17d615 31#include "sysemu/kvm.h"
2ff3de68 32#include "sysemu/sysemu.h"
0d09e41a 33#include "hw/xen/xen.h"
1de7afc9
PB
34#include "qemu/timer.h"
35#include "qemu/config-file.h"
75a34036 36#include "qemu/error-report.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
220c3ebd 52#include "exec/ram_addr.h"
67d95c15 53
b35ba30f
MT
54#include "qemu/range.h"
55
db7b5426 56//#define DEBUG_SUBPAGE
1196be37 57
e2eef170 58#if !defined(CONFIG_USER_ONLY)
981fdf23 59static bool in_migration;
94a6b54f 60
a3161038 61RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
62
63static MemoryRegion *system_memory;
309cb471 64static MemoryRegion *system_io;
62152b8a 65
f6790af6
AK
66AddressSpace address_space_io;
67AddressSpace address_space_memory;
2673a5da 68
0844e007 69MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 70static MemoryRegion io_mem_unassigned;
0e0df1e2 71
7bd4f430
PB
72/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
73#define RAM_PREALLOC (1 << 0)
74
dbcb8981
PB
75/* RAM is mmap-ed with MAP_SHARED */
76#define RAM_SHARED (1 << 1)
77
62be4e3a
MT
78/* Only a portion of RAM (used_length) is actually used, and migrated.
79 * This used_length size can change across reboots.
80 */
81#define RAM_RESIZEABLE (1 << 2)
82
e2eef170 83#endif
9fa3e853 84
bdc44640 85struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601
FB
86/* current CPU in the current thread. It is only valid inside
87 cpu_exec() */
4917cf44 88DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 89/* 0 = Do not count executed instructions.
bf20dc07 90 1 = Precise instruction counting.
2e70f6ef 91 2 = Adaptive rate instruction counting. */
5708fc66 92int use_icount;
6a00d601 93
e2eef170 94#if !defined(CONFIG_USER_ONLY)
4346ae3e 95
1db8abb1
PB
96typedef struct PhysPageEntry PhysPageEntry;
97
98struct PhysPageEntry {
9736e55b 99 /* How many bits to skip to the next level (in units of P_L2_SIZE). 0 for a leaf. */
8b795765 100 uint32_t skip : 6;
9736e55b 101 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 102 uint32_t ptr : 26;
1db8abb1
PB
103};
104
8b795765
MT
105#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
106
03f49957 107/* Size of the L2 (and L3, etc) page tables. */
57271d63 108#define ADDR_SPACE_BITS 64
03f49957 109
026736ce 110#define P_L2_BITS 9
03f49957
PB
111#define P_L2_SIZE (1 << P_L2_BITS)
112
113#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
114
115typedef PhysPageEntry Node[P_L2_SIZE];
0475d94f 116
53cb28cb
MA
117typedef struct PhysPageMap {
118 unsigned sections_nb;
119 unsigned sections_nb_alloc;
120 unsigned nodes_nb;
121 unsigned nodes_nb_alloc;
122 Node *nodes;
123 MemoryRegionSection *sections;
124} PhysPageMap;
125
1db8abb1
PB
126struct AddressSpaceDispatch {
127 /* This is a multi-level map on the physical address space.
128 * The bottom level has pointers to MemoryRegionSections.
129 */
130 PhysPageEntry phys_map;
53cb28cb 131 PhysPageMap map;
acc9d80b 132 AddressSpace *as;
1db8abb1
PB
133};
134
90260c6c
JK
135#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
136typedef struct subpage_t {
137 MemoryRegion iomem;
acc9d80b 138 AddressSpace *as;
90260c6c
JK
139 hwaddr base;
140 uint16_t sub_section[TARGET_PAGE_SIZE];
141} subpage_t;
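
/*
 * Illustrative sketch (not part of the original file): a subpage carves one
 * target page into finer-grained regions.  SUBPAGE_IDX() keeps only the
 * offset within the page, which indexes sub_section[] to find the section
 * that handles that byte.  The helper name is hypothetical.
 */
static inline uint16_t subpage_section_sketch(const subpage_t *subpage,
                                              hwaddr addr)
{
    return subpage->sub_section[SUBPAGE_IDX(addr)];
}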
142
b41aac4f
LPF
143#define PHYS_SECTION_UNASSIGNED 0
144#define PHYS_SECTION_NOTDIRTY 1
145#define PHYS_SECTION_ROM 2
146#define PHYS_SECTION_WATCH 3
5312bd8b 147
e2eef170 148static void io_mem_init(void);
62152b8a 149static void memory_map_init(void);
09daed84 150static void tcg_commit(MemoryListener *listener);
e2eef170 151
1ec9b909 152static MemoryRegion io_mem_watch;
6658ffb8 153#endif
fd6ce8f6 154
6d9a1304 155#if !defined(CONFIG_USER_ONLY)
d6f2ea22 156
53cb28cb 157static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 158{
53cb28cb
MA
159 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
160 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
161 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
162 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 163 }
f7bf5461
AK
164}
165
53cb28cb 166static uint32_t phys_map_node_alloc(PhysPageMap *map)
f7bf5461
AK
167{
168 unsigned i;
8b795765 169 uint32_t ret;
f7bf5461 170
53cb28cb 171 ret = map->nodes_nb++;
f7bf5461 172 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 173 assert(ret != map->nodes_nb_alloc);
03f49957 174 for (i = 0; i < P_L2_SIZE; ++i) {
53cb28cb
MA
175 map->nodes[ret][i].skip = 1;
176 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 177 }
f7bf5461 178 return ret;
d6f2ea22
AK
179}
180
53cb28cb
MA
181static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
182 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 183 int level)
f7bf5461
AK
184{
185 PhysPageEntry *p;
186 int i;
03f49957 187 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 188
9736e55b 189 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
53cb28cb
MA
190 lp->ptr = phys_map_node_alloc(map);
191 p = map->nodes[lp->ptr];
f7bf5461 192 if (level == 0) {
03f49957 193 for (i = 0; i < P_L2_SIZE; i++) {
9736e55b 194 p[i].skip = 0;
b41aac4f 195 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 196 }
67c4d23c 197 }
f7bf5461 198 } else {
53cb28cb 199 p = map->nodes[lp->ptr];
92e873b9 200 }
03f49957 201 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 202
03f49957 203 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 204 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 205 lp->skip = 0;
c19e8800 206 lp->ptr = leaf;
07f07b31
AK
207 *index += step;
208 *nb -= step;
2999097b 209 } else {
53cb28cb 210 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b
AK
211 }
212 ++lp;
f7bf5461
AK
213 }
214}
215
ac1970fb 216static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 217 hwaddr index, hwaddr nb,
2999097b 218 uint16_t leaf)
f7bf5461 219{
2999097b 220 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 221 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 222
53cb28cb 223 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
224}
225
b35ba30f
MT
226/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
227 * and update our entry so we can skip it and go directly to the destination.
228 */
229static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
230{
231 unsigned valid_ptr = P_L2_SIZE;
232 int valid = 0;
233 PhysPageEntry *p;
234 int i;
235
236 if (lp->ptr == PHYS_MAP_NODE_NIL) {
237 return;
238 }
239
240 p = nodes[lp->ptr];
241 for (i = 0; i < P_L2_SIZE; i++) {
242 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
243 continue;
244 }
245
246 valid_ptr = i;
247 valid++;
248 if (p[i].skip) {
249 phys_page_compact(&p[i], nodes, compacted);
250 }
251 }
252
253 /* We can only compress if there's only one child. */
254 if (valid != 1) {
255 return;
256 }
257
258 assert(valid_ptr < P_L2_SIZE);
259
260 /* Don't compress if it won't fit in the # of bits we have. */
261 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
262 return;
263 }
264
265 lp->ptr = p[valid_ptr].ptr;
266 if (!p[valid_ptr].skip) {
267 /* If our only child is a leaf, make this a leaf. */
268 /* By design, we should have made this node a leaf to begin with so we
269 * should never reach here.
270 * But since it's so simple to handle this, let's do it just in case we
271 * change this rule.
272 */
273 lp->skip = 0;
274 } else {
275 lp->skip += p[valid_ptr].skip;
276 }
277}
278
279static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
280{
281 DECLARE_BITMAP(compacted, nodes_nb);
282
283 if (d->phys_map.skip) {
53cb28cb 284 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
285 }
286}
287
97115a8d 288static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 289 Node *nodes, MemoryRegionSection *sections)
92e873b9 290{
31ab2b4a 291 PhysPageEntry *p;
97115a8d 292 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 293 int i;
f1f6e3b8 294
9736e55b 295 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 296 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 297 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 298 }
9affd6fc 299 p = nodes[lp.ptr];
03f49957 300 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 301 }
b35ba30f
MT
302
303 if (sections[lp.ptr].size.hi ||
304 range_covers_byte(sections[lp.ptr].offset_within_address_space,
305 sections[lp.ptr].size.lo, addr)) {
306 return &sections[lp.ptr];
307 } else {
308 return &sections[PHYS_SECTION_UNASSIGNED];
309 }
f3705d53
AK
310}
311
e5548617
BS
312bool memory_region_is_unassigned(MemoryRegion *mr)
313{
2a8e7499 314 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 315 && mr != &io_mem_watch;
fd6ce8f6 316}
149f54b5 317
c7086b4a 318static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
319 hwaddr addr,
320 bool resolve_subpage)
9f029603 321{
90260c6c
JK
322 MemoryRegionSection *section;
323 subpage_t *subpage;
324
53cb28cb 325 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
326 if (resolve_subpage && section->mr->subpage) {
327 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 328 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
329 }
330 return section;
9f029603
JK
331}
332
90260c6c 333static MemoryRegionSection *
c7086b4a 334address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 335 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
336{
337 MemoryRegionSection *section;
a87f3954 338 Int128 diff;
149f54b5 339
c7086b4a 340 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
341 /* Compute offset within MemoryRegionSection */
342 addr -= section->offset_within_address_space;
343
344 /* Compute offset within MemoryRegion */
345 *xlat = addr + section->offset_within_region;
346
347 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 348 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
349 return section;
350}
90260c6c 351
a87f3954
PB
352static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
353{
354 if (memory_region_is_ram(mr)) {
355 return !(is_write && mr->readonly);
356 }
357 if (memory_region_is_romd(mr)) {
358 return !is_write;
359 }
360
361 return false;
362}
363
5c8a00ce
PB
364MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
365 hwaddr *xlat, hwaddr *plen,
366 bool is_write)
90260c6c 367{
30951157
AK
368 IOMMUTLBEntry iotlb;
369 MemoryRegionSection *section;
370 MemoryRegion *mr;
371 hwaddr len = *plen;
372
373 for (;;) {
a87f3954 374 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157
AK
375 mr = section->mr;
376
377 if (!mr->iommu_ops) {
378 break;
379 }
380
8d7b8cb9 381 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157
AK
382 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
383 | (addr & iotlb.addr_mask));
384 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
385 if (!(iotlb.perm & (1 << is_write))) {
386 mr = &io_mem_unassigned;
387 break;
388 }
389
390 as = iotlb.target_as;
391 }
392
fe680d0d 393 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954
PB
394 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
395 len = MIN(page, len);
396 }
397
30951157
AK
398 *plen = len;
399 *xlat = addr;
400 return mr;
90260c6c
JK
401}
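
/*
 * Illustrative usage (not part of the original file): translate a guest
 * physical address before accessing it; on return, *plen says how many bytes
 * the returned MemoryRegion can satisfy contiguously.  The function and
 * variable names below are hypothetical.
 */
static inline MemoryRegion *translate_usage_sketch(AddressSpace *as,
                                                   hwaddr addr, hwaddr *xlat,
                                                   hwaddr *plen)
{
    *plen = TARGET_PAGE_SIZE;
    /* is_write = false, i.e. a read access */
    return address_space_translate(as, addr, xlat, plen, false);
}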
402
403MemoryRegionSection *
404address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
405 hwaddr *plen)
406{
30951157 407 MemoryRegionSection *section;
c7086b4a 408 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
30951157
AK
409
410 assert(!section->mr->iommu_ops);
411 return section;
90260c6c 412}
5b6dd868 413#endif
fd6ce8f6 414
5b6dd868 415void cpu_exec_init_all(void)
fdbb84d1 416{
5b6dd868 417#if !defined(CONFIG_USER_ONLY)
b2a8658e 418 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
419 memory_map_init();
420 io_mem_init();
fdbb84d1 421#endif
5b6dd868 422}
fdbb84d1 423
b170fce3 424#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
425
426static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 427{
259186a7 428 CPUState *cpu = opaque;
a513fe19 429
5b6dd868
BS
430 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
431 version_id is increased. */
259186a7 432 cpu->interrupt_request &= ~0x01;
c01a71c1 433 tlb_flush(cpu, 1);
5b6dd868
BS
434
435 return 0;
a513fe19 436}
7501267e 437
6c3bff0e
PD
438static int cpu_common_pre_load(void *opaque)
439{
440 CPUState *cpu = opaque;
441
adee6424 442 cpu->exception_index = -1;
6c3bff0e
PD
443
444 return 0;
445}
446
447static bool cpu_common_exception_index_needed(void *opaque)
448{
449 CPUState *cpu = opaque;
450
adee6424 451 return tcg_enabled() && cpu->exception_index != -1;
6c3bff0e
PD
452}
453
454static const VMStateDescription vmstate_cpu_common_exception_index = {
455 .name = "cpu_common/exception_index",
456 .version_id = 1,
457 .minimum_version_id = 1,
458 .fields = (VMStateField[]) {
459 VMSTATE_INT32(exception_index, CPUState),
460 VMSTATE_END_OF_LIST()
461 }
462};
463
1a1562f5 464const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
465 .name = "cpu_common",
466 .version_id = 1,
467 .minimum_version_id = 1,
6c3bff0e 468 .pre_load = cpu_common_pre_load,
5b6dd868 469 .post_load = cpu_common_post_load,
35d08458 470 .fields = (VMStateField[]) {
259186a7
AF
471 VMSTATE_UINT32(halted, CPUState),
472 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 473 VMSTATE_END_OF_LIST()
6c3bff0e
PD
474 },
475 .subsections = (VMStateSubsection[]) {
476 {
477 .vmsd = &vmstate_cpu_common_exception_index,
478 .needed = cpu_common_exception_index_needed,
479 } , {
480 /* empty */
481 }
5b6dd868
BS
482 }
483};
1a1562f5 484
5b6dd868 485#endif
ea041c0e 486
38d8f5c8 487CPUState *qemu_get_cpu(int index)
ea041c0e 488{
bdc44640 489 CPUState *cpu;
ea041c0e 490
bdc44640 491 CPU_FOREACH(cpu) {
55e5c285 492 if (cpu->cpu_index == index) {
bdc44640 493 return cpu;
55e5c285 494 }
ea041c0e 495 }
5b6dd868 496
bdc44640 497 return NULL;
ea041c0e
FB
498}
499
09daed84
EI
500#if !defined(CONFIG_USER_ONLY)
501void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
502{
503 /* We only support one address space per cpu at the moment. */
504 assert(cpu->as == as);
505
506 if (cpu->tcg_as_listener) {
507 memory_listener_unregister(cpu->tcg_as_listener);
508 } else {
509 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
510 }
511 cpu->tcg_as_listener->commit = tcg_commit;
512 memory_listener_register(cpu->tcg_as_listener, as);
513}
514#endif
515
5b6dd868 516void cpu_exec_init(CPUArchState *env)
ea041c0e 517{
5b6dd868 518 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 519 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 520 CPUState *some_cpu;
5b6dd868
BS
521 int cpu_index;
522
523#if defined(CONFIG_USER_ONLY)
524 cpu_list_lock();
525#endif
5b6dd868 526 cpu_index = 0;
bdc44640 527 CPU_FOREACH(some_cpu) {
5b6dd868
BS
528 cpu_index++;
529 }
55e5c285 530 cpu->cpu_index = cpu_index;
1b1ed8dc 531 cpu->numa_node = 0;
f0c3c505 532 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 533 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 534#ifndef CONFIG_USER_ONLY
09daed84 535 cpu->as = &address_space_memory;
5b6dd868
BS
536 cpu->thread_id = qemu_get_thread_id();
537#endif
bdc44640 538 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
539#if defined(CONFIG_USER_ONLY)
540 cpu_list_unlock();
541#endif
e0d47944
AF
542 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
543 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
544 }
5b6dd868 545#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
546 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
547 cpu_save, cpu_load, env);
b170fce3 548 assert(cc->vmsd == NULL);
e0d47944 549 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 550#endif
b170fce3
AF
551 if (cc->vmsd != NULL) {
552 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
553 }
ea041c0e
FB
554}
555
94df27fd 556#if defined(CONFIG_USER_ONLY)
00b941e5 557static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
558{
559 tb_invalidate_phys_page_range(pc, pc + 1, 0);
560}
561#else
00b941e5 562static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 563{
e8262a1b
MF
564 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
565 if (phys != -1) {
09daed84 566 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 567 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 568 }
1e7855a5 569}
c27004ec 570#endif
d720b93d 571
c527ee8f 572#if defined(CONFIG_USER_ONLY)
75a34036 573void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
574
575{
576}
577
3ee887e8
PM
578int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
579 int flags)
580{
581 return -ENOSYS;
582}
583
584void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
585{
586}
587
75a34036 588int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
589 int flags, CPUWatchpoint **watchpoint)
590{
591 return -ENOSYS;
592}
593#else
6658ffb8 594/* Add a watchpoint. */
75a34036 595int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 596 int flags, CPUWatchpoint **watchpoint)
6658ffb8 597{
c0ce998e 598 CPUWatchpoint *wp;
6658ffb8 599
05068c0d 600 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 601 if (len == 0 || (addr + len - 1) < addr) {
75a34036
AF
602 error_report("tried to set invalid watchpoint at %"
603 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
604 return -EINVAL;
605 }
7267c094 606 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
607
608 wp->vaddr = addr;
05068c0d 609 wp->len = len;
a1d1bb31
AL
610 wp->flags = flags;
611
2dc9f411 612 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
613 if (flags & BP_GDB) {
614 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
615 } else {
616 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
617 }
6658ffb8 618
31b030d4 619 tlb_flush_page(cpu, addr);
a1d1bb31
AL
620
621 if (watchpoint)
622 *watchpoint = wp;
623 return 0;
6658ffb8
PB
624}
625
a1d1bb31 626/* Remove a specific watchpoint. */
75a34036 627int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 628 int flags)
6658ffb8 629{
a1d1bb31 630 CPUWatchpoint *wp;
6658ffb8 631
ff4700b0 632 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 633 if (addr == wp->vaddr && len == wp->len
6e140f28 634 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 635 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
636 return 0;
637 }
638 }
a1d1bb31 639 return -ENOENT;
6658ffb8
PB
640}
641
a1d1bb31 642/* Remove a specific watchpoint by reference. */
75a34036 643void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 644{
ff4700b0 645 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 646
31b030d4 647 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 648
7267c094 649 g_free(watchpoint);
a1d1bb31
AL
650}
651
652/* Remove all matching watchpoints. */
75a34036 653void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 654{
c0ce998e 655 CPUWatchpoint *wp, *next;
a1d1bb31 656
ff4700b0 657 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
658 if (wp->flags & mask) {
659 cpu_watchpoint_remove_by_ref(cpu, wp);
660 }
c0ce998e 661 }
7d03f82f 662}
05068c0d
PM
663
664/* Return true if this watchpoint address matches the specified
665 * access (i.e. the address range covered by the watchpoint overlaps
666 * partially or completely with the address range covered by the
667 * access).
668 */
669static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
670 vaddr addr,
671 vaddr len)
672{
673 /* We know the lengths are non-zero, but a little caution is
674 * required to avoid errors in the case where the range ends
675 * exactly at the top of the address space and so addr + len
676 * wraps round to zero.
677 */
678 vaddr wpend = wp->vaddr + wp->len - 1;
679 vaddr addrend = addr + len - 1;
680
681 return !(addr > wpend || wp->vaddr > addrend);
682}
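
/*
 * Worked example (not part of the original file) of the wrap-around case the
 * comment above guards against: a watchpoint covering the last 0x100 bytes
 * of the address space has wpend == (vaddr)-1, whereas computing
 * wp->vaddr + wp->len directly would wrap to zero and reject every access.
 * The helper name is hypothetical.
 */
static inline bool watchpoint_wrap_sketch(void)
{
    CPUWatchpoint wp = { .vaddr = (vaddr)-0x100, .len = 0x100 };

    /* An access to the very last byte overlaps the watchpoint. */
    return cpu_watchpoint_address_matches(&wp, (vaddr)-1, 1);
}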
683
c527ee8f 684#endif
7d03f82f 685
a1d1bb31 686/* Add a breakpoint. */
b3310ab3 687int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 688 CPUBreakpoint **breakpoint)
4c3a88a2 689{
c0ce998e 690 CPUBreakpoint *bp;
3b46e624 691
7267c094 692 bp = g_malloc(sizeof(*bp));
4c3a88a2 693
a1d1bb31
AL
694 bp->pc = pc;
695 bp->flags = flags;
696
2dc9f411 697 /* keep all GDB-injected breakpoints in front */
00b941e5 698 if (flags & BP_GDB) {
f0c3c505 699 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 700 } else {
f0c3c505 701 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 702 }
3b46e624 703
f0c3c505 704 breakpoint_invalidate(cpu, pc);
a1d1bb31 705
00b941e5 706 if (breakpoint) {
a1d1bb31 707 *breakpoint = bp;
00b941e5 708 }
4c3a88a2 709 return 0;
4c3a88a2
FB
710}
711
a1d1bb31 712/* Remove a specific breakpoint. */
b3310ab3 713int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 714{
a1d1bb31
AL
715 CPUBreakpoint *bp;
716
f0c3c505 717 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 718 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 719 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
720 return 0;
721 }
7d03f82f 722 }
a1d1bb31 723 return -ENOENT;
7d03f82f
EI
724}
725
a1d1bb31 726/* Remove a specific breakpoint by reference. */
b3310ab3 727void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 728{
f0c3c505
AF
729 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
730
731 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 732
7267c094 733 g_free(breakpoint);
a1d1bb31
AL
734}
735
736/* Remove all matching breakpoints. */
b3310ab3 737void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 738{
c0ce998e 739 CPUBreakpoint *bp, *next;
a1d1bb31 740
f0c3c505 741 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
742 if (bp->flags & mask) {
743 cpu_breakpoint_remove_by_ref(cpu, bp);
744 }
c0ce998e 745 }
4c3a88a2
FB
746}
747
c33a346e
FB
748/* enable or disable single step mode. EXCP_DEBUG is returned by the
749 CPU loop after each instruction */
3825b28f 750void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 751{
ed2803da
AF
752 if (cpu->singlestep_enabled != enabled) {
753 cpu->singlestep_enabled = enabled;
754 if (kvm_enabled()) {
38e478ec 755 kvm_update_guest_debug(cpu, 0);
ed2803da 756 } else {
ccbb4d44 757 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 758 /* XXX: only flush what is necessary */
38e478ec 759 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
760 tb_flush(env);
761 }
c33a346e 762 }
c33a346e
FB
763}
764
a47dddd7 765void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
766{
767 va_list ap;
493ae1f0 768 va_list ap2;
7501267e
FB
769
770 va_start(ap, fmt);
493ae1f0 771 va_copy(ap2, ap);
7501267e
FB
772 fprintf(stderr, "qemu: fatal: ");
773 vfprintf(stderr, fmt, ap);
774 fprintf(stderr, "\n");
878096ee 775 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
776 if (qemu_log_enabled()) {
777 qemu_log("qemu: fatal: ");
778 qemu_log_vprintf(fmt, ap2);
779 qemu_log("\n");
a0762859 780 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 781 qemu_log_flush();
93fcfe39 782 qemu_log_close();
924edcae 783 }
493ae1f0 784 va_end(ap2);
f9373291 785 va_end(ap);
fd052bf6
RV
786#if defined(CONFIG_USER_ONLY)
787 {
788 struct sigaction act;
789 sigfillset(&act.sa_mask);
790 act.sa_handler = SIG_DFL;
791 sigaction(SIGABRT, &act, NULL);
792 }
793#endif
7501267e
FB
794 abort();
795}
796
0124311e 797#if !defined(CONFIG_USER_ONLY)
041603fe
PB
798static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
799{
800 RAMBlock *block;
801
802 /* The list is protected by the iothread lock here. */
803 block = ram_list.mru_block;
9b8424d5 804 if (block && addr - block->offset < block->max_length) {
041603fe
PB
805 goto found;
806 }
807 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 808 if (addr - block->offset < block->max_length) {
041603fe
PB
809 goto found;
810 }
811 }
812
813 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
814 abort();
815
816found:
817 ram_list.mru_block = block;
818 return block;
819}
820
a2f4d5be 821static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 822{
041603fe 823 ram_addr_t start1;
a2f4d5be
JQ
824 RAMBlock *block;
825 ram_addr_t end;
826
827 end = TARGET_PAGE_ALIGN(start + length);
828 start &= TARGET_PAGE_MASK;
d24981d3 829
041603fe
PB
830 block = qemu_get_ram_block(start);
831 assert(block == qemu_get_ram_block(end - 1));
1240be24 832 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
041603fe 833 cpu_tlb_reset_dirty_all(start1, length);
d24981d3
JQ
834}
835
5579c7f3 836/* Note: start and end must be within the same ram block. */
a2f4d5be 837void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 838 unsigned client)
1ccde1cb 839{
1ccde1cb
FB
840 if (length == 0)
841 return;
c8d6f66a 842 cpu_physical_memory_clear_dirty_range_type(start, length, client);
f23db169 843
d24981d3 844 if (tcg_enabled()) {
a2f4d5be 845 tlb_reset_dirty_range_all(start, length);
5579c7f3 846 }
1ccde1cb
FB
847}
848
981fdf23 849static void cpu_physical_memory_set_dirty_tracking(bool enable)
74576198
AL
850{
851 in_migration = enable;
74576198
AL
852}
853
bb0e627a 854hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
855 MemoryRegionSection *section,
856 target_ulong vaddr,
857 hwaddr paddr, hwaddr xlat,
858 int prot,
859 target_ulong *address)
e5548617 860{
a8170e5e 861 hwaddr iotlb;
e5548617
BS
862 CPUWatchpoint *wp;
863
cc5bea60 864 if (memory_region_is_ram(section->mr)) {
e5548617
BS
865 /* Normal RAM. */
866 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 867 + xlat;
e5548617 868 if (!section->readonly) {
b41aac4f 869 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 870 } else {
b41aac4f 871 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
872 }
873 } else {
1b3fb98f 874 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 875 iotlb += xlat;
e5548617
BS
876 }
877
878 /* Make accesses to pages with watchpoints go via the
879 watchpoint trap routines. */
ff4700b0 880 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 881 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
882 /* Avoid trapping reads of pages with a write breakpoint. */
883 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 884 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
885 *address |= TLB_MMIO;
886 break;
887 }
888 }
889 }
890
891 return iotlb;
892}
9fa3e853
FB
893#endif /* defined(CONFIG_USER_ONLY) */
894
e2eef170 895#if !defined(CONFIG_USER_ONLY)
8da3ff18 896
c227f099 897static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 898 uint16_t section);
acc9d80b 899static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 900
a2b257d6
IM
901static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
902 qemu_anon_ram_alloc;
91138037
MA
903
904/*
905 * Set a custom physical guest memory allocator.
906 * Accelerators with unusual needs may need this. Hopefully, we can
907 * get rid of it eventually.
908 */
a2b257d6 909void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
91138037
MA
910{
911 phys_mem_alloc = alloc;
912}
913
53cb28cb
MA
914static uint16_t phys_section_add(PhysPageMap *map,
915 MemoryRegionSection *section)
5312bd8b 916{
68f3f65b
PB
917 /* The physical section number is ORed with a page-aligned
918 * pointer to produce the iotlb entries. Thus it should
919 * never overflow into the page-aligned value.
920 */
53cb28cb 921 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 922
53cb28cb
MA
923 if (map->sections_nb == map->sections_nb_alloc) {
924 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
925 map->sections = g_renew(MemoryRegionSection, map->sections,
926 map->sections_nb_alloc);
5312bd8b 927 }
53cb28cb 928 map->sections[map->sections_nb] = *section;
dfde4e6e 929 memory_region_ref(section->mr);
53cb28cb 930 return map->sections_nb++;
5312bd8b
AK
931}
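
/*
 * Illustrative sketch (not part of the original file) of the encoding the
 * comment above relies on: a section number is always smaller than
 * TARGET_PAGE_SIZE, so it can be ORed into the low bits of a page-aligned
 * address and recovered with a mask.  The helper names are hypothetical.
 */
static inline hwaddr iotlb_encode_sketch(uint16_t section, hwaddr page_aligned)
{
    return page_aligned | section;          /* the bit ranges never overlap */
}

static inline uint16_t iotlb_section_sketch(hwaddr iotlb)
{
    return iotlb & ~TARGET_PAGE_MASK;       /* low bits hold the section */
}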
932
058bc4b5
PB
933static void phys_section_destroy(MemoryRegion *mr)
934{
dfde4e6e
PB
935 memory_region_unref(mr);
936
058bc4b5
PB
937 if (mr->subpage) {
938 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 939 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
940 g_free(subpage);
941 }
942}
943
6092666e 944static void phys_sections_free(PhysPageMap *map)
5312bd8b 945{
9affd6fc
PB
946 while (map->sections_nb > 0) {
947 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
948 phys_section_destroy(section->mr);
949 }
9affd6fc
PB
950 g_free(map->sections);
951 g_free(map->nodes);
5312bd8b
AK
952}
953
ac1970fb 954static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
955{
956 subpage_t *subpage;
a8170e5e 957 hwaddr base = section->offset_within_address_space
0f0cb164 958 & TARGET_PAGE_MASK;
97115a8d 959 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 960 d->map.nodes, d->map.sections);
0f0cb164
AK
961 MemoryRegionSection subsection = {
962 .offset_within_address_space = base,
052e87b0 963 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 964 };
a8170e5e 965 hwaddr start, end;
0f0cb164 966
f3705d53 967 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 968
f3705d53 969 if (!(existing->mr->subpage)) {
acc9d80b 970 subpage = subpage_init(d->as, base);
3be91e86 971 subsection.address_space = d->as;
0f0cb164 972 subsection.mr = &subpage->iomem;
ac1970fb 973 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 974 phys_section_add(&d->map, &subsection));
0f0cb164 975 } else {
f3705d53 976 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
977 }
978 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 979 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
980 subpage_register(subpage, start, end,
981 phys_section_add(&d->map, section));
0f0cb164
AK
982}
983
984
052e87b0
PB
985static void register_multipage(AddressSpaceDispatch *d,
986 MemoryRegionSection *section)
33417e70 987{
a8170e5e 988 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 989 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
990 uint64_t num_pages = int128_get64(int128_rshift(section->size,
991 TARGET_PAGE_BITS));
dd81124b 992
733d5ef5
PB
993 assert(num_pages);
994 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
995}
996
ac1970fb 997static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 998{
89ae337a 999 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1000 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1001 MemoryRegionSection now = *section, remain = *section;
052e87b0 1002 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1003
733d5ef5
PB
1004 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1005 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1006 - now.offset_within_address_space;
1007
052e87b0 1008 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1009 register_subpage(d, &now);
733d5ef5 1010 } else {
052e87b0 1011 now.size = int128_zero();
733d5ef5 1012 }
052e87b0
PB
1013 while (int128_ne(remain.size, now.size)) {
1014 remain.size = int128_sub(remain.size, now.size);
1015 remain.offset_within_address_space += int128_get64(now.size);
1016 remain.offset_within_region += int128_get64(now.size);
69b67646 1017 now = remain;
052e87b0 1018 if (int128_lt(remain.size, page_size)) {
733d5ef5 1019 register_subpage(d, &now);
88266249 1020 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1021 now.size = page_size;
ac1970fb 1022 register_subpage(d, &now);
69b67646 1023 } else {
052e87b0 1024 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1025 register_multipage(d, &now);
69b67646 1026 }
0f0cb164
AK
1027 }
1028}
1029
62a2744c
SY
1030void qemu_flush_coalesced_mmio_buffer(void)
1031{
1032 if (kvm_enabled())
1033 kvm_flush_coalesced_mmio_buffer();
1034}
1035
b2a8658e
UD
1036void qemu_mutex_lock_ramlist(void)
1037{
1038 qemu_mutex_lock(&ram_list.mutex);
1039}
1040
1041void qemu_mutex_unlock_ramlist(void)
1042{
1043 qemu_mutex_unlock(&ram_list.mutex);
1044}
1045
e1e84ba0 1046#ifdef __linux__
c902760f
MT
1047
1048#include <sys/vfs.h>
1049
1050#define HUGETLBFS_MAGIC 0x958458f6
1051
fc7a5800 1052static long gethugepagesize(const char *path, Error **errp)
c902760f
MT
1053{
1054 struct statfs fs;
1055 int ret;
1056
1057 do {
9742bf26 1058 ret = statfs(path, &fs);
c902760f
MT
1059 } while (ret != 0 && errno == EINTR);
1060
1061 if (ret != 0) {
fc7a5800
HT
1062 error_setg_errno(errp, errno, "failed to get page size of file %s",
1063 path);
9742bf26 1064 return 0;
c902760f
MT
1065 }
1066
1067 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1068 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1069
1070 return fs.f_bsize;
1071}
1072
04b16653
AW
1073static void *file_ram_alloc(RAMBlock *block,
1074 ram_addr_t memory,
7f56e740
PB
1075 const char *path,
1076 Error **errp)
c902760f
MT
1077{
1078 char *filename;
8ca761f6
PF
1079 char *sanitized_name;
1080 char *c;
557529dd 1081 void *area = NULL;
c902760f 1082 int fd;
557529dd 1083 uint64_t hpagesize;
fc7a5800 1084 Error *local_err = NULL;
c902760f 1085
fc7a5800
HT
1086 hpagesize = gethugepagesize(path, &local_err);
1087 if (local_err) {
1088 error_propagate(errp, local_err);
f9a49dfa 1089 goto error;
c902760f 1090 }
a2b257d6 1091 block->mr->align = hpagesize;
c902760f
MT
1092
1093 if (memory < hpagesize) {
557529dd
HT
1094 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1095 "or larger than huge page size 0x%" PRIx64,
1096 memory, hpagesize);
1097 goto error;
c902760f
MT
1098 }
1099
1100 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1101 error_setg(errp,
1102 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1103 goto error;
c902760f
MT
1104 }
1105
8ca761f6 1106 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1107 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1108 for (c = sanitized_name; *c != '\0'; c++) {
1109 if (*c == '/')
1110 *c = '_';
1111 }
1112
1113 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1114 sanitized_name);
1115 g_free(sanitized_name);
c902760f
MT
1116
1117 fd = mkstemp(filename);
1118 if (fd < 0) {
7f56e740
PB
1119 error_setg_errno(errp, errno,
1120 "unable to create backing store for hugepages");
e4ada482 1121 g_free(filename);
f9a49dfa 1122 goto error;
c902760f
MT
1123 }
1124 unlink(filename);
e4ada482 1125 g_free(filename);
c902760f
MT
1126
1127 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1128
1129 /*
1130 * ftruncate is not supported by hugetlbfs in older
1131 * hosts, so don't bother bailing out on errors.
1132 * If anything goes wrong with it under other filesystems,
1133 * mmap will fail.
1134 */
7f56e740 1135 if (ftruncate(fd, memory)) {
9742bf26 1136 perror("ftruncate");
7f56e740 1137 }
c902760f 1138
dbcb8981
PB
1139 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1140 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1141 fd, 0);
c902760f 1142 if (area == MAP_FAILED) {
7f56e740
PB
1143 error_setg_errno(errp, errno,
1144 "unable to map backing store for hugepages");
9742bf26 1145 close(fd);
f9a49dfa 1146 goto error;
c902760f 1147 }
ef36fa14
MT
1148
1149 if (mem_prealloc) {
38183310 1150 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1151 }
1152
04b16653 1153 block->fd = fd;
c902760f 1154 return area;
f9a49dfa
MT
1155
1156error:
1157 if (mem_prealloc) {
e4d9df4f 1158 error_report("%s\n", error_get_pretty(*errp));
f9a49dfa
MT
1159 exit(1);
1160 }
1161 return NULL;
c902760f
MT
1162}
1163#endif
1164
d17b5288 1165static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1166{
1167 RAMBlock *block, *next_block;
3e837b2c 1168 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1169
49cd9ac6
SH
1170 assert(size != 0); /* it would hand out same offset multiple times */
1171
a3161038 1172 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1173 return 0;
1174
a3161038 1175 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1176 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653 1177
62be4e3a 1178 end = block->offset + block->max_length;
04b16653 1179
a3161038 1180 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1181 if (next_block->offset >= end) {
1182 next = MIN(next, next_block->offset);
1183 }
1184 }
1185 if (next - end >= size && next - end < mingap) {
3e837b2c 1186 offset = end;
04b16653
AW
1187 mingap = next - end;
1188 }
1189 }
3e837b2c
AW
1190
1191 if (offset == RAM_ADDR_MAX) {
1192 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1193 (uint64_t)size);
1194 abort();
1195 }
1196
04b16653
AW
1197 return offset;
1198}
1199
652d7ec2 1200ram_addr_t last_ram_offset(void)
d17b5288
AW
1201{
1202 RAMBlock *block;
1203 ram_addr_t last = 0;
1204
a3161038 1205 QTAILQ_FOREACH(block, &ram_list.blocks, next)
62be4e3a 1206 last = MAX(last, block->offset + block->max_length);
d17b5288
AW
1207
1208 return last;
1209}
1210
ddb97f1d
JB
1211static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1212{
1213 int ret;
ddb97f1d
JB
1214
1215 /* Use MADV_DONTDUMP, if the user doesn't want the guest memory in the core */
2ff3de68
MA
1216 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1217 "dump-guest-core", true)) {
ddb97f1d
JB
1218 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1219 if (ret) {
1220 perror("qemu_madvise");
1221 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1222 "but dump_guest_core=off specified\n");
1223 }
1224 }
1225}
1226
20cfe881 1227static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1228{
20cfe881 1229 RAMBlock *block;
84b89d78 1230
a3161038 1231 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1232 if (block->offset == addr) {
20cfe881 1233 return block;
c5705a77
AK
1234 }
1235 }
20cfe881
HT
1236
1237 return NULL;
1238}
1239
1240void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1241{
1242 RAMBlock *new_block = find_ram_block(addr);
1243 RAMBlock *block;
1244
c5705a77
AK
1245 assert(new_block);
1246 assert(!new_block->idstr[0]);
84b89d78 1247
09e5ab63
AL
1248 if (dev) {
1249 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1250 if (id) {
1251 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1252 g_free(id);
84b89d78
CM
1253 }
1254 }
1255 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1256
b2a8658e
UD
1257 /* This assumes the iothread lock is taken here too. */
1258 qemu_mutex_lock_ramlist();
a3161038 1259 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1260 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1261 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1262 new_block->idstr);
1263 abort();
1264 }
1265 }
b2a8658e 1266 qemu_mutex_unlock_ramlist();
c5705a77
AK
1267}
1268
20cfe881
HT
1269void qemu_ram_unset_idstr(ram_addr_t addr)
1270{
1271 RAMBlock *block = find_ram_block(addr);
1272
1273 if (block) {
1274 memset(block->idstr, 0, sizeof(block->idstr));
1275 }
1276}
1277
8490fc78
LC
1278static int memory_try_enable_merging(void *addr, size_t len)
1279{
2ff3de68 1280 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1281 /* disabled by the user */
1282 return 0;
1283 }
1284
1285 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1286}
1287
62be4e3a
MT
1288/* Only legal before the guest might have detected the memory size: e.g. on
1289 * incoming migration, or right after reset.
1290 *
1291 * As the memory core doesn't know how memory is accessed, it is up to the
1292 * resize callback to update device state and/or add assertions to detect
1293 * misuse, if necessary.
1294 */
1295int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1296{
1297 RAMBlock *block = find_ram_block(base);
1298
1299 assert(block);
1300
1301 if (block->used_length == newsize) {
1302 return 0;
1303 }
1304
1305 if (!(block->flags & RAM_RESIZEABLE)) {
1306 error_setg_errno(errp, EINVAL,
1307 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1308 " in != 0x" RAM_ADDR_FMT, block->idstr,
1309 newsize, block->used_length);
1310 return -EINVAL;
1311 }
1312
1313 if (block->max_length < newsize) {
1314 error_setg_errno(errp, EINVAL,
1315 "Length too large: %s: 0x" RAM_ADDR_FMT
1316 " > 0x" RAM_ADDR_FMT, block->idstr,
1317 newsize, block->max_length);
1318 return -EINVAL;
1319 }
1320
1321 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1322 block->used_length = newsize;
1323 cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
1324 memory_region_set_size(block->mr, newsize);
1325 if (block->resized) {
1326 block->resized(block->idstr, newsize, block->host);
1327 }
1328 return 0;
1329}
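
/*
 * Hypothetical usage (not part of the original file): incoming migration may
 * resize a RAM_RESIZEABLE block to the used_length reported by the source,
 * before the guest can have observed the old size.  The base offset and new
 * size below are made-up values.
 */
static inline void ram_resize_usage_sketch(ram_addr_t base)
{
    Error *err = NULL;

    if (qemu_ram_resize(base, 128 * 1024 * 1024, &err) < 0) {
        error_report("resize failed: %s", error_get_pretty(err));
        error_free(err);
    }
}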
1330
ef701d7b 1331static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1332{
e1c57ab8 1333 RAMBlock *block;
2152f5ca
JQ
1334 ram_addr_t old_ram_size, new_ram_size;
1335
1336 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1337
b2a8658e
UD
1338 /* This assumes the iothread lock is taken here too. */
1339 qemu_mutex_lock_ramlist();
9b8424d5 1340 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1341
1342 if (!new_block->host) {
1343 if (xen_enabled()) {
9b8424d5
MT
1344 xen_ram_alloc(new_block->offset, new_block->max_length,
1345 new_block->mr);
e1c57ab8 1346 } else {
9b8424d5 1347 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1348 &new_block->mr->align);
39228250 1349 if (!new_block->host) {
ef701d7b
HT
1350 error_setg_errno(errp, errno,
1351 "cannot set up guest memory '%s'",
1352 memory_region_name(new_block->mr));
1353 qemu_mutex_unlock_ramlist();
1354 return -1;
39228250 1355 }
9b8424d5 1356 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1357 }
c902760f 1358 }
94a6b54f 1359
abb26d63
PB
1360 /* Keep the list sorted from biggest to smallest block. */
1361 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 1362 if (block->max_length < new_block->max_length) {
abb26d63
PB
1363 break;
1364 }
1365 }
1366 if (block) {
1367 QTAILQ_INSERT_BEFORE(block, new_block, next);
1368 } else {
1369 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1370 }
0d6d3c87 1371 ram_list.mru_block = NULL;
94a6b54f 1372
f798b07f 1373 ram_list.version++;
b2a8658e 1374 qemu_mutex_unlock_ramlist();
f798b07f 1375
2152f5ca
JQ
1376 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1377
1378 if (new_ram_size > old_ram_size) {
1ab4c8ce
JQ
1379 int i;
1380 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1381 ram_list.dirty_memory[i] =
1382 bitmap_zero_extend(ram_list.dirty_memory[i],
1383 old_ram_size, new_ram_size);
1384 }
2152f5ca 1385 }
9b8424d5
MT
1386 cpu_physical_memory_set_dirty_range(new_block->offset,
1387 new_block->used_length);
94a6b54f 1388
a904c911
PB
1389 if (new_block->host) {
1390 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1391 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1392 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1393 if (kvm_enabled()) {
1394 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1395 }
e1c57ab8 1396 }
6f0437e8 1397
94a6b54f
PB
1398 return new_block->offset;
1399}
e9a1ab19 1400
0b183fc8 1401#ifdef __linux__
e1c57ab8 1402ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1403 bool share, const char *mem_path,
7f56e740 1404 Error **errp)
e1c57ab8
PB
1405{
1406 RAMBlock *new_block;
ef701d7b
HT
1407 ram_addr_t addr;
1408 Error *local_err = NULL;
e1c57ab8
PB
1409
1410 if (xen_enabled()) {
7f56e740
PB
1411 error_setg(errp, "-mem-path not supported with Xen");
1412 return -1;
e1c57ab8
PB
1413 }
1414
1415 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1416 /*
1417 * file_ram_alloc() needs to allocate just like
1418 * phys_mem_alloc, but we haven't bothered to provide
1419 * a hook there.
1420 */
7f56e740
PB
1421 error_setg(errp,
1422 "-mem-path not supported with this accelerator");
1423 return -1;
e1c57ab8
PB
1424 }
1425
1426 size = TARGET_PAGE_ALIGN(size);
1427 new_block = g_malloc0(sizeof(*new_block));
1428 new_block->mr = mr;
9b8424d5
MT
1429 new_block->used_length = size;
1430 new_block->max_length = size;
dbcb8981 1431 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1432 new_block->host = file_ram_alloc(new_block, size,
1433 mem_path, errp);
1434 if (!new_block->host) {
1435 g_free(new_block);
1436 return -1;
1437 }
1438
ef701d7b
HT
1439 addr = ram_block_add(new_block, &local_err);
1440 if (local_err) {
1441 g_free(new_block);
1442 error_propagate(errp, local_err);
1443 return -1;
1444 }
1445 return addr;
e1c57ab8 1446}
0b183fc8 1447#endif
e1c57ab8 1448
62be4e3a
MT
1449static
1450ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1451 void (*resized)(const char*,
1452 uint64_t length,
1453 void *host),
1454 void *host, bool resizeable,
ef701d7b 1455 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1456{
1457 RAMBlock *new_block;
ef701d7b
HT
1458 ram_addr_t addr;
1459 Error *local_err = NULL;
e1c57ab8
PB
1460
1461 size = TARGET_PAGE_ALIGN(size);
62be4e3a 1462 max_size = TARGET_PAGE_ALIGN(max_size);
e1c57ab8
PB
1463 new_block = g_malloc0(sizeof(*new_block));
1464 new_block->mr = mr;
62be4e3a 1465 new_block->resized = resized;
9b8424d5
MT
1466 new_block->used_length = size;
1467 new_block->max_length = max_size;
62be4e3a 1468 assert(max_size >= size);
e1c57ab8
PB
1469 new_block->fd = -1;
1470 new_block->host = host;
1471 if (host) {
7bd4f430 1472 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1473 }
62be4e3a
MT
1474 if (resizeable) {
1475 new_block->flags |= RAM_RESIZEABLE;
1476 }
ef701d7b
HT
1477 addr = ram_block_add(new_block, &local_err);
1478 if (local_err) {
1479 g_free(new_block);
1480 error_propagate(errp, local_err);
1481 return -1;
1482 }
1483 return addr;
e1c57ab8
PB
1484}
1485
62be4e3a
MT
1486ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1487 MemoryRegion *mr, Error **errp)
1488{
1489 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1490}
1491
ef701d7b 1492ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1493{
62be4e3a
MT
1494 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1495}
1496
1497ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1498 void (*resized)(const char*,
1499 uint64_t length,
1500 void *host),
1501 MemoryRegion *mr, Error **errp)
1502{
1503 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
1504}
1505
1f2e98b6
AW
1506void qemu_ram_free_from_ptr(ram_addr_t addr)
1507{
1508 RAMBlock *block;
1509
b2a8658e
UD
1510 /* This assumes the iothread lock is taken here too. */
1511 qemu_mutex_lock_ramlist();
a3161038 1512 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1513 if (addr == block->offset) {
a3161038 1514 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1515 ram_list.mru_block = NULL;
f798b07f 1516 ram_list.version++;
7267c094 1517 g_free(block);
b2a8658e 1518 break;
1f2e98b6
AW
1519 }
1520 }
b2a8658e 1521 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1522}
1523
c227f099 1524void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1525{
04b16653
AW
1526 RAMBlock *block;
1527
b2a8658e
UD
1528 /* This assumes the iothread lock is taken here too. */
1529 qemu_mutex_lock_ramlist();
a3161038 1530 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1531 if (addr == block->offset) {
a3161038 1532 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1533 ram_list.mru_block = NULL;
f798b07f 1534 ram_list.version++;
7bd4f430 1535 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1536 ;
dfeaf2ab
MA
1537 } else if (xen_enabled()) {
1538 xen_invalidate_map_cache_entry(block->host);
089f3f76 1539#ifndef _WIN32
3435f395 1540 } else if (block->fd >= 0) {
9b8424d5 1541 munmap(block->host, block->max_length);
3435f395 1542 close(block->fd);
089f3f76 1543#endif
04b16653 1544 } else {
9b8424d5 1545 qemu_anon_ram_free(block->host, block->max_length);
04b16653 1546 }
7267c094 1547 g_free(block);
b2a8658e 1548 break;
04b16653
AW
1549 }
1550 }
b2a8658e 1551 qemu_mutex_unlock_ramlist();
04b16653 1552
e9a1ab19
FB
1553}
1554
cd19cfa2
HY
1555#ifndef _WIN32
1556void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1557{
1558 RAMBlock *block;
1559 ram_addr_t offset;
1560 int flags;
1561 void *area, *vaddr;
1562
a3161038 1563 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2 1564 offset = addr - block->offset;
9b8424d5 1565 if (offset < block->max_length) {
1240be24 1566 vaddr = ramblock_ptr(block, offset);
7bd4f430 1567 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1568 ;
dfeaf2ab
MA
1569 } else if (xen_enabled()) {
1570 abort();
cd19cfa2
HY
1571 } else {
1572 flags = MAP_FIXED;
1573 munmap(vaddr, length);
3435f395 1574 if (block->fd >= 0) {
dbcb8981
PB
1575 flags |= (block->flags & RAM_SHARED ?
1576 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1577 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1578 flags, block->fd, offset);
cd19cfa2 1579 } else {
2eb9fbaa
MA
1580 /*
1581 * Remap needs to match alloc. Accelerators that
1582 * set phys_mem_alloc never remap. If they did,
1583 * we'd need a remap hook here.
1584 */
1585 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1586
cd19cfa2
HY
1587 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1588 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1589 flags, -1, 0);
cd19cfa2
HY
1590 }
1591 if (area != vaddr) {
f15fbc4b
AP
1592 fprintf(stderr, "Could not remap addr: "
1593 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1594 length, addr);
1595 exit(1);
1596 }
8490fc78 1597 memory_try_enable_merging(vaddr, length);
ddb97f1d 1598 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1599 }
1600 return;
1601 }
1602 }
1603}
1604#endif /* !_WIN32 */
1605
a35ba7be
PB
1606int qemu_get_ram_fd(ram_addr_t addr)
1607{
1608 RAMBlock *block = qemu_get_ram_block(addr);
1609
1610 return block->fd;
1611}
1612
3fd74b84
DM
1613void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1614{
1615 RAMBlock *block = qemu_get_ram_block(addr);
1616
1240be24 1617 return ramblock_ptr(block, 0);
3fd74b84
DM
1618}
1619
1b5ec234
PB
1620/* Return a host pointer to ram allocated with qemu_ram_alloc.
1621 With the exception of the softmmu code in this file, this should
1622 only be used for local memory (e.g. video ram) that the device owns,
1623 and knows it isn't going to access beyond the end of the block.
1624
1625 It should not be used for general purpose DMA.
1626 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1627 */
1628void *qemu_get_ram_ptr(ram_addr_t addr)
1629{
1630 RAMBlock *block = qemu_get_ram_block(addr);
1631
0d6d3c87
PB
1632 if (xen_enabled()) {
1633 /* We need to check if the requested address is in the RAM
1634 * because we don't want to map the entire memory in QEMU.
1635 * In that case just map until the end of the page.
1636 */
1637 if (block->offset == 0) {
1638 return xen_map_cache(addr, 0, 0);
1639 } else if (block->host == NULL) {
1640 block->host =
9b8424d5 1641 xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87
PB
1642 }
1643 }
1240be24 1644 return ramblock_ptr(block, addr - block->offset);
dc828ca1
PB
1645}
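
/*
 * Illustrative contrast (not part of the original file): a device that owns
 * a RAM block (e.g. video RAM) may cache the host pointer for its own
 * accesses, but guest-driven DMA should go through
 * cpu_physical_memory_rw()/cpu_physical_memory_map() instead.  The helper
 * name and offset are hypothetical.
 */
static inline void ram_ptr_usage_sketch(ram_addr_t vram_offset)
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);

    vram[0] = 0;    /* fine: the device knows the bounds of its own block */
}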
1646
38bee5dc
SS
1647/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1648 * but takes a size argument */
cb85f7ab 1649static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1650{
8ab934f9
SS
1651 if (*size == 0) {
1652 return NULL;
1653 }
868bb33f 1654 if (xen_enabled()) {
e41d7c69 1655 return xen_map_cache(addr, *size, 1);
868bb33f 1656 } else {
38bee5dc
SS
1657 RAMBlock *block;
1658
a3161038 1659 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5
MT
1660 if (addr - block->offset < block->max_length) {
1661 if (addr - block->offset + *size > block->max_length)
1662 *size = block->max_length - addr + block->offset;
1240be24 1663 return ramblock_ptr(block, addr - block->offset);
38bee5dc
SS
1664 }
1665 }
1666
1667 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1668 abort();
38bee5dc
SS
1669 }
1670}
1671
7443b437
PB
1672/* Some of the softmmu routines need to translate from a host pointer
1673 (typically a TLB entry) back to a ram offset. */
1b5ec234 1674MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1675{
94a6b54f
PB
1676 RAMBlock *block;
1677 uint8_t *host = ptr;
1678
868bb33f 1679 if (xen_enabled()) {
e41d7c69 1680 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1681 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1682 }
1683
23887b79 1684 block = ram_list.mru_block;
9b8424d5 1685 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1686 goto found;
1687 }
1688
a3161038 1689 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
1690 /* This case can happen when the block is not mapped. */
1691 if (block->host == NULL) {
1692 continue;
1693 }
9b8424d5 1694 if (host - block->host < block->max_length) {
23887b79 1695 goto found;
f471a17e 1696 }
94a6b54f 1697 }
432d268c 1698
1b5ec234 1699 return NULL;
23887b79
PB
1700
1701found:
1702 *ram_addr = block->offset + (host - block->host);
1b5ec234 1703 return block->mr;
e890261f 1704}
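
/*
 * Illustrative usage (not part of the original file): given a host pointer
 * that came out of a TLB entry, recover the owning MemoryRegion and the
 * ram_addr_t offset usable with qemu_get_ram_ptr().  Names are hypothetical.
 */
static inline MemoryRegion *ram_addr_from_host_sketch(void *host_ptr,
                                                      ram_addr_t *offset)
{
    MemoryRegion *mr = qemu_ram_addr_from_host(host_ptr, offset);

    /* mr is NULL if host_ptr does not point into any mapped RAMBlock */
    return mr;
}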
f471a17e 1705
a8170e5e 1706static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1707 uint64_t val, unsigned size)
9fa3e853 1708{
52159192 1709 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1710 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1711 }
0e0df1e2
AK
1712 switch (size) {
1713 case 1:
1714 stb_p(qemu_get_ram_ptr(ram_addr), val);
1715 break;
1716 case 2:
1717 stw_p(qemu_get_ram_ptr(ram_addr), val);
1718 break;
1719 case 4:
1720 stl_p(qemu_get_ram_ptr(ram_addr), val);
1721 break;
1722 default:
1723 abort();
3a7d929e 1724 }
6886867e 1725 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
f23db169
FB
1726 /* we remove the notdirty callback only if the code has been
1727 flushed */
a2cd8c85 1728 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1729 CPUArchState *env = current_cpu->env_ptr;
93afeade 1730 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1731 }
9fa3e853
FB
1732}
1733
b018ddf6
PB
1734static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1735 unsigned size, bool is_write)
1736{
1737 return is_write;
1738}
1739
0e0df1e2 1740static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1741 .write = notdirty_mem_write,
b018ddf6 1742 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1743 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1744};
1745
0f459d16 1746/* Generate a debug exception if a watchpoint has been hit. */
05068c0d 1747static void check_watchpoint(int offset, int len, int flags)
0f459d16 1748{
93afeade
AF
1749 CPUState *cpu = current_cpu;
1750 CPUArchState *env = cpu->env_ptr;
06d55cc1 1751 target_ulong pc, cs_base;
0f459d16 1752 target_ulong vaddr;
a1d1bb31 1753 CPUWatchpoint *wp;
06d55cc1 1754 int cpu_flags;
0f459d16 1755
ff4700b0 1756 if (cpu->watchpoint_hit) {
06d55cc1
AL
1757 /* We re-entered the check after replacing the TB. Now raise
1758 * the debug interrupt so that it will trigger after the
1759 * current instruction. */
93afeade 1760 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1761 return;
1762 }
93afeade 1763 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1764 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1765 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1766 && (wp->flags & flags)) {
08225676
PM
1767 if (flags == BP_MEM_READ) {
1768 wp->flags |= BP_WATCHPOINT_HIT_READ;
1769 } else {
1770 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1771 }
1772 wp->hitaddr = vaddr;
ff4700b0
AF
1773 if (!cpu->watchpoint_hit) {
1774 cpu->watchpoint_hit = wp;
239c51a5 1775 tb_check_watchpoint(cpu);
6e140f28 1776 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1777 cpu->exception_index = EXCP_DEBUG;
5638d180 1778 cpu_loop_exit(cpu);
6e140f28
AL
1779 } else {
1780 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1781 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1782 cpu_resume_from_signal(cpu, NULL);
6e140f28 1783 }
06d55cc1 1784 }
6e140f28
AL
1785 } else {
1786 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1787 }
1788 }
1789}
1790
6658ffb8
PB
1791/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1792 so these check for a hit then pass through to the normal out-of-line
1793 phys routines. */
a8170e5e 1794static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1795 unsigned size)
6658ffb8 1796{
05068c0d 1797 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
1ec9b909 1798 switch (size) {
2c17449b 1799 case 1: return ldub_phys(&address_space_memory, addr);
41701aa4 1800 case 2: return lduw_phys(&address_space_memory, addr);
fdfba1a2 1801 case 4: return ldl_phys(&address_space_memory, addr);
1ec9b909
AK
1802 default: abort();
1803 }
6658ffb8
PB
1804}
1805
a8170e5e 1806static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1807 uint64_t val, unsigned size)
6658ffb8 1808{
05068c0d 1809 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
1ec9b909 1810 switch (size) {
67364150 1811 case 1:
db3be60d 1812 stb_phys(&address_space_memory, addr, val);
67364150
MF
1813 break;
1814 case 2:
5ce5944d 1815 stw_phys(&address_space_memory, addr, val);
67364150
MF
1816 break;
1817 case 4:
ab1da857 1818 stl_phys(&address_space_memory, addr, val);
67364150 1819 break;
1ec9b909
AK
1820 default: abort();
1821 }
6658ffb8
PB
1822}
1823
1ec9b909
AK
1824static const MemoryRegionOps watch_mem_ops = {
1825 .read = watch_mem_read,
1826 .write = watch_mem_write,
1827 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1828};
6658ffb8 1829
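/*
 * Illustrative sketch (not part of the original file): the watch_mem_*
 * handlers above only fire because a watchpoint was installed on the CPU
 * beforehand, which makes the TLB route accesses to that page through
 * io_mem_watch. A debugger front end would typically install one roughly
 * as below; the helper name is invented and cpu_watchpoint_insert() is
 * assumed to have its usual CPUState-based signature.
 */
#if 0
static int example_set_write_watchpoint(CPUState *cpu, vaddr addr, vaddr len)
{
    CPUWatchpoint *wp;

    /* Stop before the write completes, like a hardware debug watchpoint. */
    return cpu_watchpoint_insert(cpu, addr, len,
                                 BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS, &wp);
}
#endif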
a8170e5e 1830static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1831 unsigned len)
db7b5426 1832{
acc9d80b 1833 subpage_t *subpage = opaque;
ff6cff75 1834 uint8_t buf[8];
791af8c8 1835
db7b5426 1836#if defined(DEBUG_SUBPAGE)
016e9d62 1837 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1838 subpage, len, addr);
db7b5426 1839#endif
acc9d80b
JK
1840 address_space_read(subpage->as, addr + subpage->base, buf, len);
1841 switch (len) {
1842 case 1:
1843 return ldub_p(buf);
1844 case 2:
1845 return lduw_p(buf);
1846 case 4:
1847 return ldl_p(buf);
ff6cff75
PB
1848 case 8:
1849 return ldq_p(buf);
acc9d80b
JK
1850 default:
1851 abort();
1852 }
db7b5426
BS
1853}
1854
a8170e5e 1855static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1856 uint64_t value, unsigned len)
db7b5426 1857{
acc9d80b 1858 subpage_t *subpage = opaque;
ff6cff75 1859 uint8_t buf[8];
acc9d80b 1860
db7b5426 1861#if defined(DEBUG_SUBPAGE)
016e9d62 1862 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1863 " value %"PRIx64"\n",
1864 __func__, subpage, len, addr, value);
db7b5426 1865#endif
acc9d80b
JK
1866 switch (len) {
1867 case 1:
1868 stb_p(buf, value);
1869 break;
1870 case 2:
1871 stw_p(buf, value);
1872 break;
1873 case 4:
1874 stl_p(buf, value);
1875 break;
ff6cff75
PB
1876 case 8:
1877 stq_p(buf, value);
1878 break;
acc9d80b
JK
1879 default:
1880 abort();
1881 }
1882 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1883}
1884
c353e4cc 1885static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 1886 unsigned len, bool is_write)
c353e4cc 1887{
acc9d80b 1888 subpage_t *subpage = opaque;
c353e4cc 1889#if defined(DEBUG_SUBPAGE)
016e9d62 1890 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 1891 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1892#endif
1893
acc9d80b 1894 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 1895 len, is_write);
c353e4cc
PB
1896}
1897
70c68e44
AK
1898static const MemoryRegionOps subpage_ops = {
1899 .read = subpage_read,
1900 .write = subpage_write,
ff6cff75
PB
1901 .impl.min_access_size = 1,
1902 .impl.max_access_size = 8,
1903 .valid.min_access_size = 1,
1904 .valid.max_access_size = 8,
c353e4cc 1905 .valid.accepts = subpage_accepts,
70c68e44 1906 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1907};
1908
c227f099 1909static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1910 uint16_t section)
db7b5426
BS
1911{
1912 int idx, eidx;
1913
1914 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1915 return -1;
1916 idx = SUBPAGE_IDX(start);
1917 eidx = SUBPAGE_IDX(end);
1918#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1919 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1920 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1921#endif
db7b5426 1922 for (; idx <= eidx; idx++) {
5312bd8b 1923 mmio->sub_section[idx] = section;
db7b5426
BS
1924 }
1925
1926 return 0;
1927}
1928
acc9d80b 1929static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1930{
c227f099 1931 subpage_t *mmio;
db7b5426 1932
7267c094 1933 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1934
acc9d80b 1935 mmio->as = as;
1eec614b 1936 mmio->base = base;
2c9b15ca 1937 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 1938 NULL, TARGET_PAGE_SIZE);
b3b00c78 1939 mmio->iomem.subpage = true;
db7b5426 1940#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1941 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1942 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1943#endif
b41aac4f 1944 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1945
1946 return mmio;
1947}
1948
a656e22f
PC
1949static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1950 MemoryRegion *mr)
5312bd8b 1951{
a656e22f 1952 assert(as);
5312bd8b 1953 MemoryRegionSection section = {
a656e22f 1954 .address_space = as,
5312bd8b
AK
1955 .mr = mr,
1956 .offset_within_address_space = 0,
1957 .offset_within_region = 0,
052e87b0 1958 .size = int128_2_64(),
5312bd8b
AK
1959 };
1960
53cb28cb 1961 return phys_section_add(map, &section);
5312bd8b
AK
1962}
1963
77717094 1964MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1965{
77717094 1966 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1967}
1968
e9179ce1
AK
1969static void io_mem_init(void)
1970{
1f6245e5 1971 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 1972 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 1973 NULL, UINT64_MAX);
2c9b15ca 1974 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 1975 NULL, UINT64_MAX);
2c9b15ca 1976 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 1977 NULL, UINT64_MAX);
e9179ce1
AK
1978}
1979
ac1970fb 1980static void mem_begin(MemoryListener *listener)
00752703
PB
1981{
1982 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1983 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1984 uint16_t n;
1985
a656e22f 1986 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1987 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1988 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1989 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1990 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1991 assert(n == PHYS_SECTION_ROM);
a656e22f 1992 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1993 assert(n == PHYS_SECTION_WATCH);
00752703 1994
9736e55b 1995 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1996 d->as = as;
1997 as->next_dispatch = d;
1998}
1999
2000static void mem_commit(MemoryListener *listener)
ac1970fb 2001{
89ae337a 2002 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2003 AddressSpaceDispatch *cur = as->dispatch;
2004 AddressSpaceDispatch *next = as->next_dispatch;
2005
53cb28cb 2006 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2007
0475d94f 2008 as->dispatch = next;
b41aac4f 2009
53cb28cb
MA
2010 if (cur) {
2011 phys_sections_free(&cur->map);
2012 g_free(cur);
2013 }
9affd6fc
PB
2014}
2015
1d71148e 2016static void tcg_commit(MemoryListener *listener)
50c1e149 2017{
182735ef 2018 CPUState *cpu;
117712c3
AK
2019
2020 /* since each CPU stores ram addresses in its TLB cache, we must
2021 reset the modified entries */
2022 /* XXX: slow ! */
bdc44640 2023 CPU_FOREACH(cpu) {
33bde2e1
EI
2024 /* FIXME: Disentangle the cpu.h circular file dependencies so we can
2025 directly get the right CPU from listener. */
2026 if (cpu->tcg_as_listener != listener) {
2027 continue;
2028 }
00c8cb0a 2029 tlb_flush(cpu, 1);
117712c3 2030 }
50c1e149
AK
2031}
2032
93632747
AK
2033static void core_log_global_start(MemoryListener *listener)
2034{
981fdf23 2035 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
2036}
2037
2038static void core_log_global_stop(MemoryListener *listener)
2039{
981fdf23 2040 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
2041}
2042
93632747 2043static MemoryListener core_memory_listener = {
93632747
AK
2044 .log_global_start = core_log_global_start,
2045 .log_global_stop = core_log_global_stop,
ac1970fb 2046 .priority = 1,
93632747
AK
2047};
2048
ac1970fb
AK
2049void address_space_init_dispatch(AddressSpace *as)
2050{
00752703 2051 as->dispatch = NULL;
89ae337a 2052 as->dispatch_listener = (MemoryListener) {
ac1970fb 2053 .begin = mem_begin,
00752703 2054 .commit = mem_commit,
ac1970fb
AK
2055 .region_add = mem_add,
2056 .region_nop = mem_add,
2057 .priority = 0,
2058 };
89ae337a 2059 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2060}
2061
83f3c251
AK
2062void address_space_destroy_dispatch(AddressSpace *as)
2063{
2064 AddressSpaceDispatch *d = as->dispatch;
2065
89ae337a 2066 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
2067 g_free(d);
2068 as->dispatch = NULL;
2069}
2070
62152b8a
AK
2071static void memory_map_init(void)
2072{
7267c094 2073 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2074
57271d63 2075 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2076 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2077
7267c094 2078 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2079 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2080 65536);
7dca8043 2081 address_space_init(&address_space_io, system_io, "I/O");
93632747 2082
f6790af6 2083 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
2084}
2085
2086MemoryRegion *get_system_memory(void)
2087{
2088 return system_memory;
2089}
2090
309cb471
AK
2091MemoryRegion *get_system_io(void)
2092{
2093 return system_io;
2094}
2095
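/*
 * Illustrative sketch (not part of the original file): board and device
 * code consumes get_system_memory() by mapping its own MemoryRegions into
 * it. The ops pointer, names and the 0x10000000 base address below are
 * made-up examples.
 */
#if 0
static MemoryRegion example_mmio;

static void example_map_device(const MemoryRegionOps *example_ops,
                               void *opaque)
{
    memory_region_init_io(&example_mmio, NULL, example_ops, opaque,
                          "example-mmio", 0x1000);
    memory_region_add_subregion(get_system_memory(), 0x10000000,
                                &example_mmio);
}
#endif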
e2eef170
PB
2096#endif /* !defined(CONFIG_USER_ONLY) */
2097
13eb76e0
FB
2098/* physical memory access (slow version, mainly for debug) */
2099#if defined(CONFIG_USER_ONLY)
f17ec444 2100int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2101 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2102{
2103 int l, flags;
2104 target_ulong page;
53a5960a 2105 void * p;
13eb76e0
FB
2106
2107 while (len > 0) {
2108 page = addr & TARGET_PAGE_MASK;
2109 l = (page + TARGET_PAGE_SIZE) - addr;
2110 if (l > len)
2111 l = len;
2112 flags = page_get_flags(page);
2113 if (!(flags & PAGE_VALID))
a68fe89c 2114 return -1;
13eb76e0
FB
2115 if (is_write) {
2116 if (!(flags & PAGE_WRITE))
a68fe89c 2117 return -1;
579a97f7 2118 /* XXX: this code should not depend on lock_user */
72fb7daa 2119 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2120 return -1;
72fb7daa
AJ
2121 memcpy(p, buf, l);
2122 unlock_user(p, addr, l);
13eb76e0
FB
2123 } else {
2124 if (!(flags & PAGE_READ))
a68fe89c 2125 return -1;
579a97f7 2126 /* XXX: this code should not depend on lock_user */
72fb7daa 2127 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2128 return -1;
72fb7daa 2129 memcpy(buf, p, l);
5b257578 2130 unlock_user(p, addr, 0);
13eb76e0
FB
2131 }
2132 len -= l;
2133 buf += l;
2134 addr += l;
2135 }
a68fe89c 2136 return 0;
13eb76e0 2137}
8df1cd07 2138
13eb76e0 2139#else
51d7a9eb 2140
a8170e5e
AK
2141static void invalidate_and_set_dirty(hwaddr addr,
2142 hwaddr length)
51d7a9eb 2143{
f874bf90
PM
2144 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2145 tb_invalidate_phys_range(addr, addr + length, 0);
6886867e 2146 cpu_physical_memory_set_dirty_range_nocode(addr, length);
51d7a9eb 2147 }
e226939d 2148 xen_modified_memory(addr, length);
51d7a9eb
AP
2149}
2150
23326164 2151static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2152{
e1622f4b 2153 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2154
2155 /* Regions are assumed to support 1-4 byte accesses unless
2156 otherwise specified. */
23326164
RH
2157 if (access_size_max == 0) {
2158 access_size_max = 4;
2159 }
2160
2161 /* Bound the maximum access by the alignment of the address. */
2162 if (!mr->ops->impl.unaligned) {
2163 unsigned align_size_max = addr & -addr;
2164 if (align_size_max != 0 && align_size_max < access_size_max) {
2165 access_size_max = align_size_max;
2166 }
82f2563f 2167 }
23326164
RH
2168
2169 /* Don't attempt accesses larger than the maximum. */
2170 if (l > access_size_max) {
2171 l = access_size_max;
82f2563f 2172 }
098178f2
PB
2173 if (l & (l - 1)) {
2174 l = 1 << (qemu_fls(l) - 1);
2175 }
23326164
RH
2176
2177 return l;
82f2563f
PB
2178}
2179
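/*
 * Worked example for memory_access_size(): for a region whose ops declare
 * valid.max_access_size == 4 and no unaligned support, an 8-byte request
 * at addr == 0x1006 is first capped to 4 by the region, then to
 * (0x1006 & -0x1006) == 2 by the alignment of the address, so the caller
 * below ends up issuing a 2-byte access at 0x1006.
 */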
fd8aaa76 2180bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2181 int len, bool is_write)
13eb76e0 2182{
149f54b5 2183 hwaddr l;
13eb76e0 2184 uint8_t *ptr;
791af8c8 2185 uint64_t val;
149f54b5 2186 hwaddr addr1;
5c8a00ce 2187 MemoryRegion *mr;
fd8aaa76 2188 bool error = false;
3b46e624 2189
13eb76e0 2190 while (len > 0) {
149f54b5 2191 l = len;
5c8a00ce 2192 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2193
13eb76e0 2194 if (is_write) {
5c8a00ce
PB
2195 if (!memory_access_is_direct(mr, is_write)) {
2196 l = memory_access_size(mr, l, addr1);
4917cf44 2197 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2198 potential bugs */
23326164
RH
2199 switch (l) {
2200 case 8:
2201 /* 64 bit write access */
2202 val = ldq_p(buf);
2203 error |= io_mem_write(mr, addr1, val, 8);
2204 break;
2205 case 4:
1c213d19 2206 /* 32 bit write access */
c27004ec 2207 val = ldl_p(buf);
5c8a00ce 2208 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2209 break;
2210 case 2:
1c213d19 2211 /* 16 bit write access */
c27004ec 2212 val = lduw_p(buf);
5c8a00ce 2213 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2214 break;
2215 case 1:
1c213d19 2216 /* 8 bit write access */
c27004ec 2217 val = ldub_p(buf);
5c8a00ce 2218 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2219 break;
2220 default:
2221 abort();
13eb76e0 2222 }
2bbfa05d 2223 } else {
5c8a00ce 2224 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2225 /* RAM case */
5579c7f3 2226 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2227 memcpy(ptr, buf, l);
51d7a9eb 2228 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2229 }
2230 } else {
5c8a00ce 2231 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2232 /* I/O case */
5c8a00ce 2233 l = memory_access_size(mr, l, addr1);
23326164
RH
2234 switch (l) {
2235 case 8:
2236 /* 64 bit read access */
2237 error |= io_mem_read(mr, addr1, &val, 8);
2238 stq_p(buf, val);
2239 break;
2240 case 4:
13eb76e0 2241 /* 32 bit read access */
5c8a00ce 2242 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2243 stl_p(buf, val);
23326164
RH
2244 break;
2245 case 2:
13eb76e0 2246 /* 16 bit read access */
5c8a00ce 2247 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2248 stw_p(buf, val);
23326164
RH
2249 break;
2250 case 1:
1c213d19 2251 /* 8 bit read access */
5c8a00ce 2252 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2253 stb_p(buf, val);
23326164
RH
2254 break;
2255 default:
2256 abort();
13eb76e0
FB
2257 }
2258 } else {
2259 /* RAM case */
5c8a00ce 2260 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2261 memcpy(buf, ptr, l);
13eb76e0
FB
2262 }
2263 }
2264 len -= l;
2265 buf += l;
2266 addr += l;
2267 }
fd8aaa76
PB
2268
2269 return error;
13eb76e0 2270}
8df1cd07 2271
fd8aaa76 2272bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2273 const uint8_t *buf, int len)
2274{
fd8aaa76 2275 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2276}
2277
fd8aaa76 2278bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2279{
fd8aaa76 2280 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2281}
2282
2283
a8170e5e 2284void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2285 int len, int is_write)
2286{
fd8aaa76 2287 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2288}
2289
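/*
 * Illustrative sketch (not part of the original file): typical use of the
 * accessors above by a DMA-capable device model. The helper name, the
 * addresses and the fixed-size scratch buffer are invented.
 */
#if 0
static void example_dma_copy(hwaddr src, hwaddr dst, int bytes)
{
    uint8_t buf[512];

    assert(bytes <= (int)sizeof(buf));
    /* Read guest memory into a scratch buffer, then write it back out at
     * the destination; both calls go through address_space_rw(). */
    cpu_physical_memory_rw(src, buf, bytes, 0);
    cpu_physical_memory_rw(dst, buf, bytes, 1);
}
#endif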
582b55a9
AG
2290enum write_rom_type {
2291 WRITE_DATA,
2292 FLUSH_CACHE,
2293};
2294
2a221651 2295static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2296 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2297{
149f54b5 2298 hwaddr l;
d0ecd2aa 2299 uint8_t *ptr;
149f54b5 2300 hwaddr addr1;
5c8a00ce 2301 MemoryRegion *mr;
3b46e624 2302
d0ecd2aa 2303 while (len > 0) {
149f54b5 2304 l = len;
2a221651 2305 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2306
5c8a00ce
PB
2307 if (!(memory_region_is_ram(mr) ||
2308 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2309 /* do nothing */
2310 } else {
5c8a00ce 2311 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2312 /* ROM/RAM case */
5579c7f3 2313 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2314 switch (type) {
2315 case WRITE_DATA:
2316 memcpy(ptr, buf, l);
2317 invalidate_and_set_dirty(addr1, l);
2318 break;
2319 case FLUSH_CACHE:
2320 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2321 break;
2322 }
d0ecd2aa
FB
2323 }
2324 len -= l;
2325 buf += l;
2326 addr += l;
2327 }
2328}
2329
582b55a9 2330/* used for ROM loading : can write in RAM and ROM */
2a221651 2331void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2332 const uint8_t *buf, int len)
2333{
2a221651 2334 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2335}
2336
2337void cpu_flush_icache_range(hwaddr start, int len)
2338{
2339 /*
2340 * This function should do the same thing as an icache flush that was
2341 * triggered from within the guest. For TCG we are always cache coherent,
2342 * so there is no need to flush anything. For KVM / Xen we need to flush
2343 * the host's instruction cache at least.
2344 */
2345 if (tcg_enabled()) {
2346 return;
2347 }
2348
2a221651
EI
2349 cpu_physical_memory_write_rom_internal(&address_space_memory,
2350 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2351}
2352
6d16c2f8 2353typedef struct {
d3e71559 2354 MemoryRegion *mr;
6d16c2f8 2355 void *buffer;
a8170e5e
AK
2356 hwaddr addr;
2357 hwaddr len;
6d16c2f8
AL
2358} BounceBuffer;
2359
2360static BounceBuffer bounce;
2361
ba223c29
AL
2362typedef struct MapClient {
2363 void *opaque;
2364 void (*callback)(void *opaque);
72cf2d4f 2365 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2366} MapClient;
2367
72cf2d4f
BS
2368static QLIST_HEAD(map_client_list, MapClient) map_client_list
2369 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2370
2371void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2372{
7267c094 2373 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2374
2375 client->opaque = opaque;
2376 client->callback = callback;
72cf2d4f 2377 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2378 return client;
2379}
2380
8b9c99d9 2381static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2382{
2383 MapClient *client = (MapClient *)_client;
2384
72cf2d4f 2385 QLIST_REMOVE(client, link);
7267c094 2386 g_free(client);
ba223c29
AL
2387}
2388
2389static void cpu_notify_map_clients(void)
2390{
2391 MapClient *client;
2392
72cf2d4f
BS
2393 while (!QLIST_EMPTY(&map_client_list)) {
2394 client = QLIST_FIRST(&map_client_list);
ba223c29 2395 client->callback(client->opaque);
34d5e948 2396 cpu_unregister_map_client(client);
ba223c29
AL
2397 }
2398}
2399
51644ab7
PB
2400bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2401{
5c8a00ce 2402 MemoryRegion *mr;
51644ab7
PB
2403 hwaddr l, xlat;
2404
2405 while (len > 0) {
2406 l = len;
5c8a00ce
PB
2407 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2408 if (!memory_access_is_direct(mr, is_write)) {
2409 l = memory_access_size(mr, l, addr);
2410 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2411 return false;
2412 }
2413 }
2414
2415 len -= l;
2416 addr += l;
2417 }
2418 return true;
2419}
2420
6d16c2f8
AL
2421/* Map a physical memory region into a host virtual address.
2422 * May map a subset of the requested range, given by and returned in *plen.
2423 * May return NULL if resources needed to perform the mapping are exhausted.
2424 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2425 * Use cpu_register_map_client() to know when retrying the map operation is
2426 * likely to succeed.
6d16c2f8 2427 */
ac1970fb 2428void *address_space_map(AddressSpace *as,
a8170e5e
AK
2429 hwaddr addr,
2430 hwaddr *plen,
ac1970fb 2431 bool is_write)
6d16c2f8 2432{
a8170e5e 2433 hwaddr len = *plen;
e3127ae0
PB
2434 hwaddr done = 0;
2435 hwaddr l, xlat, base;
2436 MemoryRegion *mr, *this_mr;
2437 ram_addr_t raddr;
6d16c2f8 2438
e3127ae0
PB
2439 if (len == 0) {
2440 return NULL;
2441 }
38bee5dc 2442
e3127ae0
PB
2443 l = len;
2444 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2445 if (!memory_access_is_direct(mr, is_write)) {
2446 if (bounce.buffer) {
2447 return NULL;
6d16c2f8 2448 }
e85d9db5
KW
2449 /* Avoid unbounded allocations */
2450 l = MIN(l, TARGET_PAGE_SIZE);
2451 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2452 bounce.addr = addr;
2453 bounce.len = l;
d3e71559
PB
2454
2455 memory_region_ref(mr);
2456 bounce.mr = mr;
e3127ae0
PB
2457 if (!is_write) {
2458 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2459 }
6d16c2f8 2460
e3127ae0
PB
2461 *plen = l;
2462 return bounce.buffer;
2463 }
2464
2465 base = xlat;
2466 raddr = memory_region_get_ram_addr(mr);
2467
2468 for (;;) {
6d16c2f8
AL
2469 len -= l;
2470 addr += l;
e3127ae0
PB
2471 done += l;
2472 if (len == 0) {
2473 break;
2474 }
2475
2476 l = len;
2477 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2478 if (this_mr != mr || xlat != base + done) {
2479 break;
2480 }
6d16c2f8 2481 }
e3127ae0 2482
d3e71559 2483 memory_region_ref(mr);
e3127ae0
PB
2484 *plen = done;
2485 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2486}
2487
ac1970fb 2488/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2489 * Will also mark the memory as dirty if is_write == 1. access_len gives
2490 * the amount of memory that was actually read or written by the caller.
2491 */
a8170e5e
AK
2492void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2493 int is_write, hwaddr access_len)
6d16c2f8
AL
2494{
2495 if (buffer != bounce.buffer) {
d3e71559
PB
2496 MemoryRegion *mr;
2497 ram_addr_t addr1;
2498
2499 mr = qemu_ram_addr_from_host(buffer, &addr1);
2500 assert(mr != NULL);
6d16c2f8 2501 if (is_write) {
6886867e 2502 invalidate_and_set_dirty(addr1, access_len);
6d16c2f8 2503 }
868bb33f 2504 if (xen_enabled()) {
e41d7c69 2505 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2506 }
d3e71559 2507 memory_region_unref(mr);
6d16c2f8
AL
2508 return;
2509 }
2510 if (is_write) {
ac1970fb 2511 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2512 }
f8a83245 2513 qemu_vfree(bounce.buffer);
6d16c2f8 2514 bounce.buffer = NULL;
d3e71559 2515 memory_region_unref(bounce.mr);
ba223c29 2516 cpu_notify_map_clients();
6d16c2f8 2517}
d0ecd2aa 2518
a8170e5e
AK
2519void *cpu_physical_memory_map(hwaddr addr,
2520 hwaddr *plen,
ac1970fb
AK
2521 int is_write)
2522{
2523 return address_space_map(&address_space_memory, addr, plen, is_write);
2524}
2525
a8170e5e
AK
2526void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2527 int is_write, hwaddr access_len)
ac1970fb
AK
2528{
2529 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2530}
2531
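/*
 * Illustrative sketch (not part of the original file): the usual
 * map/use/unmap pattern for address_space_map() above, falling back to a
 * bounce copy via address_space_read() when the single bounce buffer is
 * already busy. Real callers often go through the dma_memory_map()
 * wrappers and use cpu_register_map_client() to retry instead.
 */
#if 0
static void example_process_guest_buffer(AddressSpace *as, hwaddr addr,
                                         hwaddr len)
{
    hwaddr plen = len;
    void *p = address_space_map(as, addr, &plen, false);

    if (p) {
        /* Only the first plen bytes are guaranteed to be mapped. */
        /* ... operate directly on p[0..plen) ... */
        address_space_unmap(as, p, plen, false, plen);
    } else {
        uint8_t buf[64];

        address_space_read(as, addr, buf, MIN(len, sizeof(buf)));
        /* ... operate on the partial copy ... */
    }
}
#endif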
8df1cd07 2532/* warning: addr must be aligned */
fdfba1a2 2533static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2534 enum device_endian endian)
8df1cd07 2535{
8df1cd07 2536 uint8_t *ptr;
791af8c8 2537 uint64_t val;
5c8a00ce 2538 MemoryRegion *mr;
149f54b5
PB
2539 hwaddr l = 4;
2540 hwaddr addr1;
8df1cd07 2541
fdfba1a2 2542 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2543 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2544 /* I/O case */
5c8a00ce 2545 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2546#if defined(TARGET_WORDS_BIGENDIAN)
2547 if (endian == DEVICE_LITTLE_ENDIAN) {
2548 val = bswap32(val);
2549 }
2550#else
2551 if (endian == DEVICE_BIG_ENDIAN) {
2552 val = bswap32(val);
2553 }
2554#endif
8df1cd07
FB
2555 } else {
2556 /* RAM case */
5c8a00ce 2557 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2558 & TARGET_PAGE_MASK)
149f54b5 2559 + addr1);
1e78bcc1
AG
2560 switch (endian) {
2561 case DEVICE_LITTLE_ENDIAN:
2562 val = ldl_le_p(ptr);
2563 break;
2564 case DEVICE_BIG_ENDIAN:
2565 val = ldl_be_p(ptr);
2566 break;
2567 default:
2568 val = ldl_p(ptr);
2569 break;
2570 }
8df1cd07
FB
2571 }
2572 return val;
2573}
2574
fdfba1a2 2575uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2576{
fdfba1a2 2577 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2578}
2579
fdfba1a2 2580uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2581{
fdfba1a2 2582 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2583}
2584
fdfba1a2 2585uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2586{
fdfba1a2 2587 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2588}
2589
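/*
 * Illustrative sketch (not part of the original file): the _le/_be
 * variants above let a device model read guest structures with a fixed
 * byte order regardless of the target's native endianness. The helper
 * name and descriptor layout are invented.
 */
#if 0
static uint32_t example_read_le_descriptor_word(hwaddr desc_addr)
{
    /* Always interpreted as little-endian, even on a big-endian target. */
    return ldl_le_phys(&address_space_memory, desc_addr);
}
#endif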
84b7b8e7 2590/* warning: addr must be aligned */
2c17449b 2591static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2592 enum device_endian endian)
84b7b8e7 2593{
84b7b8e7
FB
2594 uint8_t *ptr;
2595 uint64_t val;
5c8a00ce 2596 MemoryRegion *mr;
149f54b5
PB
2597 hwaddr l = 8;
2598 hwaddr addr1;
84b7b8e7 2599
2c17449b 2600 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2601 false);
2602 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2603 /* I/O case */
5c8a00ce 2604 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2605#if defined(TARGET_WORDS_BIGENDIAN)
2606 if (endian == DEVICE_LITTLE_ENDIAN) {
2607 val = bswap64(val);
2608 }
2609#else
2610 if (endian == DEVICE_BIG_ENDIAN) {
2611 val = bswap64(val);
2612 }
84b7b8e7
FB
2613#endif
2614 } else {
2615 /* RAM case */
5c8a00ce 2616 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2617 & TARGET_PAGE_MASK)
149f54b5 2618 + addr1);
1e78bcc1
AG
2619 switch (endian) {
2620 case DEVICE_LITTLE_ENDIAN:
2621 val = ldq_le_p(ptr);
2622 break;
2623 case DEVICE_BIG_ENDIAN:
2624 val = ldq_be_p(ptr);
2625 break;
2626 default:
2627 val = ldq_p(ptr);
2628 break;
2629 }
84b7b8e7
FB
2630 }
2631 return val;
2632}
2633
2c17449b 2634uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2635{
2c17449b 2636 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2637}
2638
2c17449b 2639uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2640{
2c17449b 2641 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2642}
2643
2c17449b 2644uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2645{
2c17449b 2646 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2647}
2648
aab33094 2649/* XXX: optimize */
2c17449b 2650uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2651{
2652 uint8_t val;
2c17449b 2653 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2654 return val;
2655}
2656
733f0b02 2657/* warning: addr must be aligned */
41701aa4 2658static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2659 enum device_endian endian)
aab33094 2660{
733f0b02
MT
2661 uint8_t *ptr;
2662 uint64_t val;
5c8a00ce 2663 MemoryRegion *mr;
149f54b5
PB
2664 hwaddr l = 2;
2665 hwaddr addr1;
733f0b02 2666
41701aa4 2667 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2668 false);
2669 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2670 /* I/O case */
5c8a00ce 2671 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2672#if defined(TARGET_WORDS_BIGENDIAN)
2673 if (endian == DEVICE_LITTLE_ENDIAN) {
2674 val = bswap16(val);
2675 }
2676#else
2677 if (endian == DEVICE_BIG_ENDIAN) {
2678 val = bswap16(val);
2679 }
2680#endif
733f0b02
MT
2681 } else {
2682 /* RAM case */
5c8a00ce 2683 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2684 & TARGET_PAGE_MASK)
149f54b5 2685 + addr1);
1e78bcc1
AG
2686 switch (endian) {
2687 case DEVICE_LITTLE_ENDIAN:
2688 val = lduw_le_p(ptr);
2689 break;
2690 case DEVICE_BIG_ENDIAN:
2691 val = lduw_be_p(ptr);
2692 break;
2693 default:
2694 val = lduw_p(ptr);
2695 break;
2696 }
733f0b02
MT
2697 }
2698 return val;
aab33094
FB
2699}
2700
41701aa4 2701uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2702{
41701aa4 2703 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2704}
2705
41701aa4 2706uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2707{
41701aa4 2708 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2709}
2710
41701aa4 2711uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2712{
41701aa4 2713 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2714}
2715
8df1cd07
FB
2716/* warning: addr must be aligned. The ram page is not marked as dirty
2717 and the code inside is not invalidated. It is useful if the dirty
2718 bits are used to track modified PTEs */
2198a121 2719void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2720{
8df1cd07 2721 uint8_t *ptr;
5c8a00ce 2722 MemoryRegion *mr;
149f54b5
PB
2723 hwaddr l = 4;
2724 hwaddr addr1;
8df1cd07 2725
2198a121 2726 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2727 true);
2728 if (l < 4 || !memory_access_is_direct(mr, true)) {
2729 io_mem_write(mr, addr1, val, 4);
8df1cd07 2730 } else {
5c8a00ce 2731 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2732 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2733 stl_p(ptr, val);
74576198
AL
2734
2735 if (unlikely(in_migration)) {
a2cd8c85 2736 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2737 /* invalidate code */
2738 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2739 /* set dirty bit */
6886867e 2740 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
74576198
AL
2741 }
2742 }
8df1cd07
FB
2743 }
2744}
2745
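/*
 * Illustrative sketch (not part of the original file): target MMU code
 * uses stl_phys_notdirty() when it sets accessed/dirty bits inside a
 * guest page-table entry, so that the update itself does not flag the
 * page as containing modified code. The PTE bit value below is invented.
 */
#if 0
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    pte |= 0x20;    /* hypothetical "accessed" bit */
    stl_phys_notdirty(as, pte_addr, pte);
}
#endif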
2746/* warning: addr must be aligned */
ab1da857
EI
2747static inline void stl_phys_internal(AddressSpace *as,
2748 hwaddr addr, uint32_t val,
1e78bcc1 2749 enum device_endian endian)
8df1cd07 2750{
8df1cd07 2751 uint8_t *ptr;
5c8a00ce 2752 MemoryRegion *mr;
149f54b5
PB
2753 hwaddr l = 4;
2754 hwaddr addr1;
8df1cd07 2755
ab1da857 2756 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2757 true);
2758 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2759#if defined(TARGET_WORDS_BIGENDIAN)
2760 if (endian == DEVICE_LITTLE_ENDIAN) {
2761 val = bswap32(val);
2762 }
2763#else
2764 if (endian == DEVICE_BIG_ENDIAN) {
2765 val = bswap32(val);
2766 }
2767#endif
5c8a00ce 2768 io_mem_write(mr, addr1, val, 4);
8df1cd07 2769 } else {
8df1cd07 2770 /* RAM case */
5c8a00ce 2771 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2772 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2773 switch (endian) {
2774 case DEVICE_LITTLE_ENDIAN:
2775 stl_le_p(ptr, val);
2776 break;
2777 case DEVICE_BIG_ENDIAN:
2778 stl_be_p(ptr, val);
2779 break;
2780 default:
2781 stl_p(ptr, val);
2782 break;
2783 }
51d7a9eb 2784 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2785 }
2786}
2787
ab1da857 2788void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2789{
ab1da857 2790 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2791}
2792
ab1da857 2793void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2794{
ab1da857 2795 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2796}
2797
ab1da857 2798void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2799{
ab1da857 2800 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2801}
2802
aab33094 2803/* XXX: optimize */
db3be60d 2804void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2805{
2806 uint8_t v = val;
db3be60d 2807 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2808}
2809
733f0b02 2810/* warning: addr must be aligned */
5ce5944d
EI
2811static inline void stw_phys_internal(AddressSpace *as,
2812 hwaddr addr, uint32_t val,
1e78bcc1 2813 enum device_endian endian)
aab33094 2814{
733f0b02 2815 uint8_t *ptr;
5c8a00ce 2816 MemoryRegion *mr;
149f54b5
PB
2817 hwaddr l = 2;
2818 hwaddr addr1;
733f0b02 2819
5ce5944d 2820 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2821 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2822#if defined(TARGET_WORDS_BIGENDIAN)
2823 if (endian == DEVICE_LITTLE_ENDIAN) {
2824 val = bswap16(val);
2825 }
2826#else
2827 if (endian == DEVICE_BIG_ENDIAN) {
2828 val = bswap16(val);
2829 }
2830#endif
5c8a00ce 2831 io_mem_write(mr, addr1, val, 2);
733f0b02 2832 } else {
733f0b02 2833 /* RAM case */
5c8a00ce 2834 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2835 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2836 switch (endian) {
2837 case DEVICE_LITTLE_ENDIAN:
2838 stw_le_p(ptr, val);
2839 break;
2840 case DEVICE_BIG_ENDIAN:
2841 stw_be_p(ptr, val);
2842 break;
2843 default:
2844 stw_p(ptr, val);
2845 break;
2846 }
51d7a9eb 2847 invalidate_and_set_dirty(addr1, 2);
733f0b02 2848 }
aab33094
FB
2849}
2850
5ce5944d 2851void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2852{
5ce5944d 2853 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2854}
2855
5ce5944d 2856void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2857{
5ce5944d 2858 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2859}
2860
5ce5944d 2861void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2862{
5ce5944d 2863 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2864}
2865
aab33094 2866/* XXX: optimize */
f606604f 2867void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2868{
2869 val = tswap64(val);
f606604f 2870 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2871}
2872
f606604f 2873void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2874{
2875 val = cpu_to_le64(val);
f606604f 2876 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2877}
2878
f606604f 2879void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2880{
2881 val = cpu_to_be64(val);
f606604f 2882 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2883}
2884
5e2972fd 2885/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2886int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2887 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2888{
2889 int l;
a8170e5e 2890 hwaddr phys_addr;
9b3c35e0 2891 target_ulong page;
13eb76e0
FB
2892
2893 while (len > 0) {
2894 page = addr & TARGET_PAGE_MASK;
f17ec444 2895 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2896 /* if no physical page mapped, return an error */
2897 if (phys_addr == -1)
2898 return -1;
2899 l = (page + TARGET_PAGE_SIZE) - addr;
2900 if (l > len)
2901 l = len;
5e2972fd 2902 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2903 if (is_write) {
2904 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2905 } else {
2906 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2907 }
13eb76e0
FB
2908 len -= l;
2909 buf += l;
2910 addr += l;
2911 }
2912 return 0;
2913}
a68fe89c 2914#endif
13eb76e0 2915
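/*
 * Illustrative sketch (not part of the original file): cpu_memory_rw_debug()
 * is the entry point the gdbstub and the monitor use to read guest virtual
 * memory through a particular CPU's MMU. The helper name is invented;
 * first_cpu is QEMU's usual shorthand for the first vCPU.
 */
#if 0
static int example_peek_guest_vaddr(target_ulong vaddr, uint8_t *buf, int len)
{
    /* Returns 0 on success, -1 if any page in the range is unmapped. */
    return cpu_memory_rw_debug(first_cpu, vaddr, buf, len, 0);
}
#endif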
8e4a424b
BS
2916/*
2917 * A helper function for the _utterly broken_ virtio device model to find out if
2918 * it's running on a big endian machine. Don't do this at home kids!
2919 */
98ed8ecf
GK
2920bool target_words_bigendian(void);
2921bool target_words_bigendian(void)
8e4a424b
BS
2922{
2923#if defined(TARGET_WORDS_BIGENDIAN)
2924 return true;
2925#else
2926 return false;
2927#endif
2928}
2929
76f35538 2930#ifndef CONFIG_USER_ONLY
a8170e5e 2931bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2932{
5c8a00ce 2933 MemoryRegion *mr;
149f54b5 2934 hwaddr l = 1;
76f35538 2935
5c8a00ce
PB
2936 mr = address_space_translate(&address_space_memory,
2937 phys_addr, &phys_addr, &l, false);
76f35538 2938
5c8a00ce
PB
2939 return !(memory_region_is_ram(mr) ||
2940 memory_region_is_romd(mr));
76f35538 2941}
bd2fa51f
MH
2942
2943void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2944{
2945 RAMBlock *block;
2946
2947 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 2948 func(block->host, block->offset, block->used_length, opaque);
bd2fa51f
MH
2949 }
2950}
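/*
 * Illustrative sketch (not part of the original file): a callback matching
 * the invocation above, i.e. it receives the block's host pointer, its
 * ram_addr_t offset and its used length. The accounting it performs and
 * the parameter names are assumed for illustration.
 */
#if 0
static void example_count_ram(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_count_ram, &total);
    return total;
}
#endif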
ec3f8c99 2951#endif