54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
2ff3de68 34#include "sysemu/sysemu.h"
0d09e41a 35#include "hw/xen/xen.h"
1de7afc9
PB
36#include "qemu/timer.h"
37#include "qemu/config-file.h"
022c62cb 38#include "exec/memory.h"
9c17d615 39#include "sysemu/dma.h"
022c62cb 40#include "exec/address-spaces.h"
53a5960a
PB
41#if defined(CONFIG_USER_ONLY)
42#include <qemu.h>
432d268c 43#else /* !CONFIG_USER_ONLY */
9c17d615 44#include "sysemu/xen-mapcache.h"
6506e4f9 45#include "trace.h"
53a5960a 46#endif
0d6d3c87 47#include "exec/cpu-all.h"
54936004 48
022c62cb 49#include "exec/cputlb.h"
5b6dd868 50#include "translate-all.h"
0cac1b66 51
022c62cb 52#include "exec/memory-internal.h"
67d95c15 53
db7b5426 54//#define DEBUG_SUBPAGE
1196be37 55
e2eef170 56#if !defined(CONFIG_USER_ONLY)
74576198 57static int in_migration;
94a6b54f 58
a3161038 59RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
60
61static MemoryRegion *system_memory;
309cb471 62static MemoryRegion *system_io;
62152b8a 63
f6790af6
AK
64AddressSpace address_space_io;
65AddressSpace address_space_memory;
2673a5da 66
0844e007 67MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 68static MemoryRegion io_mem_unassigned;
0e0df1e2 69
e2eef170 70#endif
9fa3e853 71
bdc44640 72struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601
FB
73/* The current CPU in the current thread. It is only valid inside
74 cpu_exec(). */
4917cf44 75DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 76/* 0 = Do not count executed instructions.
bf20dc07 77 1 = Precise instruction counting.
2e70f6ef 78 2 = Adaptive rate instruction counting. */
5708fc66 79int use_icount;
6a00d601 80
e2eef170 81#if !defined(CONFIG_USER_ONLY)
4346ae3e 82
1db8abb1
PB
83typedef struct PhysPageEntry PhysPageEntry;
84
85struct PhysPageEntry {
9736e55b
MT
86 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
87 uint16_t skip : 1;
88 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
1db8abb1
PB
89 uint16_t ptr : 15;
90};
91
03f49957
PB
92/* Size of the L2 (and L3, etc) page tables. */
93#define ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
94
95#define P_L2_BITS 10
96#define P_L2_SIZE (1 << P_L2_BITS)
97
98#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
99
100typedef PhysPageEntry Node[P_L2_SIZE];
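/* The phys_map is a radix tree: every interior node is an array of
 * P_L2_SIZE PhysPageEntry slots, and P_L2_LEVELS such levels cover the
 * ADDR_SPACE_BITS - TARGET_PAGE_BITS bits of page index. */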
0475d94f 101
1db8abb1
PB
102struct AddressSpaceDispatch {
103 /* This is a multi-level map on the physical address space.
104 * The bottom level has pointers to MemoryRegionSections.
105 */
106 PhysPageEntry phys_map;
0475d94f
PB
107 Node *nodes;
108 MemoryRegionSection *sections;
acc9d80b 109 AddressSpace *as;
1db8abb1
PB
110};
111
90260c6c
JK
112#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
113typedef struct subpage_t {
114 MemoryRegion iomem;
acc9d80b 115 AddressSpace *as;
90260c6c
JK
116 hwaddr base;
117 uint16_t sub_section[TARGET_PAGE_SIZE];
118} subpage_t;
119
b41aac4f
LPF
120#define PHYS_SECTION_UNASSIGNED 0
121#define PHYS_SECTION_NOTDIRTY 1
122#define PHYS_SECTION_ROM 2
123#define PHYS_SECTION_WATCH 3
5312bd8b 124
9affd6fc
PB
125typedef struct PhysPageMap {
126 unsigned sections_nb;
127 unsigned sections_nb_alloc;
128 unsigned nodes_nb;
129 unsigned nodes_nb_alloc;
130 Node *nodes;
131 MemoryRegionSection *sections;
132} PhysPageMap;
133
6092666e 134static PhysPageMap *prev_map;
9affd6fc 135static PhysPageMap next_map;
d6f2ea22 136
07f07b31 137#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 138
e2eef170 139static void io_mem_init(void);
62152b8a 140static void memory_map_init(void);
e2eef170 141
1ec9b909 142static MemoryRegion io_mem_watch;
6658ffb8 143#endif
fd6ce8f6 144
6d9a1304 145#if !defined(CONFIG_USER_ONLY)
d6f2ea22 146
f7bf5461 147static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 148{
9affd6fc
PB
149 if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
150 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
151 16);
152 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
153 next_map.nodes_nb + nodes);
154 next_map.nodes = g_renew(Node, next_map.nodes,
155 next_map.nodes_nb_alloc);
d6f2ea22 156 }
f7bf5461
AK
157}
158
159static uint16_t phys_map_node_alloc(void)
160{
161 unsigned i;
162 uint16_t ret;
163
9affd6fc 164 ret = next_map.nodes_nb++;
f7bf5461 165 assert(ret != PHYS_MAP_NODE_NIL);
9affd6fc 166 assert(ret != next_map.nodes_nb_alloc);
03f49957 167 for (i = 0; i < P_L2_SIZE; ++i) {
9736e55b 168 next_map.nodes[ret][i].skip = 1;
9affd6fc 169 next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 170 }
f7bf5461 171 return ret;
d6f2ea22
AK
172}
173
a8170e5e
AK
174static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
175 hwaddr *nb, uint16_t leaf,
2999097b 176 int level)
f7bf5461
AK
177{
178 PhysPageEntry *p;
179 int i;
03f49957 180 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 181
9736e55b 182 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800 183 lp->ptr = phys_map_node_alloc();
9affd6fc 184 p = next_map.nodes[lp->ptr];
f7bf5461 185 if (level == 0) {
03f49957 186 for (i = 0; i < P_L2_SIZE; i++) {
9736e55b 187 p[i].skip = 0;
b41aac4f 188 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 189 }
67c4d23c 190 }
f7bf5461 191 } else {
9affd6fc 192 p = next_map.nodes[lp->ptr];
92e873b9 193 }
03f49957 194 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 195
03f49957 196 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 197 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 198 lp->skip = 0;
c19e8800 199 lp->ptr = leaf;
07f07b31
AK
200 *index += step;
201 *nb -= step;
2999097b
AK
202 } else {
203 phys_page_set_level(lp, index, nb, leaf, level - 1);
204 }
205 ++lp;
f7bf5461
AK
206 }
207}
208
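/* Map the page range [index, index + nb) to the given leaf section,
 * allocating intermediate nodes on demand via phys_page_set_level(). */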
ac1970fb 209static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 210 hwaddr index, hwaddr nb,
2999097b 211 uint16_t leaf)
f7bf5461 212{
2999097b 213 /* Wildly overreserve - it doesn't matter much. */
07f07b31 214 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 215
ac1970fb 216 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
217}
218
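/* Walk the tree from the root: each level consumes P_L2_BITS of the page
 * index, 'skip' counts the levels to descend (0 marks a leaf), and a NIL
 * pointer resolves to the unassigned section. */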
9affd6fc
PB
219static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
220 Node *nodes, MemoryRegionSection *sections)
92e873b9 221{
31ab2b4a
AK
222 PhysPageEntry *p;
223 int i;
f1f6e3b8 224
9736e55b 225 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 226 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 227 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 228 }
9affd6fc 229 p = nodes[lp.ptr];
03f49957 230 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 231 }
9affd6fc 232 return &sections[lp.ptr];
f3705d53
AK
233}
234
e5548617
BS
235bool memory_region_is_unassigned(MemoryRegion *mr)
236{
2a8e7499 237 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 238 && mr != &io_mem_watch;
fd6ce8f6 239}
149f54b5 240
c7086b4a 241static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
242 hwaddr addr,
243 bool resolve_subpage)
9f029603 244{
90260c6c
JK
245 MemoryRegionSection *section;
246 subpage_t *subpage;
247
0475d94f
PB
248 section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
249 d->nodes, d->sections);
90260c6c
JK
250 if (resolve_subpage && section->mr->subpage) {
251 subpage = container_of(section->mr, subpage_t, iomem);
0475d94f 252 section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
253 }
254 return section;
9f029603
JK
255}
256
90260c6c 257static MemoryRegionSection *
c7086b4a 258address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 259 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
260{
261 MemoryRegionSection *section;
262 Int128 diff;
263
c7086b4a 264 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
265 /* Compute offset within MemoryRegionSection */
266 addr -= section->offset_within_address_space;
267
268 /* Compute offset within MemoryRegion */
269 *xlat = addr + section->offset_within_region;
270
271 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 272 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
273 return section;
274}
90260c6c 275
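/* Translate addr through any IOMMUs on the path to the terminal
 * MemoryRegion, clamping *plen to the reachable length; accesses the
 * IOMMU forbids resolve to io_mem_unassigned. */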
5c8a00ce
PB
276MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
277 hwaddr *xlat, hwaddr *plen,
278 bool is_write)
90260c6c 279{
30951157
AK
280 IOMMUTLBEntry iotlb;
281 MemoryRegionSection *section;
282 MemoryRegion *mr;
283 hwaddr len = *plen;
284
285 for (;;) {
c7086b4a 286 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157
AK
287 mr = section->mr;
288
289 if (!mr->iommu_ops) {
290 break;
291 }
292
293 iotlb = mr->iommu_ops->translate(mr, addr);
294 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
295 | (addr & iotlb.addr_mask));
296 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
297 if (!(iotlb.perm & (1 << is_write))) {
298 mr = &io_mem_unassigned;
299 break;
300 }
301
302 as = iotlb.target_as;
303 }
304
305 *plen = len;
306 *xlat = addr;
307 return mr;
90260c6c
JK
308}
309
310MemoryRegionSection *
311address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
312 hwaddr *plen)
313{
30951157 314 MemoryRegionSection *section;
c7086b4a 315 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
30951157
AK
316
317 assert(!section->mr->iommu_ops);
318 return section;
90260c6c 319}
5b6dd868 320#endif
fd6ce8f6 321
5b6dd868 322void cpu_exec_init_all(void)
fdbb84d1 323{
5b6dd868 324#if !defined(CONFIG_USER_ONLY)
b2a8658e 325 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
326 memory_map_init();
327 io_mem_init();
fdbb84d1 328#endif
5b6dd868 329}
fdbb84d1 330
b170fce3 331#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
332
333static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 334{
259186a7 335 CPUState *cpu = opaque;
a513fe19 336
5b6dd868
BS
337 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
338 version_id is increased. */
259186a7
AF
339 cpu->interrupt_request &= ~0x01;
340 tlb_flush(cpu->env_ptr, 1);
5b6dd868
BS
341
342 return 0;
a513fe19 343}
7501267e 344
1a1562f5 345const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
346 .name = "cpu_common",
347 .version_id = 1,
348 .minimum_version_id = 1,
349 .minimum_version_id_old = 1,
350 .post_load = cpu_common_post_load,
351 .fields = (VMStateField []) {
259186a7
AF
352 VMSTATE_UINT32(halted, CPUState),
353 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868
BS
354 VMSTATE_END_OF_LIST()
355 }
356};
1a1562f5 357
5b6dd868 358#endif
ea041c0e 359
38d8f5c8 360CPUState *qemu_get_cpu(int index)
ea041c0e 361{
bdc44640 362 CPUState *cpu;
ea041c0e 363
bdc44640 364 CPU_FOREACH(cpu) {
55e5c285 365 if (cpu->cpu_index == index) {
bdc44640 366 return cpu;
55e5c285 367 }
ea041c0e 368 }
5b6dd868 369
bdc44640 370 return NULL;
ea041c0e
FB
371}
372
5b6dd868 373void cpu_exec_init(CPUArchState *env)
ea041c0e 374{
5b6dd868 375 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 376 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 377 CPUState *some_cpu;
5b6dd868
BS
378 int cpu_index;
379
380#if defined(CONFIG_USER_ONLY)
381 cpu_list_lock();
382#endif
5b6dd868 383 cpu_index = 0;
bdc44640 384 CPU_FOREACH(some_cpu) {
5b6dd868
BS
385 cpu_index++;
386 }
55e5c285 387 cpu->cpu_index = cpu_index;
1b1ed8dc 388 cpu->numa_node = 0;
5b6dd868
BS
389 QTAILQ_INIT(&env->breakpoints);
390 QTAILQ_INIT(&env->watchpoints);
391#ifndef CONFIG_USER_ONLY
392 cpu->thread_id = qemu_get_thread_id();
393#endif
bdc44640 394 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
395#if defined(CONFIG_USER_ONLY)
396 cpu_list_unlock();
397#endif
e0d47944
AF
398 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
399 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
400 }
5b6dd868 401#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
402 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
403 cpu_save, cpu_load, env);
b170fce3 404 assert(cc->vmsd == NULL);
e0d47944 405 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 406#endif
b170fce3
AF
407 if (cc->vmsd != NULL) {
408 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
409 }
ea041c0e
FB
410}
411
1fddef4b 412#if defined(TARGET_HAS_ICE)
94df27fd 413#if defined(CONFIG_USER_ONLY)
00b941e5 414static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
415{
416 tb_invalidate_phys_page_range(pc, pc + 1, 0);
417}
418#else
00b941e5 419static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 420{
e8262a1b
MF
421 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
422 if (phys != -1) {
423 tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
424 }
1e7855a5 425}
c27004ec 426#endif
94df27fd 427#endif /* TARGET_HAS_ICE */
d720b93d 428
c527ee8f 429#if defined(CONFIG_USER_ONLY)
9349b4f9 430void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f
PB
431
432{
433}
434
9349b4f9 435int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f
PB
436 int flags, CPUWatchpoint **watchpoint)
437{
438 return -ENOSYS;
439}
440#else
6658ffb8 441/* Add a watchpoint. */
9349b4f9 442int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 443 int flags, CPUWatchpoint **watchpoint)
6658ffb8 444{
b4051334 445 target_ulong len_mask = ~(len - 1);
c0ce998e 446 CPUWatchpoint *wp;
6658ffb8 447
b4051334 448 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
449 if ((len & (len - 1)) || (addr & ~len_mask) ||
450 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
451 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
452 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
453 return -EINVAL;
454 }
7267c094 455 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
456
457 wp->vaddr = addr;
b4051334 458 wp->len_mask = len_mask;
a1d1bb31
AL
459 wp->flags = flags;
460
2dc9f411 461 /* keep all GDB-injected watchpoints in front */
c0ce998e 462 if (flags & BP_GDB)
72cf2d4f 463 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 464 else
72cf2d4f 465 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 466
6658ffb8 467 tlb_flush_page(env, addr);
a1d1bb31
AL
468
469 if (watchpoint)
470 *watchpoint = wp;
471 return 0;
6658ffb8
PB
472}
473
a1d1bb31 474/* Remove a specific watchpoint. */
9349b4f9 475int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 476 int flags)
6658ffb8 477{
b4051334 478 target_ulong len_mask = ~(len - 1);
a1d1bb31 479 CPUWatchpoint *wp;
6658ffb8 480
72cf2d4f 481 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 482 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 483 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 484 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
485 return 0;
486 }
487 }
a1d1bb31 488 return -ENOENT;
6658ffb8
PB
489}
490
a1d1bb31 491/* Remove a specific watchpoint by reference. */
9349b4f9 492void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 493{
72cf2d4f 494 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 495
a1d1bb31
AL
496 tlb_flush_page(env, watchpoint->vaddr);
497
7267c094 498 g_free(watchpoint);
a1d1bb31
AL
499}
500
501/* Remove all matching watchpoints. */
9349b4f9 502void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 503{
c0ce998e 504 CPUWatchpoint *wp, *next;
a1d1bb31 505
72cf2d4f 506 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
507 if (wp->flags & mask)
508 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 509 }
7d03f82f 510}
c527ee8f 511#endif
7d03f82f 512
a1d1bb31 513/* Add a breakpoint. */
9349b4f9 514int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 515 CPUBreakpoint **breakpoint)
4c3a88a2 516{
1fddef4b 517#if defined(TARGET_HAS_ICE)
c0ce998e 518 CPUBreakpoint *bp;
3b46e624 519
7267c094 520 bp = g_malloc(sizeof(*bp));
4c3a88a2 521
a1d1bb31
AL
522 bp->pc = pc;
523 bp->flags = flags;
524
2dc9f411 525 /* keep all GDB-injected breakpoints in front */
00b941e5 526 if (flags & BP_GDB) {
72cf2d4f 527 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
00b941e5 528 } else {
72cf2d4f 529 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
00b941e5 530 }
3b46e624 531
00b941e5 532 breakpoint_invalidate(ENV_GET_CPU(env), pc);
a1d1bb31 533
00b941e5 534 if (breakpoint) {
a1d1bb31 535 *breakpoint = bp;
00b941e5 536 }
4c3a88a2
FB
537 return 0;
538#else
a1d1bb31 539 return -ENOSYS;
4c3a88a2
FB
540#endif
541}
542
a1d1bb31 543/* Remove a specific breakpoint. */
9349b4f9 544int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 545{
7d03f82f 546#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
547 CPUBreakpoint *bp;
548
72cf2d4f 549 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
550 if (bp->pc == pc && bp->flags == flags) {
551 cpu_breakpoint_remove_by_ref(env, bp);
552 return 0;
553 }
7d03f82f 554 }
a1d1bb31
AL
555 return -ENOENT;
556#else
557 return -ENOSYS;
7d03f82f
EI
558#endif
559}
560
a1d1bb31 561/* Remove a specific breakpoint by reference. */
9349b4f9 562void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 563{
1fddef4b 564#if defined(TARGET_HAS_ICE)
72cf2d4f 565 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 566
00b941e5 567 breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);
a1d1bb31 568
7267c094 569 g_free(breakpoint);
a1d1bb31
AL
570#endif
571}
572
573/* Remove all matching breakpoints. */
9349b4f9 574void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
575{
576#if defined(TARGET_HAS_ICE)
c0ce998e 577 CPUBreakpoint *bp, *next;
a1d1bb31 578
72cf2d4f 579 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
580 if (bp->flags & mask)
581 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 582 }
4c3a88a2
FB
583#endif
584}
585
c33a346e
FB
586/* Enable or disable single-step mode. EXCP_DEBUG is returned by the
587 CPU loop after each instruction. */
3825b28f 588void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 589{
1fddef4b 590#if defined(TARGET_HAS_ICE)
ed2803da
AF
591 if (cpu->singlestep_enabled != enabled) {
592 cpu->singlestep_enabled = enabled;
593 if (kvm_enabled()) {
38e478ec 594 kvm_update_guest_debug(cpu, 0);
ed2803da 595 } else {
ccbb4d44 596 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 597 /* XXX: only flush what is necessary */
38e478ec 598 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
599 tb_flush(env);
600 }
c33a346e
FB
601 }
602#endif
603}
604
9349b4f9 605void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e 606{
878096ee 607 CPUState *cpu = ENV_GET_CPU(env);
7501267e 608 va_list ap;
493ae1f0 609 va_list ap2;
7501267e
FB
610
611 va_start(ap, fmt);
493ae1f0 612 va_copy(ap2, ap);
7501267e
FB
613 fprintf(stderr, "qemu: fatal: ");
614 vfprintf(stderr, fmt, ap);
615 fprintf(stderr, "\n");
878096ee 616 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
617 if (qemu_log_enabled()) {
618 qemu_log("qemu: fatal: ");
619 qemu_log_vprintf(fmt, ap2);
620 qemu_log("\n");
a0762859 621 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 622 qemu_log_flush();
93fcfe39 623 qemu_log_close();
924edcae 624 }
493ae1f0 625 va_end(ap2);
f9373291 626 va_end(ap);
fd052bf6
RV
627#if defined(CONFIG_USER_ONLY)
628 {
629 struct sigaction act;
630 sigfillset(&act.sa_mask);
631 act.sa_handler = SIG_DFL;
632 sigaction(SIGABRT, &act, NULL);
633 }
634#endif
7501267e
FB
635 abort();
636}
637
0124311e 638#if !defined(CONFIG_USER_ONLY)
041603fe
PB
639static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
640{
641 RAMBlock *block;
642
643 /* The list is protected by the iothread lock here. */
644 block = ram_list.mru_block;
645 if (block && addr - block->offset < block->length) {
646 goto found;
647 }
648 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
649 if (addr - block->offset < block->length) {
650 goto found;
651 }
652 }
653
654 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
655 abort();
656
657found:
658 ram_list.mru_block = block;
659 return block;
660}
661
d24981d3
JQ
662static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
663 uintptr_t length)
664{
041603fe
PB
665 RAMBlock *block;
666 ram_addr_t start1;
d24981d3 667
041603fe
PB
668 block = qemu_get_ram_block(start);
669 assert(block == qemu_get_ram_block(end - 1));
670 start1 = (uintptr_t)block->host + (start - block->offset);
671 cpu_tlb_reset_dirty_all(start1, length);
d24981d3
JQ
672}
673
5579c7f3 674/* Note: start and end must be within the same ram block. */
c227f099 675void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 676 int dirty_flags)
1ccde1cb 677{
d24981d3 678 uintptr_t length;
1ccde1cb
FB
679
680 start &= TARGET_PAGE_MASK;
681 end = TARGET_PAGE_ALIGN(end);
682
683 length = end - start;
684 if (length == 0)
685 return;
f7c11b53 686 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 687
d24981d3
JQ
688 if (tcg_enabled()) {
689 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 690 }
1ccde1cb
FB
691}
692
8b9c99d9 693static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 694{
f6f3fbca 695 int ret = 0;
74576198 696 in_migration = enable;
f6f3fbca 697 return ret;
74576198
AL
698}
699
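/* Build the iotlb value for a TLB entry: RAM pages encode the ram_addr
 * ORed with the notdirty/ROM section index, MMIO pages encode the section
 * number plus the in-section offset, and pages with a matching watchpoint
 * are forced through PHYS_SECTION_WATCH. */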
a8170e5e 700hwaddr memory_region_section_get_iotlb(CPUArchState *env,
149f54b5
PB
701 MemoryRegionSection *section,
702 target_ulong vaddr,
703 hwaddr paddr, hwaddr xlat,
704 int prot,
705 target_ulong *address)
e5548617 706{
a8170e5e 707 hwaddr iotlb;
e5548617
BS
708 CPUWatchpoint *wp;
709
cc5bea60 710 if (memory_region_is_ram(section->mr)) {
e5548617
BS
711 /* Normal RAM. */
712 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 713 + xlat;
e5548617 714 if (!section->readonly) {
b41aac4f 715 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 716 } else {
b41aac4f 717 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
718 }
719 } else {
0475d94f 720 iotlb = section - address_space_memory.dispatch->sections;
149f54b5 721 iotlb += xlat;
e5548617
BS
722 }
723
724 /* Make accesses to pages with watchpoints go via the
725 watchpoint trap routines. */
726 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
727 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
728 /* Avoid trapping reads of pages with a write breakpoint. */
729 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 730 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
731 *address |= TLB_MMIO;
732 break;
733 }
734 }
735 }
736
737 return iotlb;
738}
9fa3e853
FB
739#endif /* defined(CONFIG_USER_ONLY) */
740
e2eef170 741#if !defined(CONFIG_USER_ONLY)
8da3ff18 742
c227f099 743static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 744 uint16_t section);
acc9d80b 745static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 746
575ddeb4 747static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
91138037
MA
748
749/*
750 * Set a custom physical guest memory allocator.
751 * Accelerators with unusual needs may need this. Hopefully, we can
752 * get rid of it eventually.
753 */
575ddeb4 754void phys_mem_set_alloc(void *(*alloc)(size_t))
91138037
MA
755{
756 phys_mem_alloc = alloc;
757}
758
5312bd8b
AK
759static uint16_t phys_section_add(MemoryRegionSection *section)
760{
68f3f65b
PB
761 /* The physical section number is ORed with a page-aligned
762 * pointer to produce the iotlb entries. Thus it should
763 * never overflow into the page-aligned value.
764 */
9affd6fc 765 assert(next_map.sections_nb < TARGET_PAGE_SIZE);
68f3f65b 766
9affd6fc
PB
767 if (next_map.sections_nb == next_map.sections_nb_alloc) {
768 next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
769 16);
770 next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
771 next_map.sections_nb_alloc);
5312bd8b 772 }
9affd6fc 773 next_map.sections[next_map.sections_nb] = *section;
dfde4e6e 774 memory_region_ref(section->mr);
9affd6fc 775 return next_map.sections_nb++;
5312bd8b
AK
776}
777
058bc4b5
PB
778static void phys_section_destroy(MemoryRegion *mr)
779{
dfde4e6e
PB
780 memory_region_unref(mr);
781
058bc4b5
PB
782 if (mr->subpage) {
783 subpage_t *subpage = container_of(mr, subpage_t, iomem);
784 memory_region_destroy(&subpage->iomem);
785 g_free(subpage);
786 }
787}
788
6092666e 789static void phys_sections_free(PhysPageMap *map)
5312bd8b 790{
9affd6fc
PB
791 while (map->sections_nb > 0) {
792 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
793 phys_section_destroy(section->mr);
794 }
9affd6fc
PB
795 g_free(map->sections);
796 g_free(map->nodes);
6092666e 797 g_free(map);
5312bd8b
AK
798}
799
ac1970fb 800static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
801{
802 subpage_t *subpage;
a8170e5e 803 hwaddr base = section->offset_within_address_space
0f0cb164 804 & TARGET_PAGE_MASK;
9affd6fc
PB
805 MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
806 next_map.nodes, next_map.sections);
0f0cb164
AK
807 MemoryRegionSection subsection = {
808 .offset_within_address_space = base,
052e87b0 809 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 810 };
a8170e5e 811 hwaddr start, end;
0f0cb164 812
f3705d53 813 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 814
f3705d53 815 if (!(existing->mr->subpage)) {
acc9d80b 816 subpage = subpage_init(d->as, base);
0f0cb164 817 subsection.mr = &subpage->iomem;
ac1970fb 818 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 819 phys_section_add(&subsection));
0f0cb164 820 } else {
f3705d53 821 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
822 }
823 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 824 end = start + int128_get64(section->size) - 1;
0f0cb164
AK
825 subpage_register(subpage, start, end, phys_section_add(section));
826}
827
828
052e87b0
PB
829static void register_multipage(AddressSpaceDispatch *d,
830 MemoryRegionSection *section)
33417e70 831{
a8170e5e 832 hwaddr start_addr = section->offset_within_address_space;
5312bd8b 833 uint16_t section_index = phys_section_add(section);
052e87b0
PB
834 uint64_t num_pages = int128_get64(int128_rshift(section->size,
835 TARGET_PAGE_BITS));
dd81124b 836
733d5ef5
PB
837 assert(num_pages);
838 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
839}
840
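/* mem_add() is called for every MemoryRegionSection: the page-aligned
 * middle is registered as whole pages, while an unaligned head or tail
 * is routed through a subpage so several sections can share one page. */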
ac1970fb 841static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 842{
89ae337a 843 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 844 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 845 MemoryRegionSection now = *section, remain = *section;
052e87b0 846 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 847
733d5ef5
PB
848 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
849 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
850 - now.offset_within_address_space;
851
052e87b0 852 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 853 register_subpage(d, &now);
733d5ef5 854 } else {
052e87b0 855 now.size = int128_zero();
733d5ef5 856 }
052e87b0
PB
857 while (int128_ne(remain.size, now.size)) {
858 remain.size = int128_sub(remain.size, now.size);
859 remain.offset_within_address_space += int128_get64(now.size);
860 remain.offset_within_region += int128_get64(now.size);
69b67646 861 now = remain;
052e87b0 862 if (int128_lt(remain.size, page_size)) {
733d5ef5 863 register_subpage(d, &now);
88266249 864 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 865 now.size = page_size;
ac1970fb 866 register_subpage(d, &now);
69b67646 867 } else {
052e87b0 868 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 869 register_multipage(d, &now);
69b67646 870 }
0f0cb164
AK
871 }
872}
873
62a2744c
SY
874void qemu_flush_coalesced_mmio_buffer(void)
875{
876 if (kvm_enabled())
877 kvm_flush_coalesced_mmio_buffer();
878}
879
b2a8658e
UD
880void qemu_mutex_lock_ramlist(void)
881{
882 qemu_mutex_lock(&ram_list.mutex);
883}
884
885void qemu_mutex_unlock_ramlist(void)
886{
887 qemu_mutex_unlock(&ram_list.mutex);
888}
889
e1e84ba0 890#ifdef __linux__
c902760f
MT
891
892#include <sys/vfs.h>
893
894#define HUGETLBFS_MAGIC 0x958458f6
895
896static long gethugepagesize(const char *path)
897{
898 struct statfs fs;
899 int ret;
900
901 do {
9742bf26 902 ret = statfs(path, &fs);
c902760f
MT
903 } while (ret != 0 && errno == EINTR);
904
905 if (ret != 0) {
9742bf26
YT
906 perror(path);
907 return 0;
c902760f
MT
908 }
909
910 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 911 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
912
913 return fs.f_bsize;
914}
915
ef36fa14
MT
916static sigjmp_buf sigjump;
917
918static void sigbus_handler(int signal)
919{
920 siglongjmp(sigjump, 1);
921}
922
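/* Back a RAM block with a file on a hugetlbfs mount (-mem-path): create
 * an unlinked temp file, round the size up to a hugepage multiple, mmap
 * it, and touch every page up front when mem_prealloc is set. */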
04b16653
AW
923static void *file_ram_alloc(RAMBlock *block,
924 ram_addr_t memory,
925 const char *path)
c902760f
MT
926{
927 char *filename;
8ca761f6
PF
928 char *sanitized_name;
929 char *c;
c902760f
MT
930 void *area;
931 int fd;
c902760f
MT
932 unsigned long hpagesize;
933
934 hpagesize = gethugepagesize(path);
935 if (!hpagesize) {
9742bf26 936 return NULL;
c902760f
MT
937 }
938
939 if (memory < hpagesize) {
940 return NULL;
941 }
942
943 if (kvm_enabled() && !kvm_has_sync_mmu()) {
944 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
945 return NULL;
946 }
947
8ca761f6
PF
948 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
949 sanitized_name = g_strdup(block->mr->name);
950 for (c = sanitized_name; *c != '\0'; c++) {
951 if (*c == '/')
952 *c = '_';
953 }
954
955 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
956 sanitized_name);
957 g_free(sanitized_name);
c902760f
MT
958
959 fd = mkstemp(filename);
960 if (fd < 0) {
9742bf26 961 perror("unable to create backing store for hugepages");
e4ada482 962 g_free(filename);
9742bf26 963 return NULL;
c902760f
MT
964 }
965 unlink(filename);
e4ada482 966 g_free(filename);
c902760f
MT
967
968 memory = (memory+hpagesize-1) & ~(hpagesize-1);
969
970 /*
971 * ftruncate is not supported by hugetlbfs in older
972 * hosts, so don't bother bailing out on errors.
973 * If anything goes wrong with it under other filesystems,
974 * mmap will fail.
975 */
976 if (ftruncate(fd, memory))
9742bf26 977 perror("ftruncate");
c902760f 978
c902760f 979 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
c902760f 980 if (area == MAP_FAILED) {
9742bf26
YT
981 perror("file_ram_alloc: can't mmap RAM pages");
982 close(fd);
983 return (NULL);
c902760f 984 }
ef36fa14
MT
985
986 if (mem_prealloc) {
987 int ret, i;
988 struct sigaction act, oldact;
989 sigset_t set, oldset;
990
991 memset(&act, 0, sizeof(act));
992 act.sa_handler = &sigbus_handler;
993 act.sa_flags = 0;
994
995 ret = sigaction(SIGBUS, &act, &oldact);
996 if (ret) {
997 perror("file_ram_alloc: failed to install signal handler");
998 exit(1);
999 }
1000
1001 /* unblock SIGBUS */
1002 sigemptyset(&set);
1003 sigaddset(&set, SIGBUS);
1004 pthread_sigmask(SIG_UNBLOCK, &set, &oldset);
1005
1006 if (sigsetjmp(sigjump, 1)) {
1007 fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
1008 exit(1);
1009 }
1010
1011 /* MAP_POPULATE silently ignores failures */
1012 for (i = 0; i < (memory/hpagesize)-1; i++) {
1013 memset(area + (hpagesize*i), 0, 1);
1014 }
1015
1016 ret = sigaction(SIGBUS, &oldact, NULL);
1017 if (ret) {
1018 perror("file_ram_alloc: failed to reinstall signal handler");
1019 exit(1);
1020 }
1021
1022 pthread_sigmask(SIG_SETMASK, &oldset, NULL);
1023 }
1024
04b16653 1025 block->fd = fd;
c902760f
MT
1026 return area;
1027}
e1e84ba0
MA
1028#else
1029static void *file_ram_alloc(RAMBlock *block,
1030 ram_addr_t memory,
1031 const char *path)
1032{
1033 fprintf(stderr, "-mem-path not supported on this host\n");
1034 exit(1);
1035}
c902760f
MT
1036#endif
1037
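/* Pick an offset in ram_addr_t space for a new block: scan the gaps
 * between existing blocks and take the smallest one that fits. */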
d17b5288 1038static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1039{
1040 RAMBlock *block, *next_block;
3e837b2c 1041 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1042
49cd9ac6
SH
1043 assert(size != 0); /* it would otherwise hand out the same offset multiple times */
1044
a3161038 1045 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1046 return 0;
1047
a3161038 1048 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1049 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1050
1051 end = block->offset + block->length;
1052
a3161038 1053 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1054 if (next_block->offset >= end) {
1055 next = MIN(next, next_block->offset);
1056 }
1057 }
1058 if (next - end >= size && next - end < mingap) {
3e837b2c 1059 offset = end;
04b16653
AW
1060 mingap = next - end;
1061 }
1062 }
3e837b2c
AW
1063
1064 if (offset == RAM_ADDR_MAX) {
1065 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1066 (uint64_t)size);
1067 abort();
1068 }
1069
04b16653
AW
1070 return offset;
1071}
1072
652d7ec2 1073ram_addr_t last_ram_offset(void)
d17b5288
AW
1074{
1075 RAMBlock *block;
1076 ram_addr_t last = 0;
1077
a3161038 1078 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1079 last = MAX(last, block->offset + block->length);
1080
1081 return last;
1082}
1083
ddb97f1d
JB
1084static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1085{
1086 int ret;
ddb97f1d
JB
1087
1088 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2ff3de68
MA
1089 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1090 "dump-guest-core", true)) {
ddb97f1d
JB
1091 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1092 if (ret) {
1093 perror("qemu_madvise");
1094 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1095 "but dump_guest_core=off specified\n");
1096 }
1097 }
1098}
1099
c5705a77 1100void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
1101{
1102 RAMBlock *new_block, *block;
1103
c5705a77 1104 new_block = NULL;
a3161038 1105 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77
AK
1106 if (block->offset == addr) {
1107 new_block = block;
1108 break;
1109 }
1110 }
1111 assert(new_block);
1112 assert(!new_block->idstr[0]);
84b89d78 1113
09e5ab63
AL
1114 if (dev) {
1115 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1116 if (id) {
1117 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1118 g_free(id);
84b89d78
CM
1119 }
1120 }
1121 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1122
b2a8658e
UD
1123 /* This assumes the iothread lock is taken here too. */
1124 qemu_mutex_lock_ramlist();
a3161038 1125 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1126 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1127 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1128 new_block->idstr);
1129 abort();
1130 }
1131 }
b2a8658e 1132 qemu_mutex_unlock_ramlist();
c5705a77
AK
1133}
1134
8490fc78
LC
1135static int memory_try_enable_merging(void *addr, size_t len)
1136{
2ff3de68 1137 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1138 /* disabled by the user */
1139 return 0;
1140 }
1141
1142 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1143}
1144
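/* Create a RAM block: adopt 'host' if given, otherwise allocate via
 * xen_ram_alloc, file_ram_alloc (-mem-path) or phys_mem_alloc, then
 * insert it into ram_list, which is kept sorted from biggest to
 * smallest block. */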
c5705a77
AK
1145ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1146 MemoryRegion *mr)
1147{
abb26d63 1148 RAMBlock *block, *new_block;
c5705a77
AK
1149
1150 size = TARGET_PAGE_ALIGN(size);
1151 new_block = g_malloc0(sizeof(*new_block));
3435f395 1152 new_block->fd = -1;
84b89d78 1153
b2a8658e
UD
1154 /* This assumes the iothread lock is taken here too. */
1155 qemu_mutex_lock_ramlist();
7c637366 1156 new_block->mr = mr;
432d268c 1157 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1158 if (host) {
1159 new_block->host = host;
cd19cfa2 1160 new_block->flags |= RAM_PREALLOC_MASK;
dfeaf2ab
MA
1161 } else if (xen_enabled()) {
1162 if (mem_path) {
1163 fprintf(stderr, "-mem-path not supported with Xen\n");
1164 exit(1);
1165 }
1166 xen_ram_alloc(new_block->offset, size, mr);
6977dfe6
YT
1167 } else {
1168 if (mem_path) {
e1e84ba0
MA
1169 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1170 /*
1171 * file_ram_alloc() needs to allocate just like
1172 * phys_mem_alloc, but we haven't bothered to provide
1173 * a hook there.
1174 */
1175 fprintf(stderr,
1176 "-mem-path not supported with this accelerator\n");
1177 exit(1);
1178 }
6977dfe6 1179 new_block->host = file_ram_alloc(new_block, size, mem_path);
0628c182
MA
1180 }
1181 if (!new_block->host) {
91138037 1182 new_block->host = phys_mem_alloc(size);
39228250
MA
1183 if (!new_block->host) {
1184 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1185 new_block->mr->name, strerror(errno));
1186 exit(1);
1187 }
8490fc78 1188 memory_try_enable_merging(new_block->host, size);
6977dfe6 1189 }
c902760f 1190 }
94a6b54f
PB
1191 new_block->length = size;
1192
abb26d63
PB
1193 /* Keep the list sorted from biggest to smallest block. */
1194 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1195 if (block->length < new_block->length) {
1196 break;
1197 }
1198 }
1199 if (block) {
1200 QTAILQ_INSERT_BEFORE(block, new_block, next);
1201 } else {
1202 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1203 }
0d6d3c87 1204 ram_list.mru_block = NULL;
94a6b54f 1205
f798b07f 1206 ram_list.version++;
b2a8658e 1207 qemu_mutex_unlock_ramlist();
f798b07f 1208
7267c094 1209 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1210 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
1211 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1212 0, size >> TARGET_PAGE_BITS);
1720aeee 1213 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1214
ddb97f1d 1215 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1216 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
3e469dbf 1217 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
ddb97f1d 1218
6f0437e8
JK
1219 if (kvm_enabled())
1220 kvm_setup_guest_memory(new_block->host, size);
1221
94a6b54f
PB
1222 return new_block->offset;
1223}
e9a1ab19 1224
c5705a77 1225ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1226{
c5705a77 1227 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1228}
1229
1f2e98b6
AW
1230void qemu_ram_free_from_ptr(ram_addr_t addr)
1231{
1232 RAMBlock *block;
1233
b2a8658e
UD
1234 /* This assumes the iothread lock is taken here too. */
1235 qemu_mutex_lock_ramlist();
a3161038 1236 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1237 if (addr == block->offset) {
a3161038 1238 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1239 ram_list.mru_block = NULL;
f798b07f 1240 ram_list.version++;
7267c094 1241 g_free(block);
b2a8658e 1242 break;
1f2e98b6
AW
1243 }
1244 }
b2a8658e 1245 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1246}
1247
c227f099 1248void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1249{
04b16653
AW
1250 RAMBlock *block;
1251
b2a8658e
UD
1252 /* This assumes the iothread lock is taken here too. */
1253 qemu_mutex_lock_ramlist();
a3161038 1254 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1255 if (addr == block->offset) {
a3161038 1256 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1257 ram_list.mru_block = NULL;
f798b07f 1258 ram_list.version++;
cd19cfa2
HY
1259 if (block->flags & RAM_PREALLOC_MASK) {
1260 ;
dfeaf2ab
MA
1261 } else if (xen_enabled()) {
1262 xen_invalidate_map_cache_entry(block->host);
089f3f76 1263#ifndef _WIN32
3435f395
MA
1264 } else if (block->fd >= 0) {
1265 munmap(block->host, block->length);
1266 close(block->fd);
089f3f76 1267#endif
04b16653 1268 } else {
dfeaf2ab 1269 qemu_anon_ram_free(block->host, block->length);
04b16653 1270 }
7267c094 1271 g_free(block);
b2a8658e 1272 break;
04b16653
AW
1273 }
1274 }
b2a8658e 1275 qemu_mutex_unlock_ramlist();
04b16653 1276
e9a1ab19
FB
1277}
1278
cd19cfa2
HY
1279#ifndef _WIN32
1280void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1281{
1282 RAMBlock *block;
1283 ram_addr_t offset;
1284 int flags;
1285 void *area, *vaddr;
1286
a3161038 1287 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1288 offset = addr - block->offset;
1289 if (offset < block->length) {
1290 vaddr = block->host + offset;
1291 if (block->flags & RAM_PREALLOC_MASK) {
1292 ;
dfeaf2ab
MA
1293 } else if (xen_enabled()) {
1294 abort();
cd19cfa2
HY
1295 } else {
1296 flags = MAP_FIXED;
1297 munmap(vaddr, length);
3435f395 1298 if (block->fd >= 0) {
cd19cfa2 1299#ifdef MAP_POPULATE
3435f395
MA
1300 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1301 MAP_PRIVATE;
fd28aa13 1302#else
3435f395 1303 flags |= MAP_PRIVATE;
cd19cfa2 1304#endif
3435f395
MA
1305 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1306 flags, block->fd, offset);
cd19cfa2 1307 } else {
2eb9fbaa
MA
1308 /*
1309 * Remap needs to match alloc. Accelerators that
1310 * set phys_mem_alloc never remap. If they did,
1311 * we'd need a remap hook here.
1312 */
1313 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1314
cd19cfa2
HY
1315 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1316 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1317 flags, -1, 0);
cd19cfa2
HY
1318 }
1319 if (area != vaddr) {
f15fbc4b
AP
1320 fprintf(stderr, "Could not remap addr: "
1321 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1322 length, addr);
1323 exit(1);
1324 }
8490fc78 1325 memory_try_enable_merging(vaddr, length);
ddb97f1d 1326 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1327 }
1328 return;
1329 }
1330 }
1331}
1332#endif /* !_WIN32 */
1333
1b5ec234
PB
1334/* Return a host pointer to ram allocated with qemu_ram_alloc.
1335 With the exception of the softmmu code in this file, this should
1336 only be used for local memory (e.g. video ram) that the device owns,
1337 and knows it isn't going to access beyond the end of the block.
1338
1339 It should not be used for general purpose DMA.
1340 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1341 */
1342void *qemu_get_ram_ptr(ram_addr_t addr)
1343{
1344 RAMBlock *block = qemu_get_ram_block(addr);
1345
0d6d3c87
PB
1346 if (xen_enabled()) {
1347 /* We need to check if the requested address is in the RAM
1348 * because we don't want to map the entire memory in QEMU.
1349 * In that case just map until the end of the page.
1350 */
1351 if (block->offset == 0) {
1352 return xen_map_cache(addr, 0, 0);
1353 } else if (block->host == NULL) {
1354 block->host =
1355 xen_map_cache(block->offset, block->length, 1);
1356 }
1357 }
1358 return block->host + (addr - block->offset);
dc828ca1
PB
1359}
1360
38bee5dc
SS
1361/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1362 * but takes a size argument */
cb85f7ab 1363static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1364{
8ab934f9
SS
1365 if (*size == 0) {
1366 return NULL;
1367 }
868bb33f 1368 if (xen_enabled()) {
e41d7c69 1369 return xen_map_cache(addr, *size, 1);
868bb33f 1370 } else {
38bee5dc
SS
1371 RAMBlock *block;
1372
a3161038 1373 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1374 if (addr - block->offset < block->length) {
1375 if (addr - block->offset + *size > block->length)
1376 *size = block->length - addr + block->offset;
1377 return block->host + (addr - block->offset);
1378 }
1379 }
1380
1381 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1382 abort();
38bee5dc
SS
1383 }
1384}
1385
7443b437
PB
1386/* Some of the softmmu routines need to translate from a host pointer
1387 (typically a TLB entry) back to a ram offset. */
1b5ec234 1388MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1389{
94a6b54f
PB
1390 RAMBlock *block;
1391 uint8_t *host = ptr;
1392
868bb33f 1393 if (xen_enabled()) {
e41d7c69 1394 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1395 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1396 }
1397
23887b79
PB
1398 block = ram_list.mru_block;
1399 if (block && block->host && host - block->host < block->length) {
1400 goto found;
1401 }
1402
a3161038 1403 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
1404 /* This can happen when the block is not mapped. */
1405 if (block->host == NULL) {
1406 continue;
1407 }
f471a17e 1408 if (host - block->host < block->length) {
23887b79 1409 goto found;
f471a17e 1410 }
94a6b54f 1411 }
432d268c 1412
1b5ec234 1413 return NULL;
23887b79
PB
1414
1415found:
1416 *ram_addr = block->offset + (host - block->host);
1b5ec234 1417 return block->mr;
e890261f 1418}
f471a17e 1419
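/* Writes that land on a page whose code-dirty flag is clean are routed
 * here: invalidate any TBs for the page, perform the store, update the
 * dirty flags, and reset the TLB entry so later writes go straight to RAM. */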
a8170e5e 1420static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1421 uint64_t val, unsigned size)
9fa3e853 1422{
3a7d929e 1423 int dirty_flags;
f7c11b53 1424 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1425 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
0e0df1e2 1426 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1427 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1428 }
0e0df1e2
AK
1429 switch (size) {
1430 case 1:
1431 stb_p(qemu_get_ram_ptr(ram_addr), val);
1432 break;
1433 case 2:
1434 stw_p(qemu_get_ram_ptr(ram_addr), val);
1435 break;
1436 case 4:
1437 stl_p(qemu_get_ram_ptr(ram_addr), val);
1438 break;
1439 default:
1440 abort();
3a7d929e 1441 }
f23db169 1442 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1443 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1444 /* we remove the notdirty callback only if the code has been
1445 flushed */
4917cf44
AF
1446 if (dirty_flags == 0xff) {
1447 CPUArchState *env = current_cpu->env_ptr;
1448 tlb_set_dirty(env, env->mem_io_vaddr);
1449 }
9fa3e853
FB
1450}
1451
b018ddf6
PB
1452static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1453 unsigned size, bool is_write)
1454{
1455 return is_write;
1456}
1457
0e0df1e2 1458static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1459 .write = notdirty_mem_write,
b018ddf6 1460 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1461 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1462};
1463
0f459d16 1464/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1465static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1466{
4917cf44 1467 CPUArchState *env = current_cpu->env_ptr;
06d55cc1 1468 target_ulong pc, cs_base;
0f459d16 1469 target_ulong vaddr;
a1d1bb31 1470 CPUWatchpoint *wp;
06d55cc1 1471 int cpu_flags;
0f459d16 1472
06d55cc1
AL
1473 if (env->watchpoint_hit) {
1474 /* We re-entered the check after replacing the TB. Now raise
1475 * the debug interrupt so that it will trigger after the
1476 * current instruction. */
c3affe56 1477 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1478 return;
1479 }
2e70f6ef 1480 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1481 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1482 if ((vaddr == (wp->vaddr & len_mask) ||
1483 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1484 wp->flags |= BP_WATCHPOINT_HIT;
1485 if (!env->watchpoint_hit) {
1486 env->watchpoint_hit = wp;
5a316526 1487 tb_check_watchpoint(env);
6e140f28
AL
1488 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1489 env->exception_index = EXCP_DEBUG;
488d6577 1490 cpu_loop_exit(env);
6e140f28
AL
1491 } else {
1492 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1493 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1494 cpu_resume_from_signal(env, NULL);
6e140f28 1495 }
06d55cc1 1496 }
6e140f28
AL
1497 } else {
1498 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1499 }
1500 }
1501}
1502
6658ffb8
PB
1503/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1504 so these check for a hit then pass through to the normal out-of-line
1505 phys routines. */
a8170e5e 1506static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1507 unsigned size)
6658ffb8 1508{
1ec9b909
AK
1509 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1510 switch (size) {
1511 case 1: return ldub_phys(addr);
1512 case 2: return lduw_phys(addr);
1513 case 4: return ldl_phys(addr);
1514 default: abort();
1515 }
6658ffb8
PB
1516}
1517
a8170e5e 1518static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1519 uint64_t val, unsigned size)
6658ffb8 1520{
1ec9b909
AK
1521 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1522 switch (size) {
67364150
MF
1523 case 1:
1524 stb_phys(addr, val);
1525 break;
1526 case 2:
1527 stw_phys(addr, val);
1528 break;
1529 case 4:
1530 stl_phys(addr, val);
1531 break;
1ec9b909
AK
1532 default: abort();
1533 }
6658ffb8
PB
1534}
1535
1ec9b909
AK
1536static const MemoryRegionOps watch_mem_ops = {
1537 .read = watch_mem_read,
1538 .write = watch_mem_write,
1539 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1540};
6658ffb8 1541
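/* A subpage_t backs a page that is split between several sections:
 * accesses are bounced back into the owning AddressSpace at
 * base + addr, where per-section dispatch happens again. */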
a8170e5e 1542static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1543 unsigned len)
db7b5426 1544{
acc9d80b
JK
1545 subpage_t *subpage = opaque;
1546 uint8_t buf[4];
791af8c8 1547
db7b5426 1548#if defined(DEBUG_SUBPAGE)
016e9d62 1549 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1550 subpage, len, addr);
db7b5426 1551#endif
acc9d80b
JK
1552 address_space_read(subpage->as, addr + subpage->base, buf, len);
1553 switch (len) {
1554 case 1:
1555 return ldub_p(buf);
1556 case 2:
1557 return lduw_p(buf);
1558 case 4:
1559 return ldl_p(buf);
1560 default:
1561 abort();
1562 }
db7b5426
BS
1563}
1564
a8170e5e 1565static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1566 uint64_t value, unsigned len)
db7b5426 1567{
acc9d80b
JK
1568 subpage_t *subpage = opaque;
1569 uint8_t buf[4];
1570
db7b5426 1571#if defined(DEBUG_SUBPAGE)
016e9d62 1572 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1573 " value %"PRIx64"\n",
1574 __func__, subpage, len, addr, value);
db7b5426 1575#endif
acc9d80b
JK
1576 switch (len) {
1577 case 1:
1578 stb_p(buf, value);
1579 break;
1580 case 2:
1581 stw_p(buf, value);
1582 break;
1583 case 4:
1584 stl_p(buf, value);
1585 break;
1586 default:
1587 abort();
1588 }
1589 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1590}
1591
c353e4cc 1592static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 1593 unsigned len, bool is_write)
c353e4cc 1594{
acc9d80b 1595 subpage_t *subpage = opaque;
c353e4cc 1596#if defined(DEBUG_SUBPAGE)
016e9d62 1597 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 1598 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1599#endif
1600
acc9d80b 1601 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 1602 len, is_write);
c353e4cc
PB
1603}
1604
70c68e44
AK
1605static const MemoryRegionOps subpage_ops = {
1606 .read = subpage_read,
1607 .write = subpage_write,
c353e4cc 1608 .valid.accepts = subpage_accepts,
70c68e44 1609 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1610};
1611
c227f099 1612static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1613 uint16_t section)
db7b5426
BS
1614{
1615 int idx, eidx;
1616
1617 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1618 return -1;
1619 idx = SUBPAGE_IDX(start);
1620 eidx = SUBPAGE_IDX(end);
1621#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1622 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1623 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1624#endif
db7b5426 1625 for (; idx <= eidx; idx++) {
5312bd8b 1626 mmio->sub_section[idx] = section;
db7b5426
BS
1627 }
1628
1629 return 0;
1630}
1631
acc9d80b 1632static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1633{
c227f099 1634 subpage_t *mmio;
db7b5426 1635
7267c094 1636 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1637
acc9d80b 1638 mmio->as = as;
1eec614b 1639 mmio->base = base;
2c9b15ca 1640 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
70c68e44 1641 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1642 mmio->iomem.subpage = true;
db7b5426 1643#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1644 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1645 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1646#endif
b41aac4f 1647 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1648
1649 return mmio;
1650}
1651
5312bd8b
AK
1652static uint16_t dummy_section(MemoryRegion *mr)
1653{
1654 MemoryRegionSection section = {
1655 .mr = mr,
1656 .offset_within_address_space = 0,
1657 .offset_within_region = 0,
052e87b0 1658 .size = int128_2_64(),
5312bd8b
AK
1659 };
1660
1661 return phys_section_add(&section);
1662}
1663
a8170e5e 1664MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1665{
0475d94f 1666 return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1667}
1668
e9179ce1
AK
1669static void io_mem_init(void)
1670{
2c9b15ca
PB
1671 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1672 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
0e0df1e2 1673 "unassigned", UINT64_MAX);
2c9b15ca 1674 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
0e0df1e2 1675 "notdirty", UINT64_MAX);
2c9b15ca 1676 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1ec9b909 1677 "watch", UINT64_MAX);
e9179ce1
AK
1678}
1679
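/* Topology changes rebuild the dispatch map: mem_begin() starts a fresh
 * AddressSpaceDispatch, mem_add() fills it from the new sections, and
 * mem_commit() swaps it in; core_begin()/core_commit() manage the shared
 * node and section arrays. */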
ac1970fb 1680static void mem_begin(MemoryListener *listener)
00752703
PB
1681{
1682 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1683 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1684
9736e55b 1685 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1686 d->as = as;
1687 as->next_dispatch = d;
1688}
1689
1690static void mem_commit(MemoryListener *listener)
ac1970fb 1691{
89ae337a 1692 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1693 AddressSpaceDispatch *cur = as->dispatch;
1694 AddressSpaceDispatch *next = as->next_dispatch;
1695
1696 next->nodes = next_map.nodes;
1697 next->sections = next_map.sections;
ac1970fb 1698
0475d94f
PB
1699 as->dispatch = next;
1700 g_free(cur);
ac1970fb
AK
1701}
1702
50c1e149
AK
1703static void core_begin(MemoryListener *listener)
1704{
b41aac4f
LPF
1705 uint16_t n;
1706
6092666e
PB
1707 prev_map = g_new(PhysPageMap, 1);
1708 *prev_map = next_map;
1709
9affd6fc 1710 memset(&next_map, 0, sizeof(next_map));
b41aac4f
LPF
1711 n = dummy_section(&io_mem_unassigned);
1712 assert(n == PHYS_SECTION_UNASSIGNED);
1713 n = dummy_section(&io_mem_notdirty);
1714 assert(n == PHYS_SECTION_NOTDIRTY);
1715 n = dummy_section(&io_mem_rom);
1716 assert(n == PHYS_SECTION_ROM);
1717 n = dummy_section(&io_mem_watch);
1718 assert(n == PHYS_SECTION_WATCH);
50c1e149
AK
1719}
1720
9affd6fc
PB
1721/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
1722 * All AddressSpaceDispatch instances have switched to the next map.
1723 */
1724static void core_commit(MemoryListener *listener)
1725{
6092666e 1726 phys_sections_free(prev_map);
9affd6fc
PB
1727}
1728
1d71148e 1729static void tcg_commit(MemoryListener *listener)
50c1e149 1730{
182735ef 1731 CPUState *cpu;
117712c3
AK
1732
1733 /* since each CPU stores ram addresses in its TLB cache, we must
1734 reset the modified entries */
1735 /* XXX: slow ! */
bdc44640 1736 CPU_FOREACH(cpu) {
182735ef
AF
1737 CPUArchState *env = cpu->env_ptr;
1738
117712c3
AK
1739 tlb_flush(env, 1);
1740 }
50c1e149
AK
1741}
1742
93632747
AK
1743static void core_log_global_start(MemoryListener *listener)
1744{
1745 cpu_physical_memory_set_dirty_tracking(1);
1746}
1747
1748static void core_log_global_stop(MemoryListener *listener)
1749{
1750 cpu_physical_memory_set_dirty_tracking(0);
1751}
1752
93632747 1753static MemoryListener core_memory_listener = {
50c1e149 1754 .begin = core_begin,
9affd6fc 1755 .commit = core_commit,
93632747
AK
1756 .log_global_start = core_log_global_start,
1757 .log_global_stop = core_log_global_stop,
ac1970fb 1758 .priority = 1,
93632747
AK
1759};
1760
1d71148e
AK
1761static MemoryListener tcg_memory_listener = {
1762 .commit = tcg_commit,
1763};
1764
ac1970fb
AK
1765void address_space_init_dispatch(AddressSpace *as)
1766{
00752703 1767 as->dispatch = NULL;
89ae337a 1768 as->dispatch_listener = (MemoryListener) {
ac1970fb 1769 .begin = mem_begin,
00752703 1770 .commit = mem_commit,
ac1970fb
AK
1771 .region_add = mem_add,
1772 .region_nop = mem_add,
1773 .priority = 0,
1774 };
89ae337a 1775 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1776}
1777
83f3c251
AK
1778void address_space_destroy_dispatch(AddressSpace *as)
1779{
1780 AddressSpaceDispatch *d = as->dispatch;
1781
89ae337a 1782 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1783 g_free(d);
1784 as->dispatch = NULL;
1785}
1786
62152b8a
AK
1787static void memory_map_init(void)
1788{
7267c094 1789 system_memory = g_malloc(sizeof(*system_memory));
03f49957
PB
1790
1791 assert(ADDR_SPACE_BITS <= 64);
1792
1793 memory_region_init(system_memory, NULL, "system",
1794 ADDR_SPACE_BITS == 64 ?
1795 UINT64_MAX : (0x1ULL << ADDR_SPACE_BITS));
7dca8043 1796 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1797
7267c094 1798 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
1799 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1800 65536);
7dca8043 1801 address_space_init(&address_space_io, system_io, "I/O");
93632747 1802
f6790af6 1803 memory_listener_register(&core_memory_listener, &address_space_memory);
2641689a
LG
1804 if (tcg_enabled()) {
1805 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1806 }
62152b8a
AK
1807}
1808
1809MemoryRegion *get_system_memory(void)
1810{
1811 return system_memory;
1812}
1813
309cb471
AK
1814MemoryRegion *get_system_io(void)
1815{
1816 return system_io;
1817}
1818
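/* Illustrative sketch (editorial, not part of exec.c): how board code
 * typically consumes the regions created by memory_map_init().
 * my_board_init_ram() and its ram_size argument are hypothetical; the
 * memory_region_* calls are the standard API of this tree.
 */
static void my_board_init_ram(ram_addr_t ram_size)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    /* Allocate host memory backing the guest RAM... */
    memory_region_init_ram(ram, NULL, "my-board.ram", ram_size);
    vmstate_register_ram_global(ram);

    /* ...and map it at guest physical address 0 inside the region
     * returned by get_system_memory().
     */
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
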
e2eef170
PB
1819#endif /* !defined(CONFIG_USER_ONLY) */
1820
13eb76e0
FB
1821/* physical memory access (slow version, mainly for debug) */
1822#if defined(CONFIG_USER_ONLY)
f17ec444 1823int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 1824 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1825{
1826 int l, flags;
1827 target_ulong page;
53a5960a 1828 void * p;
13eb76e0
FB
1829
1830 while (len > 0) {
1831 page = addr & TARGET_PAGE_MASK;
1832 l = (page + TARGET_PAGE_SIZE) - addr;
1833 if (l > len)
1834 l = len;
1835 flags = page_get_flags(page);
1836 if (!(flags & PAGE_VALID))
a68fe89c 1837 return -1;
13eb76e0
FB
1838 if (is_write) {
1839 if (!(flags & PAGE_WRITE))
a68fe89c 1840 return -1;
579a97f7 1841 /* XXX: this code should not depend on lock_user */
72fb7daa 1842 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1843 return -1;
72fb7daa
AJ
1844 memcpy(p, buf, l);
1845 unlock_user(p, addr, l);
13eb76e0
FB
1846 } else {
1847 if (!(flags & PAGE_READ))
a68fe89c 1848 return -1;
579a97f7 1849 /* XXX: this code should not depend on lock_user */
72fb7daa 1850 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1851 return -1;
72fb7daa 1852 memcpy(buf, p, l);
5b257578 1853 unlock_user(p, addr, 0);
13eb76e0
FB
1854 }
1855 len -= l;
1856 buf += l;
1857 addr += l;
1858 }
a68fe89c 1859 return 0;
13eb76e0 1860}
8df1cd07 1861
13eb76e0 1862#else
51d7a9eb 1863
a8170e5e
AK
1864static void invalidate_and_set_dirty(hwaddr addr,
1865 hwaddr length)
51d7a9eb
AP
1866{
1867 if (!cpu_physical_memory_is_dirty(addr)) {
1868 /* invalidate code */
1869 tb_invalidate_phys_page_range(addr, addr + length, 0);
1870 /* set dirty bit */
1871 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1872 }
e226939d 1873 xen_modified_memory(addr, length);
51d7a9eb
AP
1874}
1875
2bbfa05d
PB
1876static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1877{
1878 if (memory_region_is_ram(mr)) {
1879 return !(is_write && mr->readonly);
1880 }
1881 if (memory_region_is_romd(mr)) {
1882 return !is_write;
1883 }
1884
1885 return false;
1886}
1887
23326164 1888static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 1889{
e1622f4b 1890 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
1891
1892 /* Regions are assumed to support 1-4 byte accesses unless
1893 otherwise specified. */
23326164
RH
1894 if (access_size_max == 0) {
1895 access_size_max = 4;
1896 }
1897
1898 /* Bound the maximum access by the alignment of the address. */
1899 if (!mr->ops->impl.unaligned) {
1900 unsigned align_size_max = addr & -addr;
1901 if (align_size_max != 0 && align_size_max < access_size_max) {
1902 access_size_max = align_size_max;
1903 }
82f2563f 1904 }
23326164
RH
1905
1906 /* Don't attempt accesses larger than the maximum. */
1907 if (l > access_size_max) {
1908 l = access_size_max;
82f2563f 1909 }
098178f2
PB
1910 if (l & (l - 1)) {
1911 l = 1 << (qemu_fls(l) - 1);
1912 }
23326164
RH
1913
1914 return l;
82f2563f
PB
1915}
1916
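/* Worked example (editorial, illustrative): for a region declaring
 * valid.max_access_size == 4 and no unaligned support, a request of
 * l == 8 bytes at addr == 0x1002 is first capped to 4 by the region's
 * limit, then to 2 by the alignment term (0x1002 & -0x1002 == 2), so
 * address_space_rw() below issues a 2-byte access and loops for the rest.
 */
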
fd8aaa76 1917bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1918 int len, bool is_write)
13eb76e0 1919{
149f54b5 1920 hwaddr l;
13eb76e0 1921 uint8_t *ptr;
791af8c8 1922 uint64_t val;
149f54b5 1923 hwaddr addr1;
5c8a00ce 1924 MemoryRegion *mr;
fd8aaa76 1925 bool error = false;
3b46e624 1926
13eb76e0 1927 while (len > 0) {
149f54b5 1928 l = len;
5c8a00ce 1929 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 1930
13eb76e0 1931 if (is_write) {
5c8a00ce
PB
1932 if (!memory_access_is_direct(mr, is_write)) {
1933 l = memory_access_size(mr, l, addr1);
4917cf44 1934 /* XXX: could force current_cpu to NULL to avoid
6a00d601 1935 potential bugs */
23326164
RH
1936 switch (l) {
1937 case 8:
1938 /* 64 bit write access */
1939 val = ldq_p(buf);
1940 error |= io_mem_write(mr, addr1, val, 8);
1941 break;
1942 case 4:
1c213d19 1943 /* 32 bit write access */
c27004ec 1944 val = ldl_p(buf);
5c8a00ce 1945 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
1946 break;
1947 case 2:
1c213d19 1948 /* 16 bit write access */
c27004ec 1949 val = lduw_p(buf);
5c8a00ce 1950 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
1951 break;
1952 case 1:
1c213d19 1953 /* 8 bit write access */
c27004ec 1954 val = ldub_p(buf);
5c8a00ce 1955 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
1956 break;
1957 default:
1958 abort();
13eb76e0 1959 }
2bbfa05d 1960 } else {
5c8a00ce 1961 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 1962 /* RAM case */
5579c7f3 1963 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1964 memcpy(ptr, buf, l);
51d7a9eb 1965 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1966 }
1967 } else {
5c8a00ce 1968 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 1969 /* I/O case */
5c8a00ce 1970 l = memory_access_size(mr, l, addr1);
23326164
RH
1971 switch (l) {
1972 case 8:
1973 /* 64 bit read access */
1974 error |= io_mem_read(mr, addr1, &val, 8);
1975 stq_p(buf, val);
1976 break;
1977 case 4:
13eb76e0 1978 /* 32 bit read access */
5c8a00ce 1979 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 1980 stl_p(buf, val);
23326164
RH
1981 break;
1982 case 2:
13eb76e0 1983 /* 16 bit read access */
5c8a00ce 1984 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 1985 stw_p(buf, val);
23326164
RH
1986 break;
1987 case 1:
1c213d19 1988 /* 8 bit read access */
5c8a00ce 1989 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 1990 stb_p(buf, val);
23326164
RH
1991 break;
1992 default:
1993 abort();
13eb76e0
FB
1994 }
1995 } else {
1996 /* RAM case */
5c8a00ce 1997 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 1998 memcpy(buf, ptr, l);
13eb76e0
FB
1999 }
2000 }
2001 len -= l;
2002 buf += l;
2003 addr += l;
2004 }
fd8aaa76
PB
2005
2006 return error;
13eb76e0 2007}
8df1cd07 2008
fd8aaa76 2009bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2010 const uint8_t *buf, int len)
2011{
fd8aaa76 2012 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2013}
2014
fd8aaa76 2015bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2016{
fd8aaa76 2017 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2018}
2019
2020
a8170e5e 2021void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2022 int len, int is_write)
2023{
fd8aaa76 2024 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2025}
2026
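/* Usage sketch (editorial, not part of exec.c): copying a buffer into
 * guest-physical memory through the slow path above and checking whether
 * any MMIO access along the way reported an error.  GUEST_ADDR and the
 * payload contents are hypothetical.
 */
static void example_write_guest_memory(void)
{
    uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef };
    const hwaddr GUEST_ADDR = 0x1000;

    if (address_space_write(&address_space_memory, GUEST_ADDR,
                            payload, sizeof(payload))) {
        /* a true return means at least one io_mem_write() failed */
        fprintf(stderr, "guest write at " TARGET_FMT_plx " failed\n",
                GUEST_ADDR);
    }

    /* The legacy wrapper goes through the same address_space_rw() path,
     * always targets address_space_memory and ignores errors.
     */
    cpu_physical_memory_rw(GUEST_ADDR, payload, sizeof(payload), 1);
}
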
d0ecd2aa 2027/* used for ROM loading : can write in RAM and ROM */
a8170e5e 2028void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
2029 const uint8_t *buf, int len)
2030{
149f54b5 2031 hwaddr l;
d0ecd2aa 2032 uint8_t *ptr;
149f54b5 2033 hwaddr addr1;
5c8a00ce 2034 MemoryRegion *mr;
3b46e624 2035
d0ecd2aa 2036 while (len > 0) {
149f54b5 2037 l = len;
5c8a00ce
PB
2038 mr = address_space_translate(&address_space_memory,
2039 addr, &addr1, &l, true);
3b46e624 2040
5c8a00ce
PB
2041 if (!(memory_region_is_ram(mr) ||
2042 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2043 /* do nothing */
2044 } else {
5c8a00ce 2045 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2046 /* ROM/RAM case */
5579c7f3 2047 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2048 memcpy(ptr, buf, l);
51d7a9eb 2049 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2050 }
2051 len -= l;
2052 buf += l;
2053 addr += l;
2054 }
2055}
2056
6d16c2f8 2057typedef struct {
d3e71559 2058 MemoryRegion *mr;
6d16c2f8 2059 void *buffer;
a8170e5e
AK
2060 hwaddr addr;
2061 hwaddr len;
6d16c2f8
AL
2062} BounceBuffer;
2063
2064static BounceBuffer bounce;
2065
ba223c29
AL
2066typedef struct MapClient {
2067 void *opaque;
2068 void (*callback)(void *opaque);
72cf2d4f 2069 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2070} MapClient;
2071
72cf2d4f
BS
2072static QLIST_HEAD(map_client_list, MapClient) map_client_list
2073 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2074
2075void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2076{
7267c094 2077 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2078
2079 client->opaque = opaque;
2080 client->callback = callback;
72cf2d4f 2081 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2082 return client;
2083}
2084
8b9c99d9 2085static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2086{
2087 MapClient *client = (MapClient *)_client;
2088
72cf2d4f 2089 QLIST_REMOVE(client, link);
7267c094 2090 g_free(client);
ba223c29
AL
2091}
2092
2093static void cpu_notify_map_clients(void)
2094{
2095 MapClient *client;
2096
72cf2d4f
BS
2097 while (!QLIST_EMPTY(&map_client_list)) {
2098 client = QLIST_FIRST(&map_client_list);
ba223c29 2099 client->callback(client->opaque);
34d5e948 2100 cpu_unregister_map_client(client);
ba223c29
AL
2101 }
2102}
2103
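/* Illustrative sketch (editorial, not part of exec.c): a DMA user that
 * failed to get the single bounce buffer can register a callback here so
 * that cpu_notify_map_clients(), run from address_space_unmap() below,
 * wakes it up.  MyDmaRequest, my_dma_retry() and my_dma_start() are
 * hypothetical; address_space_map() is defined further down.
 */
typedef struct MyDmaRequest {
    hwaddr addr;
    hwaddr len;
    void *map_client;
} MyDmaRequest;

static void my_dma_retry(void *opaque)
{
    MyDmaRequest *req = opaque;

    /* cpu_notify_map_clients() invokes this callback and then unregisters
     * the client itself; only the actual retry (not shown) belongs here.
     */
    req->map_client = NULL;
}

static void my_dma_start(MyDmaRequest *req)
{
    void *buf = address_space_map(&address_space_memory,
                                  req->addr, &req->len, false);

    if (!buf) {
        /* Bounce buffer busy: ask to be notified when it is released. */
        req->map_client = cpu_register_map_client(req, my_dma_retry);
        return;
    }
    /* ... use buf, then address_space_unmap() it ... */
}
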
51644ab7
PB
2104bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2105{
5c8a00ce 2106 MemoryRegion *mr;
51644ab7
PB
2107 hwaddr l, xlat;
2108
2109 while (len > 0) {
2110 l = len;
5c8a00ce
PB
2111 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2112 if (!memory_access_is_direct(mr, is_write)) {
2113 l = memory_access_size(mr, l, addr);
2114 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2115 return false;
2116 }
2117 }
2118
2119 len -= l;
2120 addr += l;
2121 }
2122 return true;
2123}
2124
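/* Illustrative sketch (editorial, not part of exec.c): a device model
 * probing a guest DMA window before committing to it.  DMA_BASE and
 * DMA_LEN are hypothetical values.
 */
static bool example_dma_window_ok(void)
{
    const hwaddr DMA_BASE = 0x40000000;
    const int DMA_LEN = 4096;

    return address_space_access_valid(&address_space_memory,
                                      DMA_BASE, DMA_LEN, true);
}
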
6d16c2f8
AL
2125/* Map a physical memory region into a host virtual address.
2126 * May map a subset of the requested range, given by and returned in *plen.
2127 * May return NULL if resources needed to perform the mapping are exhausted.
2128 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2129 * Use cpu_register_map_client() to know when retrying the map operation is
2130 * likely to succeed.
6d16c2f8 2131 */
ac1970fb 2132void *address_space_map(AddressSpace *as,
a8170e5e
AK
2133 hwaddr addr,
2134 hwaddr *plen,
ac1970fb 2135 bool is_write)
6d16c2f8 2136{
a8170e5e 2137 hwaddr len = *plen;
e3127ae0
PB
2138 hwaddr done = 0;
2139 hwaddr l, xlat, base;
2140 MemoryRegion *mr, *this_mr;
2141 ram_addr_t raddr;
6d16c2f8 2142
e3127ae0
PB
2143 if (len == 0) {
2144 return NULL;
2145 }
38bee5dc 2146
e3127ae0
PB
2147 l = len;
2148 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2149 if (!memory_access_is_direct(mr, is_write)) {
2150 if (bounce.buffer) {
2151 return NULL;
6d16c2f8 2152 }
e85d9db5
KW
2153 /* Avoid unbounded allocations */
2154 l = MIN(l, TARGET_PAGE_SIZE);
2155 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2156 bounce.addr = addr;
2157 bounce.len = l;
d3e71559
PB
2158
2159 memory_region_ref(mr);
2160 bounce.mr = mr;
e3127ae0
PB
2161 if (!is_write) {
2162 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2163 }
6d16c2f8 2164
e3127ae0
PB
2165 *plen = l;
2166 return bounce.buffer;
2167 }
2168
2169 base = xlat;
2170 raddr = memory_region_get_ram_addr(mr);
2171
2172 for (;;) {
6d16c2f8
AL
2173 len -= l;
2174 addr += l;
e3127ae0
PB
2175 done += l;
2176 if (len == 0) {
2177 break;
2178 }
2179
2180 l = len;
2181 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2182 if (this_mr != mr || xlat != base + done) {
2183 break;
2184 }
6d16c2f8 2185 }
e3127ae0 2186
d3e71559 2187 memory_region_ref(mr);
e3127ae0
PB
2188 *plen = done;
2189 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2190}
2191
ac1970fb 2192/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2193 * Will also mark the memory as dirty if is_write == 1. access_len gives
2194 * the amount of memory that was actually read or written by the caller.
2195 */
a8170e5e
AK
2196void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2197 int is_write, hwaddr access_len)
6d16c2f8
AL
2198{
2199 if (buffer != bounce.buffer) {
d3e71559
PB
2200 MemoryRegion *mr;
2201 ram_addr_t addr1;
2202
2203 mr = qemu_ram_addr_from_host(buffer, &addr1);
2204 assert(mr != NULL);
6d16c2f8 2205 if (is_write) {
6d16c2f8
AL
2206 while (access_len) {
2207 unsigned l;
2208 l = TARGET_PAGE_SIZE;
2209 if (l > access_len)
2210 l = access_len;
51d7a9eb 2211 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2212 addr1 += l;
2213 access_len -= l;
2214 }
2215 }
868bb33f 2216 if (xen_enabled()) {
e41d7c69 2217 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2218 }
d3e71559 2219 memory_region_unref(mr);
6d16c2f8
AL
2220 return;
2221 }
2222 if (is_write) {
ac1970fb 2223 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2224 }
f8a83245 2225 qemu_vfree(bounce.buffer);
6d16c2f8 2226 bounce.buffer = NULL;
d3e71559 2227 memory_region_unref(bounce.mr);
ba223c29 2228 cpu_notify_map_clients();
6d16c2f8 2229}
d0ecd2aa 2230
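/* Usage sketch (editorial, not part of exec.c): the canonical
 * map/use/unmap pattern for zero-copy reads, continuing when only part of
 * the range could be mapped at once.  example_read_guest() is hypothetical.
 */
static void example_read_guest(hwaddr addr, uint8_t *dest, hwaddr size)
{
    while (size > 0) {
        hwaddr len = size;
        void *p = address_space_map(&address_space_memory, addr, &len, false);

        if (!p) {
            /* Bounce buffer in use; see cpu_register_map_client() above. */
            return;
        }
        /* len now holds how much was actually mapped (possibly < size). */
        memcpy(dest, p, len);
        address_space_unmap(&address_space_memory, p, len, false, len);

        addr += len;
        dest += len;
        size -= len;
    }
}
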
a8170e5e
AK
2231void *cpu_physical_memory_map(hwaddr addr,
2232 hwaddr *plen,
ac1970fb
AK
2233 int is_write)
2234{
2235 return address_space_map(&address_space_memory, addr, plen, is_write);
2236}
2237
a8170e5e
AK
2238void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2239 int is_write, hwaddr access_len)
ac1970fb
AK
2240{
2241 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2242}
2243
8df1cd07 2244/* warning: addr must be aligned */
a8170e5e 2245static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2246 enum device_endian endian)
8df1cd07 2247{
8df1cd07 2248 uint8_t *ptr;
791af8c8 2249 uint64_t val;
5c8a00ce 2250 MemoryRegion *mr;
149f54b5
PB
2251 hwaddr l = 4;
2252 hwaddr addr1;
8df1cd07 2253
5c8a00ce
PB
2254 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2255 false);
2256 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2257 /* I/O case */
5c8a00ce 2258 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2259#if defined(TARGET_WORDS_BIGENDIAN)
2260 if (endian == DEVICE_LITTLE_ENDIAN) {
2261 val = bswap32(val);
2262 }
2263#else
2264 if (endian == DEVICE_BIG_ENDIAN) {
2265 val = bswap32(val);
2266 }
2267#endif
8df1cd07
FB
2268 } else {
2269 /* RAM case */
5c8a00ce 2270 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2271 & TARGET_PAGE_MASK)
149f54b5 2272 + addr1);
1e78bcc1
AG
2273 switch (endian) {
2274 case DEVICE_LITTLE_ENDIAN:
2275 val = ldl_le_p(ptr);
2276 break;
2277 case DEVICE_BIG_ENDIAN:
2278 val = ldl_be_p(ptr);
2279 break;
2280 default:
2281 val = ldl_p(ptr);
2282 break;
2283 }
8df1cd07
FB
2284 }
2285 return val;
2286}
2287
a8170e5e 2288uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2289{
2290 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2291}
2292
a8170e5e 2293uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2294{
2295 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2296}
2297
a8170e5e 2298uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2299{
2300 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2301}
2302
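/* Illustrative sketch (editorial, not part of exec.c): reading a 32-bit
 * value that the guest keeps in little-endian format, independent of the
 * target's native byte order.  GUEST_DESC_ADDR is a hypothetical address.
 */
static uint32_t example_read_le_descriptor(void)
{
    const hwaddr GUEST_DESC_ADDR = 0x2000;

    /* ldl_le_phys() byteswaps as needed, so the result is host-order. */
    return ldl_le_phys(GUEST_DESC_ADDR);
}
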
84b7b8e7 2303/* warning: addr must be aligned */
a8170e5e 2304static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2305 enum device_endian endian)
84b7b8e7 2306{
84b7b8e7
FB
2307 uint8_t *ptr;
2308 uint64_t val;
5c8a00ce 2309 MemoryRegion *mr;
149f54b5
PB
2310 hwaddr l = 8;
2311 hwaddr addr1;
84b7b8e7 2312
5c8a00ce
PB
2313 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2314 false);
2315 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2316 /* I/O case */
5c8a00ce 2317 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2318#if defined(TARGET_WORDS_BIGENDIAN)
2319 if (endian == DEVICE_LITTLE_ENDIAN) {
2320 val = bswap64(val);
2321 }
2322#else
2323 if (endian == DEVICE_BIG_ENDIAN) {
2324 val = bswap64(val);
2325 }
84b7b8e7
FB
2326#endif
2327 } else {
2328 /* RAM case */
5c8a00ce 2329 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2330 & TARGET_PAGE_MASK)
149f54b5 2331 + addr1);
1e78bcc1
AG
2332 switch (endian) {
2333 case DEVICE_LITTLE_ENDIAN:
2334 val = ldq_le_p(ptr);
2335 break;
2336 case DEVICE_BIG_ENDIAN:
2337 val = ldq_be_p(ptr);
2338 break;
2339 default:
2340 val = ldq_p(ptr);
2341 break;
2342 }
84b7b8e7
FB
2343 }
2344 return val;
2345}
2346
a8170e5e 2347uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2348{
2349 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2350}
2351
a8170e5e 2352uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2353{
2354 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2355}
2356
a8170e5e 2357uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2358{
2359 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2360}
2361
aab33094 2362/* XXX: optimize */
a8170e5e 2363uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2364{
2365 uint8_t val;
2366 cpu_physical_memory_read(addr, &val, 1);
2367 return val;
2368}
2369
733f0b02 2370/* warning: addr must be aligned */
a8170e5e 2371static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2372 enum device_endian endian)
aab33094 2373{
733f0b02
MT
2374 uint8_t *ptr;
2375 uint64_t val;
5c8a00ce 2376 MemoryRegion *mr;
149f54b5
PB
2377 hwaddr l = 2;
2378 hwaddr addr1;
733f0b02 2379
5c8a00ce
PB
2380 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2381 false);
2382 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2383 /* I/O case */
5c8a00ce 2384 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2385#if defined(TARGET_WORDS_BIGENDIAN)
2386 if (endian == DEVICE_LITTLE_ENDIAN) {
2387 val = bswap16(val);
2388 }
2389#else
2390 if (endian == DEVICE_BIG_ENDIAN) {
2391 val = bswap16(val);
2392 }
2393#endif
733f0b02
MT
2394 } else {
2395 /* RAM case */
5c8a00ce 2396 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2397 & TARGET_PAGE_MASK)
149f54b5 2398 + addr1);
1e78bcc1
AG
2399 switch (endian) {
2400 case DEVICE_LITTLE_ENDIAN:
2401 val = lduw_le_p(ptr);
2402 break;
2403 case DEVICE_BIG_ENDIAN:
2404 val = lduw_be_p(ptr);
2405 break;
2406 default:
2407 val = lduw_p(ptr);
2408 break;
2409 }
733f0b02
MT
2410 }
2411 return val;
aab33094
FB
2412}
2413
a8170e5e 2414uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2415{
2416 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2417}
2418
a8170e5e 2419uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2420{
2421 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2422}
2423
a8170e5e 2424uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2425{
2426 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2427}
2428
8df1cd07
FB
2429/* warning: addr must be aligned. The ram page is not marked as dirty
2430 and the code inside is not invalidated. It is useful if the dirty
2431 bits are used to track modified PTEs */
a8170e5e 2432void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2433{
8df1cd07 2434 uint8_t *ptr;
5c8a00ce 2435 MemoryRegion *mr;
149f54b5
PB
2436 hwaddr l = 4;
2437 hwaddr addr1;
8df1cd07 2438
5c8a00ce
PB
2439 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2440 true);
2441 if (l < 4 || !memory_access_is_direct(mr, true)) {
2442 io_mem_write(mr, addr1, val, 4);
8df1cd07 2443 } else {
5c8a00ce 2444 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2445 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2446 stl_p(ptr, val);
74576198
AL
2447
2448 if (unlikely(in_migration)) {
2449 if (!cpu_physical_memory_is_dirty(addr1)) {
2450 /* invalidate code */
2451 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2452 /* set dirty bit */
f7c11b53
YT
2453 cpu_physical_memory_set_dirty_flags(
2454 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2455 }
2456 }
8df1cd07
FB
2457 }
2458}
2459
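/* Illustrative sketch (editorial, not part of exec.c): the typical caller
 * is a softmmu page-table walker that sets an accessed/dirty bit in a guest
 * PTE without flagging the page dirty for TB invalidation, as the comment
 * above explains.  PTE_ACCESSED and pte_addr are hypothetical.
 */
static void example_set_pte_accessed(hwaddr pte_addr)
{
    const uint32_t PTE_ACCESSED = 1 << 5;
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
    }
}
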
2460/* warning: addr must be aligned */
a8170e5e 2461static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2462 enum device_endian endian)
8df1cd07 2463{
8df1cd07 2464 uint8_t *ptr;
5c8a00ce 2465 MemoryRegion *mr;
149f54b5
PB
2466 hwaddr l = 4;
2467 hwaddr addr1;
8df1cd07 2468
5c8a00ce
PB
2469 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2470 true);
2471 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2472#if defined(TARGET_WORDS_BIGENDIAN)
2473 if (endian == DEVICE_LITTLE_ENDIAN) {
2474 val = bswap32(val);
2475 }
2476#else
2477 if (endian == DEVICE_BIG_ENDIAN) {
2478 val = bswap32(val);
2479 }
2480#endif
5c8a00ce 2481 io_mem_write(mr, addr1, val, 4);
8df1cd07 2482 } else {
8df1cd07 2483 /* RAM case */
5c8a00ce 2484 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2485 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2486 switch (endian) {
2487 case DEVICE_LITTLE_ENDIAN:
2488 stl_le_p(ptr, val);
2489 break;
2490 case DEVICE_BIG_ENDIAN:
2491 stl_be_p(ptr, val);
2492 break;
2493 default:
2494 stl_p(ptr, val);
2495 break;
2496 }
51d7a9eb 2497 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2498 }
2499}
2500
a8170e5e 2501void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2502{
2503 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2504}
2505
a8170e5e 2506void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2507{
2508 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2509}
2510
a8170e5e 2511void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2512{
2513 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2514}
2515
aab33094 2516/* XXX: optimize */
a8170e5e 2517void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2518{
2519 uint8_t v = val;
2520 cpu_physical_memory_write(addr, &v, 1);
2521}
2522
733f0b02 2523/* warning: addr must be aligned */
a8170e5e 2524static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2525 enum device_endian endian)
aab33094 2526{
733f0b02 2527 uint8_t *ptr;
5c8a00ce 2528 MemoryRegion *mr;
149f54b5
PB
2529 hwaddr l = 2;
2530 hwaddr addr1;
733f0b02 2531
5c8a00ce
PB
2532 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2533 true);
2534 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2535#if defined(TARGET_WORDS_BIGENDIAN)
2536 if (endian == DEVICE_LITTLE_ENDIAN) {
2537 val = bswap16(val);
2538 }
2539#else
2540 if (endian == DEVICE_BIG_ENDIAN) {
2541 val = bswap16(val);
2542 }
2543#endif
5c8a00ce 2544 io_mem_write(mr, addr1, val, 2);
733f0b02 2545 } else {
733f0b02 2546 /* RAM case */
5c8a00ce 2547 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2548 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2549 switch (endian) {
2550 case DEVICE_LITTLE_ENDIAN:
2551 stw_le_p(ptr, val);
2552 break;
2553 case DEVICE_BIG_ENDIAN:
2554 stw_be_p(ptr, val);
2555 break;
2556 default:
2557 stw_p(ptr, val);
2558 break;
2559 }
51d7a9eb 2560 invalidate_and_set_dirty(addr1, 2);
733f0b02 2561 }
aab33094
FB
2562}
2563
a8170e5e 2564void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2565{
2566 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2567}
2568
a8170e5e 2569void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2570{
2571 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2572}
2573
a8170e5e 2574void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2575{
2576 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2577}
2578
aab33094 2579/* XXX: optimize */
a8170e5e 2580void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2581{
2582 val = tswap64(val);
71d2b725 2583 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2584}
2585
a8170e5e 2586void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2587{
2588 val = cpu_to_le64(val);
2589 cpu_physical_memory_write(addr, &val, 8);
2590}
2591
a8170e5e 2592void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2593{
2594 val = cpu_to_be64(val);
2595 cpu_physical_memory_write(addr, &val, 8);
2596}
2597
5e2972fd 2598/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2599int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2600 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2601{
2602 int l;
a8170e5e 2603 hwaddr phys_addr;
9b3c35e0 2604 target_ulong page;
13eb76e0
FB
2605
2606 while (len > 0) {
2607 page = addr & TARGET_PAGE_MASK;
f17ec444 2608 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2609 /* if no physical page mapped, return an error */
2610 if (phys_addr == -1)
2611 return -1;
2612 l = (page + TARGET_PAGE_SIZE) - addr;
2613 if (l > len)
2614 l = len;
5e2972fd 2615 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2616 if (is_write)
2617 cpu_physical_memory_write_rom(phys_addr, buf, l);
2618 else
5e2972fd 2619 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2620 len -= l;
2621 buf += l;
2622 addr += l;
2623 }
2624 return 0;
2625}
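
/* Usage sketch (editorial, not part of exec.c): this is the entry point the
 * gdbstub and monitor use to read guest memory through a particular CPU's
 * virtual-to-physical translation.  example_dump_guest() is hypothetical.
 */
static void example_dump_guest(CPUState *cpu, target_ulong vaddr)
{
    uint8_t buf[16];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        fprintf(stderr, "no mapping at " TARGET_FMT_lx "\n", vaddr);
    }
}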
a68fe89c 2626#endif
13eb76e0 2627
8e4a424b
BS
2628#if !defined(CONFIG_USER_ONLY)
2629
2630/*
2631 * A helper function for the _utterly broken_ virtio device model to find out if
2632 * it's running on a big endian machine. Don't do this at home kids!
2633 */
2634bool virtio_is_big_endian(void);
2635bool virtio_is_big_endian(void)
2636{
2637#if defined(TARGET_WORDS_BIGENDIAN)
2638 return true;
2639#else
2640 return false;
2641#endif
2642}
2643
2644#endif
2645
76f35538 2646#ifndef CONFIG_USER_ONLY
a8170e5e 2647bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2648{
5c8a00ce 2649 MemoryRegion *mr;
149f54b5 2650 hwaddr l = 1;
76f35538 2651
5c8a00ce
PB
2652 mr = address_space_translate(&address_space_memory,
2653 phys_addr, &phys_addr, &l, false);
76f35538 2654
5c8a00ce
PB
2655 return !(memory_region_is_ram(mr) ||
2656 memory_region_is_romd(mr));
76f35538 2657}
bd2fa51f
MH
2658
2659void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2660{
2661 RAMBlock *block;
2662
2663 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2664 func(block->host, block->offset, block->length, opaque);
2665 }
2666}
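
/* Illustrative sketch (editorial, not part of exec.c): a RAMBlockIterFunc
 * callback matching the call above (host pointer, ram_addr_t offset,
 * length, opaque), here used to total up guest RAM.  The names are
 * hypothetical.
 */
static void example_count_ram(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_count_ram, &total);
    return total;
}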
ec3f8c99 2667#endif