[qemu.git] / exec.c
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
2ff3de68 34#include "sysemu/sysemu.h"
0d09e41a 35#include "hw/xen/xen.h"
1de7afc9
PB
36#include "qemu/timer.h"
37#include "qemu/config-file.h"
022c62cb 38#include "exec/memory.h"
9c17d615 39#include "sysemu/dma.h"
022c62cb 40#include "exec/address-spaces.h"
53a5960a
PB
41#if defined(CONFIG_USER_ONLY)
42#include <qemu.h>
432d268c 43#else /* !CONFIG_USER_ONLY */
9c17d615 44#include "sysemu/xen-mapcache.h"
6506e4f9 45#include "trace.h"
53a5960a 46#endif
0d6d3c87 47#include "exec/cpu-all.h"
54936004 48
022c62cb 49#include "exec/cputlb.h"
5b6dd868 50#include "translate-all.h"
0cac1b66 51
022c62cb 52#include "exec/memory-internal.h"
67d95c15 53
db7b5426 54//#define DEBUG_SUBPAGE
1196be37 55
e2eef170 56#if !defined(CONFIG_USER_ONLY)
74576198 57static int in_migration;
94a6b54f 58
a3161038 59RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
60
61static MemoryRegion *system_memory;
309cb471 62static MemoryRegion *system_io;
62152b8a 63
f6790af6
AK
64AddressSpace address_space_io;
65AddressSpace address_space_memory;
2673a5da 66
0844e007 67MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 68static MemoryRegion io_mem_unassigned;
0e0df1e2 69
e2eef170 70#endif
9fa3e853 71
182735ef 72CPUState *first_cpu;
6a00d601
FB
73/* current CPU in the current thread. It is only valid inside
74 cpu_exec() */
4917cf44 75DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 76/* 0 = Do not count executed instructions.
bf20dc07 77 1 = Precise instruction counting.
2e70f6ef 78 2 = Adaptive rate instruction counting. */
5708fc66 79int use_icount;
6a00d601 80
e2eef170 81#if !defined(CONFIG_USER_ONLY)
4346ae3e 82
1db8abb1
PB
83typedef struct PhysPageEntry PhysPageEntry;
84
85struct PhysPageEntry {
86 uint16_t is_leaf : 1;
87 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
88 uint16_t ptr : 15;
89};
90
0475d94f
PB
91typedef PhysPageEntry Node[L2_SIZE];
92
1db8abb1
PB
93struct AddressSpaceDispatch {
94 /* This is a multi-level map on the physical address space.
95 * The bottom level has pointers to MemoryRegionSections.
96 */
97 PhysPageEntry phys_map;
0475d94f
PB
98 Node *nodes;
99 MemoryRegionSection *sections;
acc9d80b 100 AddressSpace *as;
1db8abb1
PB
101};
102
90260c6c
JK
103#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
104typedef struct subpage_t {
105 MemoryRegion iomem;
acc9d80b 106 AddressSpace *as;
90260c6c
JK
107 hwaddr base;
108 uint16_t sub_section[TARGET_PAGE_SIZE];
109} subpage_t;
110
b41aac4f
LPF
111#define PHYS_SECTION_UNASSIGNED 0
112#define PHYS_SECTION_NOTDIRTY 1
113#define PHYS_SECTION_ROM 2
114#define PHYS_SECTION_WATCH 3
5312bd8b 115
9affd6fc
PB
116typedef struct PhysPageMap {
117 unsigned sections_nb;
118 unsigned sections_nb_alloc;
119 unsigned nodes_nb;
120 unsigned nodes_nb_alloc;
121 Node *nodes;
122 MemoryRegionSection *sections;
123} PhysPageMap;
124
6092666e 125static PhysPageMap *prev_map;
9affd6fc 126static PhysPageMap next_map;
d6f2ea22 127
07f07b31 128#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 129
e2eef170 130static void io_mem_init(void);
62152b8a 131static void memory_map_init(void);
8b9c99d9 132static void *qemu_safe_ram_ptr(ram_addr_t addr);
e2eef170 133
1ec9b909 134static MemoryRegion io_mem_watch;
6658ffb8 135#endif
fd6ce8f6 136
6d9a1304 137#if !defined(CONFIG_USER_ONLY)
d6f2ea22 138
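/* Ensure the next_map node pool has room for at least 'nodes' more nodes,
   growing the allocation if necessary. */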
f7bf5461 139static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 140{
9affd6fc
PB
141 if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
142 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
143 16);
144 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
145 next_map.nodes_nb + nodes);
146 next_map.nodes = g_renew(Node, next_map.nodes,
147 next_map.nodes_nb_alloc);
d6f2ea22 148 }
f7bf5461
AK
149}
150
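/* Allocate a fresh node from next_map and initialize all of its entries
   to empty (non-leaf, PHYS_MAP_NODE_NIL). */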
151static uint16_t phys_map_node_alloc(void)
152{
153 unsigned i;
154 uint16_t ret;
155
9affd6fc 156 ret = next_map.nodes_nb++;
f7bf5461 157 assert(ret != PHYS_MAP_NODE_NIL);
9affd6fc 158 assert(ret != next_map.nodes_nb_alloc);
d6f2ea22 159 for (i = 0; i < L2_SIZE; ++i) {
9affd6fc
PB
160 next_map.nodes[ret][i].is_leaf = 0;
161 next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 162 }
f7bf5461 163 return ret;
d6f2ea22
AK
164}
165
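/* Recursive helper for phys_page_set(): walk down one level of the phys_map
   radix tree, allocating intermediate nodes as needed, and point the entries
   covering [*index, *index + *nb) pages at the 'leaf' section. */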
a8170e5e
AK
166static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
167 hwaddr *nb, uint16_t leaf,
2999097b 168 int level)
f7bf5461
AK
169{
170 PhysPageEntry *p;
171 int i;
a8170e5e 172 hwaddr step = (hwaddr)1 << (level * L2_BITS);
108c49b8 173
07f07b31 174 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800 175 lp->ptr = phys_map_node_alloc();
9affd6fc 176 p = next_map.nodes[lp->ptr];
f7bf5461
AK
177 if (level == 0) {
178 for (i = 0; i < L2_SIZE; i++) {
07f07b31 179 p[i].is_leaf = 1;
b41aac4f 180 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 181 }
67c4d23c 182 }
f7bf5461 183 } else {
9affd6fc 184 p = next_map.nodes[lp->ptr];
92e873b9 185 }
2999097b 186 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 187
2999097b 188 while (*nb && lp < &p[L2_SIZE]) {
07f07b31
AK
189 if ((*index & (step - 1)) == 0 && *nb >= step) {
190 lp->is_leaf = true;
c19e8800 191 lp->ptr = leaf;
07f07b31
AK
192 *index += step;
193 *nb -= step;
2999097b
AK
194 } else {
195 phys_page_set_level(lp, index, nb, leaf, level - 1);
196 }
197 ++lp;
f7bf5461
AK
198 }
199}
200
ac1970fb 201static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 202 hwaddr index, hwaddr nb,
2999097b 203 uint16_t leaf)
f7bf5461 204{
2999097b 205 /* Wildly overreserve - it doesn't matter much. */
07f07b31 206 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 207
ac1970fb 208 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
209}
210
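/* Look up the page at 'index' in the radix tree rooted at 'lp'; returns the
   matching MemoryRegionSection, or the unassigned section if it is not mapped. */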
9affd6fc
PB
211static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
212 Node *nodes, MemoryRegionSection *sections)
92e873b9 213{
31ab2b4a
AK
214 PhysPageEntry *p;
215 int i;
f1f6e3b8 216
07f07b31 217 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 218 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 219 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 220 }
9affd6fc 221 p = nodes[lp.ptr];
31ab2b4a 222 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 223 }
9affd6fc 224 return &sections[lp.ptr];
f3705d53
AK
225}
226
e5548617
BS
227bool memory_region_is_unassigned(MemoryRegion *mr)
228{
2a8e7499 229 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 230 && mr != &io_mem_watch;
fd6ce8f6 231}
149f54b5 232
c7086b4a 233static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
234 hwaddr addr,
235 bool resolve_subpage)
9f029603 236{
90260c6c
JK
237 MemoryRegionSection *section;
238 subpage_t *subpage;
239
0475d94f
PB
240 section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
241 d->nodes, d->sections);
90260c6c
JK
242 if (resolve_subpage && section->mr->subpage) {
243 subpage = container_of(section->mr, subpage_t, iomem);
0475d94f 244 section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
245 }
246 return section;
9f029603
JK
247}
248
90260c6c 249static MemoryRegionSection *
c7086b4a 250address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 251 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
252{
253 MemoryRegionSection *section;
254 Int128 diff;
255
c7086b4a 256 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
257 /* Compute offset within MemoryRegionSection */
258 addr -= section->offset_within_address_space;
259
260 /* Compute offset within MemoryRegion */
261 *xlat = addr + section->offset_within_region;
262
263 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 264 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
265 return section;
266}
90260c6c 267
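/* Translate 'addr' within 'as' into a target MemoryRegion plus offset (*xlat),
   iterating through any IOMMUs on the path; *plen is clamped to the length
   that can be accessed contiguously. */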
5c8a00ce
PB
268MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
269 hwaddr *xlat, hwaddr *plen,
270 bool is_write)
90260c6c 271{
30951157
AK
272 IOMMUTLBEntry iotlb;
273 MemoryRegionSection *section;
274 MemoryRegion *mr;
275 hwaddr len = *plen;
276
277 for (;;) {
c7086b4a 278 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157
AK
279 mr = section->mr;
280
281 if (!mr->iommu_ops) {
282 break;
283 }
284
285 iotlb = mr->iommu_ops->translate(mr, addr);
286 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
287 | (addr & iotlb.addr_mask));
288 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
289 if (!(iotlb.perm & (1 << is_write))) {
290 mr = &io_mem_unassigned;
291 break;
292 }
293
294 as = iotlb.target_as;
295 }
296
297 *plen = len;
298 *xlat = addr;
299 return mr;
90260c6c
JK
300}
301
302MemoryRegionSection *
303address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
304 hwaddr *plen)
305{
30951157 306 MemoryRegionSection *section;
c7086b4a 307 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
30951157
AK
308
309 assert(!section->mr->iommu_ops);
310 return section;
90260c6c 311}
5b6dd868 312#endif
fd6ce8f6 313
5b6dd868 314void cpu_exec_init_all(void)
fdbb84d1 315{
5b6dd868 316#if !defined(CONFIG_USER_ONLY)
b2a8658e 317 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
318 memory_map_init();
319 io_mem_init();
fdbb84d1 320#endif
5b6dd868 321}
fdbb84d1 322
b170fce3 323#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
324
325static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 326{
259186a7 327 CPUState *cpu = opaque;
a513fe19 328
5b6dd868
BS
329 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
330 version_id is increased. */
259186a7
AF
331 cpu->interrupt_request &= ~0x01;
332 tlb_flush(cpu->env_ptr, 1);
5b6dd868
BS
333
334 return 0;
a513fe19 335}
7501267e 336
1a1562f5 337const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
338 .name = "cpu_common",
339 .version_id = 1,
340 .minimum_version_id = 1,
341 .minimum_version_id_old = 1,
342 .post_load = cpu_common_post_load,
343 .fields = (VMStateField []) {
259186a7
AF
344 VMSTATE_UINT32(halted, CPUState),
345 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868
BS
346 VMSTATE_END_OF_LIST()
347 }
348};
1a1562f5 349
5b6dd868 350#endif
ea041c0e 351
38d8f5c8 352CPUState *qemu_get_cpu(int index)
ea041c0e 353{
182735ef 354 CPUState *cpu = first_cpu;
ea041c0e 355
182735ef 356 while (cpu) {
55e5c285 357 if (cpu->cpu_index == index) {
5b6dd868 358 break;
55e5c285 359 }
182735ef 360 cpu = cpu->next_cpu;
ea041c0e 361 }
5b6dd868 362
182735ef 363 return cpu;
ea041c0e
FB
364}
365
d6b9e0d6
MT
366void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
367{
182735ef 368 CPUState *cpu;
d6b9e0d6 369
182735ef
AF
370 cpu = first_cpu;
371 while (cpu) {
372 func(cpu, data);
373 cpu = cpu->next_cpu;
d6b9e0d6
MT
374 }
375}
376
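/* Hook a freshly initialized CPU into the global list: assign the next
   cpu_index, initialize its breakpoint/watchpoint lists and register its
   VMState description(s). */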
5b6dd868 377void cpu_exec_init(CPUArchState *env)
ea041c0e 378{
5b6dd868 379 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 380 CPUClass *cc = CPU_GET_CLASS(cpu);
182735ef 381 CPUState **pcpu;
5b6dd868
BS
382 int cpu_index;
383
384#if defined(CONFIG_USER_ONLY)
385 cpu_list_lock();
386#endif
182735ef
AF
387 cpu->next_cpu = NULL;
388 pcpu = &first_cpu;
5b6dd868 389 cpu_index = 0;
182735ef
AF
390 while (*pcpu != NULL) {
391 pcpu = &(*pcpu)->next_cpu;
5b6dd868
BS
392 cpu_index++;
393 }
55e5c285 394 cpu->cpu_index = cpu_index;
1b1ed8dc 395 cpu->numa_node = 0;
5b6dd868
BS
396 QTAILQ_INIT(&env->breakpoints);
397 QTAILQ_INIT(&env->watchpoints);
398#ifndef CONFIG_USER_ONLY
399 cpu->thread_id = qemu_get_thread_id();
400#endif
182735ef 401 *pcpu = cpu;
5b6dd868
BS
402#if defined(CONFIG_USER_ONLY)
403 cpu_list_unlock();
404#endif
259186a7 405 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
5b6dd868 406#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
407 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
408 cpu_save, cpu_load, env);
b170fce3 409 assert(cc->vmsd == NULL);
5b6dd868 410#endif
b170fce3
AF
411 if (cc->vmsd != NULL) {
412 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
413 }
ea041c0e
FB
414}
415
1fddef4b 416#if defined(TARGET_HAS_ICE)
94df27fd 417#if defined(CONFIG_USER_ONLY)
00b941e5 418static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
419{
420 tb_invalidate_phys_page_range(pc, pc + 1, 0);
421}
422#else
00b941e5 423static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 424{
00b941e5 425 tb_invalidate_phys_addr(cpu_get_phys_page_debug(cpu, pc) |
9d70c4b7 426 (pc & ~TARGET_PAGE_MASK));
1e7855a5 427}
c27004ec 428#endif
94df27fd 429#endif /* TARGET_HAS_ICE */
d720b93d 430
c527ee8f 431#if defined(CONFIG_USER_ONLY)
9349b4f9 432void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f
PB
433
434{
435}
436
9349b4f9 437int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f
PB
438 int flags, CPUWatchpoint **watchpoint)
439{
440 return -ENOSYS;
441}
442#else
6658ffb8 443/* Add a watchpoint. */
9349b4f9 444int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 445 int flags, CPUWatchpoint **watchpoint)
6658ffb8 446{
b4051334 447 target_ulong len_mask = ~(len - 1);
c0ce998e 448 CPUWatchpoint *wp;
6658ffb8 449
b4051334 450 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
451 if ((len & (len - 1)) || (addr & ~len_mask) ||
452 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
453 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
454 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
455 return -EINVAL;
456 }
7267c094 457 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
458
459 wp->vaddr = addr;
b4051334 460 wp->len_mask = len_mask;
a1d1bb31
AL
461 wp->flags = flags;
462
2dc9f411 463 /* keep all GDB-injected watchpoints in front */
c0ce998e 464 if (flags & BP_GDB)
72cf2d4f 465 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 466 else
72cf2d4f 467 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 468
6658ffb8 469 tlb_flush_page(env, addr);
a1d1bb31
AL
470
471 if (watchpoint)
472 *watchpoint = wp;
473 return 0;
6658ffb8
PB
474}
475
a1d1bb31 476/* Remove a specific watchpoint. */
9349b4f9 477int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 478 int flags)
6658ffb8 479{
b4051334 480 target_ulong len_mask = ~(len - 1);
a1d1bb31 481 CPUWatchpoint *wp;
6658ffb8 482
72cf2d4f 483 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 484 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 485 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 486 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
487 return 0;
488 }
489 }
a1d1bb31 490 return -ENOENT;
6658ffb8
PB
491}
492
a1d1bb31 493/* Remove a specific watchpoint by reference. */
9349b4f9 494void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 495{
72cf2d4f 496 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 497
a1d1bb31
AL
498 tlb_flush_page(env, watchpoint->vaddr);
499
7267c094 500 g_free(watchpoint);
a1d1bb31
AL
501}
502
503/* Remove all matching watchpoints. */
9349b4f9 504void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 505{
c0ce998e 506 CPUWatchpoint *wp, *next;
a1d1bb31 507
72cf2d4f 508 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
509 if (wp->flags & mask)
510 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 511 }
7d03f82f 512}
c527ee8f 513#endif
7d03f82f 514
a1d1bb31 515/* Add a breakpoint. */
9349b4f9 516int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 517 CPUBreakpoint **breakpoint)
4c3a88a2 518{
1fddef4b 519#if defined(TARGET_HAS_ICE)
c0ce998e 520 CPUBreakpoint *bp;
3b46e624 521
7267c094 522 bp = g_malloc(sizeof(*bp));
4c3a88a2 523
a1d1bb31
AL
524 bp->pc = pc;
525 bp->flags = flags;
526
2dc9f411 527 /* keep all GDB-injected breakpoints in front */
00b941e5 528 if (flags & BP_GDB) {
72cf2d4f 529 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
00b941e5 530 } else {
72cf2d4f 531 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
00b941e5 532 }
3b46e624 533
00b941e5 534 breakpoint_invalidate(ENV_GET_CPU(env), pc);
a1d1bb31 535
00b941e5 536 if (breakpoint) {
a1d1bb31 537 *breakpoint = bp;
00b941e5 538 }
4c3a88a2
FB
539 return 0;
540#else
a1d1bb31 541 return -ENOSYS;
4c3a88a2
FB
542#endif
543}
544
a1d1bb31 545/* Remove a specific breakpoint. */
9349b4f9 546int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 547{
7d03f82f 548#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
549 CPUBreakpoint *bp;
550
72cf2d4f 551 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
552 if (bp->pc == pc && bp->flags == flags) {
553 cpu_breakpoint_remove_by_ref(env, bp);
554 return 0;
555 }
7d03f82f 556 }
a1d1bb31
AL
557 return -ENOENT;
558#else
559 return -ENOSYS;
7d03f82f
EI
560#endif
561}
562
a1d1bb31 563/* Remove a specific breakpoint by reference. */
9349b4f9 564void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 565{
1fddef4b 566#if defined(TARGET_HAS_ICE)
72cf2d4f 567 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 568
00b941e5 569 breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);
a1d1bb31 570
7267c094 571 g_free(breakpoint);
a1d1bb31
AL
572#endif
573}
574
575/* Remove all matching breakpoints. */
9349b4f9 576void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
577{
578#if defined(TARGET_HAS_ICE)
c0ce998e 579 CPUBreakpoint *bp, *next;
a1d1bb31 580
72cf2d4f 581 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
582 if (bp->flags & mask)
583 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 584 }
4c3a88a2
FB
585#endif
586}
587
c33a346e
FB
588/* enable or disable single step mode. EXCP_DEBUG is returned by the
589 CPU loop after each instruction */
3825b28f 590void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 591{
1fddef4b 592#if defined(TARGET_HAS_ICE)
3825b28f 593 CPUArchState *env = cpu->env_ptr;
ed2803da
AF
594
595 if (cpu->singlestep_enabled != enabled) {
596 cpu->singlestep_enabled = enabled;
597 if (kvm_enabled()) {
e22a25c9 598 kvm_update_guest_debug(env, 0);
ed2803da 599 } else {
ccbb4d44 600 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
601 /* XXX: only flush what is necessary */
602 tb_flush(env);
603 }
c33a346e
FB
604 }
605#endif
606}
607
9349b4f9 608void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e 609{
878096ee 610 CPUState *cpu = ENV_GET_CPU(env);
7501267e 611 va_list ap;
493ae1f0 612 va_list ap2;
7501267e
FB
613
614 va_start(ap, fmt);
493ae1f0 615 va_copy(ap2, ap);
7501267e
FB
616 fprintf(stderr, "qemu: fatal: ");
617 vfprintf(stderr, fmt, ap);
618 fprintf(stderr, "\n");
878096ee 619 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
620 if (qemu_log_enabled()) {
621 qemu_log("qemu: fatal: ");
622 qemu_log_vprintf(fmt, ap2);
623 qemu_log("\n");
a0762859 624 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 625 qemu_log_flush();
93fcfe39 626 qemu_log_close();
924edcae 627 }
493ae1f0 628 va_end(ap2);
f9373291 629 va_end(ap);
fd052bf6
RV
630#if defined(CONFIG_USER_ONLY)
631 {
632 struct sigaction act;
633 sigfillset(&act.sa_mask);
634 act.sa_handler = SIG_DFL;
635 sigaction(SIGABRT, &act, NULL);
636 }
637#endif
7501267e
FB
638 abort();
639}
640
9349b4f9 641CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 642{
9349b4f9 643 CPUArchState *new_env = cpu_init(env->cpu_model_str);
5a38f081
AL
644#if defined(TARGET_HAS_ICE)
645 CPUBreakpoint *bp;
646 CPUWatchpoint *wp;
647#endif
648
9349b4f9 649 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081 650
5a38f081
AL
651 /* Clone all break/watchpoints.
652 Note: Once we support ptrace with hw-debug register access, make sure
653 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
654 QTAILQ_INIT(&env->breakpoints);
655 QTAILQ_INIT(&env->watchpoints);
5a38f081 656#if defined(TARGET_HAS_ICE)
72cf2d4f 657 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
658 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
659 }
72cf2d4f 660 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
661 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
662 wp->flags, NULL);
663 }
664#endif
665
c5be9f08
TS
666 return new_env;
667}
668
0124311e 669#if !defined(CONFIG_USER_ONLY)
d24981d3
JQ
670static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
671 uintptr_t length)
672{
673 uintptr_t start1;
674
675 /* we modify the TLB cache so that the dirty bit will be set again
676 when accessing the range */
677 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
678 /* Check that we don't span multiple blocks - this breaks the
679 address comparisons below. */
680 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
681 != (end - 1) - start) {
682 abort();
683 }
684 cpu_tlb_reset_dirty_all(start1, length);
685
686}
687
5579c7f3 688/* Note: start and end must be within the same ram block. */
c227f099 689void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 690 int dirty_flags)
1ccde1cb 691{
d24981d3 692 uintptr_t length;
1ccde1cb
FB
693
694 start &= TARGET_PAGE_MASK;
695 end = TARGET_PAGE_ALIGN(end);
696
697 length = end - start;
698 if (length == 0)
699 return;
f7c11b53 700 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 701
d24981d3
JQ
702 if (tcg_enabled()) {
703 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 704 }
1ccde1cb
FB
705}
706
8b9c99d9 707static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 708{
f6f3fbca 709 int ret = 0;
74576198 710 in_migration = enable;
f6f3fbca 711 return ret;
74576198
AL
712}
713
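/* Compute the iotlb value used by the softmmu TLB for this section: a ram
   address (tagged NOTDIRTY or ROM) for RAM, or a section number for MMIO;
   pages with watchpoints are redirected to the watch section. */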
a8170e5e 714hwaddr memory_region_section_get_iotlb(CPUArchState *env,
149f54b5
PB
715 MemoryRegionSection *section,
716 target_ulong vaddr,
717 hwaddr paddr, hwaddr xlat,
718 int prot,
719 target_ulong *address)
e5548617 720{
a8170e5e 721 hwaddr iotlb;
e5548617
BS
722 CPUWatchpoint *wp;
723
cc5bea60 724 if (memory_region_is_ram(section->mr)) {
e5548617
BS
725 /* Normal RAM. */
726 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 727 + xlat;
e5548617 728 if (!section->readonly) {
b41aac4f 729 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 730 } else {
b41aac4f 731 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
732 }
733 } else {
0475d94f 734 iotlb = section - address_space_memory.dispatch->sections;
149f54b5 735 iotlb += xlat;
e5548617
BS
736 }
737
738 /* Make accesses to pages with watchpoints go via the
739 watchpoint trap routines. */
740 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
741 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
742 /* Avoid trapping reads of pages with a write breakpoint. */
743 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 744 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
745 *address |= TLB_MMIO;
746 break;
747 }
748 }
749 }
750
751 return iotlb;
752}
9fa3e853
FB
753#endif /* defined(CONFIG_USER_ONLY) */
754
e2eef170 755#if !defined(CONFIG_USER_ONLY)
8da3ff18 756
c227f099 757static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 758 uint16_t section);
acc9d80b 759static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 760
5312bd8b
AK
761static uint16_t phys_section_add(MemoryRegionSection *section)
762{
68f3f65b
PB
763 /* The physical section number is ORed with a page-aligned
764 * pointer to produce the iotlb entries. Thus it should
765 * never overflow into the page-aligned value.
766 */
9affd6fc 767 assert(next_map.sections_nb < TARGET_PAGE_SIZE);
68f3f65b 768
9affd6fc
PB
769 if (next_map.sections_nb == next_map.sections_nb_alloc) {
770 next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
771 16);
772 next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
773 next_map.sections_nb_alloc);
5312bd8b 774 }
9affd6fc 775 next_map.sections[next_map.sections_nb] = *section;
dfde4e6e 776 memory_region_ref(section->mr);
9affd6fc 777 return next_map.sections_nb++;
5312bd8b
AK
778}
779
058bc4b5
PB
780static void phys_section_destroy(MemoryRegion *mr)
781{
dfde4e6e
PB
782 memory_region_unref(mr);
783
058bc4b5
PB
784 if (mr->subpage) {
785 subpage_t *subpage = container_of(mr, subpage_t, iomem);
786 memory_region_destroy(&subpage->iomem);
787 g_free(subpage);
788 }
789}
790
6092666e 791static void phys_sections_free(PhysPageMap *map)
5312bd8b 792{
9affd6fc
PB
793 while (map->sections_nb > 0) {
794 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
795 phys_section_destroy(section->mr);
796 }
9affd6fc
PB
797 g_free(map->sections);
798 g_free(map->nodes);
6092666e 799 g_free(map);
5312bd8b
AK
800}
801
ac1970fb 802static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
803{
804 subpage_t *subpage;
a8170e5e 805 hwaddr base = section->offset_within_address_space
0f0cb164 806 & TARGET_PAGE_MASK;
9affd6fc
PB
807 MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
808 next_map.nodes, next_map.sections);
0f0cb164
AK
809 MemoryRegionSection subsection = {
810 .offset_within_address_space = base,
052e87b0 811 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 812 };
a8170e5e 813 hwaddr start, end;
0f0cb164 814
f3705d53 815 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 816
f3705d53 817 if (!(existing->mr->subpage)) {
acc9d80b 818 subpage = subpage_init(d->as, base);
0f0cb164 819 subsection.mr = &subpage->iomem;
ac1970fb 820 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 821 phys_section_add(&subsection));
0f0cb164 822 } else {
f3705d53 823 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
824 }
825 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 826 end = start + int128_get64(section->size) - 1;
0f0cb164
AK
827 subpage_register(subpage, start, end, phys_section_add(section));
828}
829
830
052e87b0
PB
831static void register_multipage(AddressSpaceDispatch *d,
832 MemoryRegionSection *section)
33417e70 833{
a8170e5e 834 hwaddr start_addr = section->offset_within_address_space;
5312bd8b 835 uint16_t section_index = phys_section_add(section);
052e87b0
PB
836 uint64_t num_pages = int128_get64(int128_rshift(section->size,
837 TARGET_PAGE_BITS));
dd81124b 838
733d5ef5
PB
839 assert(num_pages);
840 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
841}
842
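/* Memory listener callback: split a MemoryRegionSection into page-aligned
   chunks, registering partial pages as subpages and whole pages as
   multipage entries. */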
ac1970fb 843static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 844{
89ae337a 845 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 846 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 847 MemoryRegionSection now = *section, remain = *section;
052e87b0 848 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 849
733d5ef5
PB
850 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
851 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
852 - now.offset_within_address_space;
853
052e87b0 854 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 855 register_subpage(d, &now);
733d5ef5 856 } else {
052e87b0 857 now.size = int128_zero();
733d5ef5 858 }
052e87b0
PB
859 while (int128_ne(remain.size, now.size)) {
860 remain.size = int128_sub(remain.size, now.size);
861 remain.offset_within_address_space += int128_get64(now.size);
862 remain.offset_within_region += int128_get64(now.size);
69b67646 863 now = remain;
052e87b0 864 if (int128_lt(remain.size, page_size)) {
733d5ef5
PB
865 register_subpage(d, &now);
866 } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
052e87b0 867 now.size = page_size;
ac1970fb 868 register_subpage(d, &now);
69b67646 869 } else {
052e87b0 870 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 871 register_multipage(d, &now);
69b67646 872 }
0f0cb164
AK
873 }
874}
875
62a2744c
SY
876void qemu_flush_coalesced_mmio_buffer(void)
877{
878 if (kvm_enabled())
879 kvm_flush_coalesced_mmio_buffer();
880}
881
b2a8658e
UD
882void qemu_mutex_lock_ramlist(void)
883{
884 qemu_mutex_lock(&ram_list.mutex);
885}
886
887void qemu_mutex_unlock_ramlist(void)
888{
889 qemu_mutex_unlock(&ram_list.mutex);
890}
891
c902760f
MT
892#if defined(__linux__) && !defined(TARGET_S390X)
893
894#include <sys/vfs.h>
895
896#define HUGETLBFS_MAGIC 0x958458f6
897
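/* Return the block size reported by statfs() for 'path' (expected to be a
   hugetlbfs mount); returns 0 on failure. */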
898static long gethugepagesize(const char *path)
899{
900 struct statfs fs;
901 int ret;
902
903 do {
9742bf26 904 ret = statfs(path, &fs);
c902760f
MT
905 } while (ret != 0 && errno == EINTR);
906
907 if (ret != 0) {
9742bf26
YT
908 perror(path);
909 return 0;
c902760f
MT
910 }
911
912 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 913 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
914
915 return fs.f_bsize;
916}
917
04b16653
AW
918static void *file_ram_alloc(RAMBlock *block,
919 ram_addr_t memory,
920 const char *path)
c902760f
MT
921{
922 char *filename;
8ca761f6
PF
923 char *sanitized_name;
924 char *c;
c902760f
MT
925 void *area;
926 int fd;
927#ifdef MAP_POPULATE
928 int flags;
929#endif
930 unsigned long hpagesize;
931
932 hpagesize = gethugepagesize(path);
933 if (!hpagesize) {
9742bf26 934 return NULL;
c902760f
MT
935 }
936
937 if (memory < hpagesize) {
938 return NULL;
939 }
940
941 if (kvm_enabled() && !kvm_has_sync_mmu()) {
942 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
943 return NULL;
944 }
945
8ca761f6
PF
946 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
947 sanitized_name = g_strdup(block->mr->name);
948 for (c = sanitized_name; *c != '\0'; c++) {
949 if (*c == '/')
950 *c = '_';
951 }
952
953 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
954 sanitized_name);
955 g_free(sanitized_name);
c902760f
MT
956
957 fd = mkstemp(filename);
958 if (fd < 0) {
9742bf26 959 perror("unable to create backing store for hugepages");
e4ada482 960 g_free(filename);
9742bf26 961 return NULL;
c902760f
MT
962 }
963 unlink(filename);
e4ada482 964 g_free(filename);
c902760f
MT
965
966 memory = (memory+hpagesize-1) & ~(hpagesize-1);
967
968 /*
969 * ftruncate is not supported by hugetlbfs in older
970 * hosts, so don't bother bailing out on errors.
971 * If anything goes wrong with it under other filesystems,
972 * mmap will fail.
973 */
974 if (ftruncate(fd, memory))
9742bf26 975 perror("ftruncate");
c902760f
MT
976
977#ifdef MAP_POPULATE
978 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
979 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
980 * to sidestep this quirk.
981 */
982 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
983 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
984#else
985 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
986#endif
987 if (area == MAP_FAILED) {
9742bf26
YT
988 perror("file_ram_alloc: can't mmap RAM pages");
989 close(fd);
990 return (NULL);
c902760f 991 }
04b16653 992 block->fd = fd;
c902760f
MT
993 return area;
994}
995#endif
996
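/* Best-fit search in ram_addr_t space: find the smallest gap between existing
   RAM blocks that can hold 'size' bytes and return its offset. */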
d17b5288 997static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
998{
999 RAMBlock *block, *next_block;
3e837b2c 1000 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1001
49cd9ac6
SH
1002 assert(size != 0); /* it would hand out same offset multiple times */
1003
a3161038 1004 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1005 return 0;
1006
a3161038 1007 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1008 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1009
1010 end = block->offset + block->length;
1011
a3161038 1012 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1013 if (next_block->offset >= end) {
1014 next = MIN(next, next_block->offset);
1015 }
1016 }
1017 if (next - end >= size && next - end < mingap) {
3e837b2c 1018 offset = end;
04b16653
AW
1019 mingap = next - end;
1020 }
1021 }
3e837b2c
AW
1022
1023 if (offset == RAM_ADDR_MAX) {
1024 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1025 (uint64_t)size);
1026 abort();
1027 }
1028
04b16653
AW
1029 return offset;
1030}
1031
652d7ec2 1032ram_addr_t last_ram_offset(void)
d17b5288
AW
1033{
1034 RAMBlock *block;
1035 ram_addr_t last = 0;
1036
a3161038 1037 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1038 last = MAX(last, block->offset + block->length);
1039
1040 return last;
1041}
1042
ddb97f1d
JB
1043static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1044{
1045 int ret;
ddb97f1d
JB
1046
1047 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2ff3de68
MA
1048 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1049 "dump-guest-core", true)) {
ddb97f1d
JB
1050 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1051 if (ret) {
1052 perror("qemu_madvise");
1053 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1054 "but dump_guest_core=off specified\n");
1055 }
1056 }
1057}
1058
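/* Build the idstr for the block at 'addr' from the device path (if any) and
   'name', and abort if another block already uses the same idstr. */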
c5705a77 1059void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
1060{
1061 RAMBlock *new_block, *block;
1062
c5705a77 1063 new_block = NULL;
a3161038 1064 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77
AK
1065 if (block->offset == addr) {
1066 new_block = block;
1067 break;
1068 }
1069 }
1070 assert(new_block);
1071 assert(!new_block->idstr[0]);
84b89d78 1072
09e5ab63
AL
1073 if (dev) {
1074 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1075 if (id) {
1076 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1077 g_free(id);
84b89d78
CM
1078 }
1079 }
1080 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1081
b2a8658e
UD
1082 /* This assumes the iothread lock is taken here too. */
1083 qemu_mutex_lock_ramlist();
a3161038 1084 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1085 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1086 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1087 new_block->idstr);
1088 abort();
1089 }
1090 }
b2a8658e 1091 qemu_mutex_unlock_ramlist();
c5705a77
AK
1092}
1093
8490fc78
LC
1094static int memory_try_enable_merging(void *addr, size_t len)
1095{
2ff3de68 1096 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1097 /* disabled by the user */
1098 return 0;
1099 }
1100
1101 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1102}
1103
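/* Allocate a new RAM block of 'size' bytes. If 'host' is non-NULL the caller
   supplies the backing memory; otherwise it is allocated here (hugetlbfs via
   -mem-path, Xen, KVM or anonymous mmap). Returns the block's offset. */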
c5705a77
AK
1104ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1105 MemoryRegion *mr)
1106{
abb26d63 1107 RAMBlock *block, *new_block;
c5705a77
AK
1108
1109 size = TARGET_PAGE_ALIGN(size);
1110 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1111
b2a8658e
UD
1112 /* This assumes the iothread lock is taken here too. */
1113 qemu_mutex_lock_ramlist();
7c637366 1114 new_block->mr = mr;
432d268c 1115 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1116 if (host) {
1117 new_block->host = host;
cd19cfa2 1118 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
1119 } else {
1120 if (mem_path) {
c902760f 1121#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
1122 new_block->host = file_ram_alloc(new_block, size, mem_path);
1123 if (!new_block->host) {
6eebf958 1124 new_block->host = qemu_anon_ram_alloc(size);
8490fc78 1125 memory_try_enable_merging(new_block->host, size);
6977dfe6 1126 }
c902760f 1127#else
6977dfe6
YT
1128 fprintf(stderr, "-mem-path option unsupported\n");
1129 exit(1);
c902760f 1130#endif
6977dfe6 1131 } else {
868bb33f 1132 if (xen_enabled()) {
fce537d4 1133 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
1134 } else if (kvm_enabled()) {
1135 /* some s390/kvm configurations have special constraints */
6eebf958 1136 new_block->host = kvm_ram_alloc(size);
432d268c 1137 } else {
6eebf958 1138 new_block->host = qemu_anon_ram_alloc(size);
432d268c 1139 }
8490fc78 1140 memory_try_enable_merging(new_block->host, size);
6977dfe6 1141 }
c902760f 1142 }
94a6b54f
PB
1143 new_block->length = size;
1144
abb26d63
PB
1145 /* Keep the list sorted from biggest to smallest block. */
1146 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1147 if (block->length < new_block->length) {
1148 break;
1149 }
1150 }
1151 if (block) {
1152 QTAILQ_INSERT_BEFORE(block, new_block, next);
1153 } else {
1154 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1155 }
0d6d3c87 1156 ram_list.mru_block = NULL;
94a6b54f 1157
f798b07f 1158 ram_list.version++;
b2a8658e 1159 qemu_mutex_unlock_ramlist();
f798b07f 1160
7267c094 1161 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1162 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
1163 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1164 0, size >> TARGET_PAGE_BITS);
1720aeee 1165 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1166
ddb97f1d 1167 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1168 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1169
6f0437e8
JK
1170 if (kvm_enabled())
1171 kvm_setup_guest_memory(new_block->host, size);
1172
94a6b54f
PB
1173 return new_block->offset;
1174}
e9a1ab19 1175
c5705a77 1176ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1177{
c5705a77 1178 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1179}
1180
1f2e98b6
AW
1181void qemu_ram_free_from_ptr(ram_addr_t addr)
1182{
1183 RAMBlock *block;
1184
b2a8658e
UD
1185 /* This assumes the iothread lock is taken here too. */
1186 qemu_mutex_lock_ramlist();
a3161038 1187 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1188 if (addr == block->offset) {
a3161038 1189 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1190 ram_list.mru_block = NULL;
f798b07f 1191 ram_list.version++;
7267c094 1192 g_free(block);
b2a8658e 1193 break;
1f2e98b6
AW
1194 }
1195 }
b2a8658e 1196 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1197}
1198
c227f099 1199void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1200{
04b16653
AW
1201 RAMBlock *block;
1202
b2a8658e
UD
1203 /* This assumes the iothread lock is taken here too. */
1204 qemu_mutex_lock_ramlist();
a3161038 1205 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1206 if (addr == block->offset) {
a3161038 1207 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1208 ram_list.mru_block = NULL;
f798b07f 1209 ram_list.version++;
cd19cfa2
HY
1210 if (block->flags & RAM_PREALLOC_MASK) {
1211 ;
1212 } else if (mem_path) {
04b16653
AW
1213#if defined (__linux__) && !defined(TARGET_S390X)
1214 if (block->fd) {
1215 munmap(block->host, block->length);
1216 close(block->fd);
1217 } else {
e7a09b92 1218 qemu_anon_ram_free(block->host, block->length);
04b16653 1219 }
fd28aa13
JK
1220#else
1221 abort();
04b16653
AW
1222#endif
1223 } else {
868bb33f 1224 if (xen_enabled()) {
e41d7c69 1225 xen_invalidate_map_cache_entry(block->host);
432d268c 1226 } else {
e7a09b92 1227 qemu_anon_ram_free(block->host, block->length);
432d268c 1228 }
04b16653 1229 }
7267c094 1230 g_free(block);
b2a8658e 1231 break;
04b16653
AW
1232 }
1233 }
b2a8658e 1234 qemu_mutex_unlock_ramlist();
04b16653 1235
e9a1ab19
FB
1236}
1237
cd19cfa2
HY
1238#ifndef _WIN32
1239void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1240{
1241 RAMBlock *block;
1242 ram_addr_t offset;
1243 int flags;
1244 void *area, *vaddr;
1245
a3161038 1246 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1247 offset = addr - block->offset;
1248 if (offset < block->length) {
1249 vaddr = block->host + offset;
1250 if (block->flags & RAM_PREALLOC_MASK) {
1251 ;
1252 } else {
1253 flags = MAP_FIXED;
1254 munmap(vaddr, length);
1255 if (mem_path) {
1256#if defined(__linux__) && !defined(TARGET_S390X)
1257 if (block->fd) {
1258#ifdef MAP_POPULATE
1259 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1260 MAP_PRIVATE;
1261#else
1262 flags |= MAP_PRIVATE;
1263#endif
1264 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1265 flags, block->fd, offset);
1266 } else {
1267 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1268 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1269 flags, -1, 0);
1270 }
fd28aa13
JK
1271#else
1272 abort();
cd19cfa2
HY
1273#endif
1274 } else {
1275#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1276 flags |= MAP_SHARED | MAP_ANONYMOUS;
1277 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1278 flags, -1, 0);
1279#else
1280 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1281 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1282 flags, -1, 0);
1283#endif
1284 }
1285 if (area != vaddr) {
f15fbc4b
AP
1286 fprintf(stderr, "Could not remap addr: "
1287 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1288 length, addr);
1289 exit(1);
1290 }
8490fc78 1291 memory_try_enable_merging(vaddr, length);
ddb97f1d 1292 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1293 }
1294 return;
1295 }
1296 }
1297}
1298#endif /* !_WIN32 */
1299
1b5ec234 1300static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
dc828ca1 1301{
94a6b54f
PB
1302 RAMBlock *block;
1303
b2a8658e 1304 /* The list is protected by the iothread lock here. */
0d6d3c87
PB
1305 block = ram_list.mru_block;
1306 if (block && addr - block->offset < block->length) {
1307 goto found;
1308 }
a3161038 1309 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1310 if (addr - block->offset < block->length) {
0d6d3c87 1311 goto found;
f471a17e 1312 }
94a6b54f 1313 }
f471a17e
AW
1314
1315 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1316 abort();
1317
0d6d3c87
PB
1318found:
1319 ram_list.mru_block = block;
1b5ec234
PB
1320 return block;
1321}
1322
1323/* Return a host pointer to ram allocated with qemu_ram_alloc.
1324 With the exception of the softmmu code in this file, this should
1325 only be used for local memory (e.g. video ram) that the device owns,
1326 and knows it isn't going to access beyond the end of the block.
1327
1328 It should not be used for general purpose DMA.
1329 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1330 */
1331void *qemu_get_ram_ptr(ram_addr_t addr)
1332{
1333 RAMBlock *block = qemu_get_ram_block(addr);
1334
0d6d3c87
PB
1335 if (xen_enabled()) {
1336 /* We need to check if the requested address is in the RAM
1337 * because we don't want to map the entire memory in QEMU.
1338 * In that case just map until the end of the page.
1339 */
1340 if (block->offset == 0) {
1341 return xen_map_cache(addr, 0, 0);
1342 } else if (block->host == NULL) {
1343 block->host =
1344 xen_map_cache(block->offset, block->length, 1);
1345 }
1346 }
1347 return block->host + (addr - block->offset);
dc828ca1
PB
1348}
1349
0d6d3c87
PB
1350/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1351 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1352 *
1353 * ??? Is this still necessary?
b2e0a138 1354 */
8b9c99d9 1355static void *qemu_safe_ram_ptr(ram_addr_t addr)
b2e0a138
MT
1356{
1357 RAMBlock *block;
1358
b2a8658e 1359 /* The list is protected by the iothread lock here. */
a3161038 1360 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1361 if (addr - block->offset < block->length) {
868bb33f 1362 if (xen_enabled()) {
432d268c
JN
1363 /* We need to check if the requested address is in the RAM
1364 * because we don't want to map the entire memory in QEMU.
712c2b41 1365 * In that case just map until the end of the page.
432d268c
JN
1366 */
1367 if (block->offset == 0) {
e41d7c69 1368 return xen_map_cache(addr, 0, 0);
432d268c 1369 } else if (block->host == NULL) {
e41d7c69
JK
1370 block->host =
1371 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
1372 }
1373 }
b2e0a138
MT
1374 return block->host + (addr - block->offset);
1375 }
1376 }
1377
1378 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1379 abort();
1380
1381 return NULL;
1382}
1383
38bee5dc
SS
1384/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1385 * but takes a size argument */
cb85f7ab 1386static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1387{
8ab934f9
SS
1388 if (*size == 0) {
1389 return NULL;
1390 }
868bb33f 1391 if (xen_enabled()) {
e41d7c69 1392 return xen_map_cache(addr, *size, 1);
868bb33f 1393 } else {
38bee5dc
SS
1394 RAMBlock *block;
1395
a3161038 1396 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1397 if (addr - block->offset < block->length) {
1398 if (addr - block->offset + *size > block->length)
1399 *size = block->length - addr + block->offset;
1400 return block->host + (addr - block->offset);
1401 }
1402 }
1403
1404 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1405 abort();
38bee5dc
SS
1406 }
1407}
1408
7443b437
PB
1409/* Some of the softmmu routines need to translate from a host pointer
1410 (typically a TLB entry) back to a ram offset. */
1b5ec234 1411MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1412{
94a6b54f
PB
1413 RAMBlock *block;
1414 uint8_t *host = ptr;
1415
868bb33f 1416 if (xen_enabled()) {
e41d7c69 1417 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1418 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1419 }
1420
23887b79
PB
1421 block = ram_list.mru_block;
1422 if (block && block->host && host - block->host < block->length) {
1423 goto found;
1424 }
1425
a3161038 1426 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 1427 /* This case happens when the block is not mapped. */
1428 if (block->host == NULL) {
1429 continue;
1430 }
f471a17e 1431 if (host - block->host < block->length) {
23887b79 1432 goto found;
f471a17e 1433 }
94a6b54f 1434 }
432d268c 1435
1b5ec234 1436 return NULL;
23887b79
PB
1437
1438found:
1439 *ram_addr = block->offset + (host - block->host);
1b5ec234 1440 return block->mr;
e890261f 1441}
f471a17e 1442
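/* Write handler for pages whose dirty state is being tracked: invalidate any
   translated code for the page, perform the store, then mark the page dirty
   (the slow path is dropped once the page is fully dirty). */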
a8170e5e 1443static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1444 uint64_t val, unsigned size)
9fa3e853 1445{
3a7d929e 1446 int dirty_flags;
f7c11b53 1447 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1448 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
0e0df1e2 1449 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1450 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1451 }
0e0df1e2
AK
1452 switch (size) {
1453 case 1:
1454 stb_p(qemu_get_ram_ptr(ram_addr), val);
1455 break;
1456 case 2:
1457 stw_p(qemu_get_ram_ptr(ram_addr), val);
1458 break;
1459 case 4:
1460 stl_p(qemu_get_ram_ptr(ram_addr), val);
1461 break;
1462 default:
1463 abort();
3a7d929e 1464 }
f23db169 1465 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1466 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1467 /* we remove the notdirty callback only if the code has been
1468 flushed */
4917cf44
AF
1469 if (dirty_flags == 0xff) {
1470 CPUArchState *env = current_cpu->env_ptr;
1471 tlb_set_dirty(env, env->mem_io_vaddr);
1472 }
9fa3e853
FB
1473}
1474
b018ddf6
PB
1475static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1476 unsigned size, bool is_write)
1477{
1478 return is_write;
1479}
1480
0e0df1e2 1481static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1482 .write = notdirty_mem_write,
b018ddf6 1483 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1484 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1485};
1486
0f459d16 1487/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1488static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1489{
4917cf44 1490 CPUArchState *env = current_cpu->env_ptr;
06d55cc1 1491 target_ulong pc, cs_base;
0f459d16 1492 target_ulong vaddr;
a1d1bb31 1493 CPUWatchpoint *wp;
06d55cc1 1494 int cpu_flags;
0f459d16 1495
06d55cc1
AL
1496 if (env->watchpoint_hit) {
1497 /* We re-entered the check after replacing the TB. Now raise
 1498 * the debug interrupt so that it will trigger after the
1499 * current instruction. */
c3affe56 1500 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1501 return;
1502 }
2e70f6ef 1503 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1504 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1505 if ((vaddr == (wp->vaddr & len_mask) ||
1506 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1507 wp->flags |= BP_WATCHPOINT_HIT;
1508 if (!env->watchpoint_hit) {
1509 env->watchpoint_hit = wp;
5a316526 1510 tb_check_watchpoint(env);
6e140f28
AL
1511 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1512 env->exception_index = EXCP_DEBUG;
488d6577 1513 cpu_loop_exit(env);
6e140f28
AL
1514 } else {
1515 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1516 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1517 cpu_resume_from_signal(env, NULL);
6e140f28 1518 }
06d55cc1 1519 }
6e140f28
AL
1520 } else {
1521 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1522 }
1523 }
1524}
1525
6658ffb8
PB
1526/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1527 so these check for a hit then pass through to the normal out-of-line
1528 phys routines. */
a8170e5e 1529static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1530 unsigned size)
6658ffb8 1531{
1ec9b909
AK
1532 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1533 switch (size) {
1534 case 1: return ldub_phys(addr);
1535 case 2: return lduw_phys(addr);
1536 case 4: return ldl_phys(addr);
1537 default: abort();
1538 }
6658ffb8
PB
1539}
1540
a8170e5e 1541static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1542 uint64_t val, unsigned size)
6658ffb8 1543{
1ec9b909
AK
1544 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1545 switch (size) {
67364150
MF
1546 case 1:
1547 stb_phys(addr, val);
1548 break;
1549 case 2:
1550 stw_phys(addr, val);
1551 break;
1552 case 4:
1553 stl_phys(addr, val);
1554 break;
1ec9b909
AK
1555 default: abort();
1556 }
6658ffb8
PB
1557}
1558
1ec9b909
AK
1559static const MemoryRegionOps watch_mem_ops = {
1560 .read = watch_mem_read,
1561 .write = watch_mem_write,
1562 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1563};
6658ffb8 1564
a8170e5e 1565static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1566 unsigned len)
db7b5426 1567{
acc9d80b
JK
1568 subpage_t *subpage = opaque;
1569 uint8_t buf[4];
791af8c8 1570
db7b5426 1571#if defined(DEBUG_SUBPAGE)
acc9d80b
JK
1572 printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
1573 subpage, len, addr);
db7b5426 1574#endif
acc9d80b
JK
1575 address_space_read(subpage->as, addr + subpage->base, buf, len);
1576 switch (len) {
1577 case 1:
1578 return ldub_p(buf);
1579 case 2:
1580 return lduw_p(buf);
1581 case 4:
1582 return ldl_p(buf);
1583 default:
1584 abort();
1585 }
db7b5426
BS
1586}
1587
a8170e5e 1588static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1589 uint64_t value, unsigned len)
db7b5426 1590{
acc9d80b
JK
1591 subpage_t *subpage = opaque;
1592 uint8_t buf[4];
1593
db7b5426 1594#if defined(DEBUG_SUBPAGE)
70c68e44 1595 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
acc9d80b
JK
1596 " value %"PRIx64"\n",
1597 __func__, subpage, len, addr, value);
db7b5426 1598#endif
acc9d80b
JK
1599 switch (len) {
1600 case 1:
1601 stb_p(buf, value);
1602 break;
1603 case 2:
1604 stw_p(buf, value);
1605 break;
1606 case 4:
1607 stl_p(buf, value);
1608 break;
1609 default:
1610 abort();
1611 }
1612 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1613}
1614
c353e4cc
PB
1615static bool subpage_accepts(void *opaque, hwaddr addr,
1616 unsigned size, bool is_write)
1617{
acc9d80b 1618 subpage_t *subpage = opaque;
c353e4cc 1619#if defined(DEBUG_SUBPAGE)
acc9d80b
JK
1620 printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
 1621 __func__, subpage, is_write ? 'w' : 'r', size, addr);
c353e4cc
PB
1622#endif
1623
acc9d80b
JK
1624 return address_space_access_valid(subpage->as, addr + subpage->base,
1625 size, is_write);
c353e4cc
PB
1626}
1627
70c68e44
AK
1628static const MemoryRegionOps subpage_ops = {
1629 .read = subpage_read,
1630 .write = subpage_write,
c353e4cc 1631 .valid.accepts = subpage_accepts,
70c68e44 1632 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1633};
1634
c227f099 1635static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1636 uint16_t section)
db7b5426
BS
1637{
1638 int idx, eidx;
1639
1640 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1641 return -1;
1642 idx = SUBPAGE_IDX(start);
1643 eidx = SUBPAGE_IDX(end);
1644#if defined(DEBUG_SUBPAGE)
0bf9e31a 1645 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426 1646 mmio, start, end, idx, eidx, section);
1647#endif
db7b5426 1648 for (; idx <= eidx; idx++) {
5312bd8b 1649 mmio->sub_section[idx] = section;
db7b5426
BS
1650 }
1651
1652 return 0;
1653}
1654
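/* Allocate a subpage container covering one target page at 'base', with every
   sub-section initially pointing at the unassigned section. */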
acc9d80b 1655static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1656{
c227f099 1657 subpage_t *mmio;
db7b5426 1658
7267c094 1659 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1660
acc9d80b 1661 mmio->as = as;
1eec614b 1662 mmio->base = base;
2c9b15ca 1663 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
70c68e44 1664 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1665 mmio->iomem.subpage = true;
db7b5426 1666#if defined(DEBUG_SUBPAGE)
1eec614b
AL
 1667 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1668 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1669#endif
b41aac4f 1670 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1671
1672 return mmio;
1673}
1674
5312bd8b
AK
1675static uint16_t dummy_section(MemoryRegion *mr)
1676{
1677 MemoryRegionSection section = {
1678 .mr = mr,
1679 .offset_within_address_space = 0,
1680 .offset_within_region = 0,
052e87b0 1681 .size = int128_2_64(),
5312bd8b
AK
1682 };
1683
1684 return phys_section_add(&section);
1685}
1686
a8170e5e 1687MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1688{
0475d94f 1689 return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1690}
1691
e9179ce1
AK
1692static void io_mem_init(void)
1693{
2c9b15ca
PB
1694 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1695 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
0e0df1e2 1696 "unassigned", UINT64_MAX);
2c9b15ca 1697 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
0e0df1e2 1698 "notdirty", UINT64_MAX);
2c9b15ca 1699 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1ec9b909 1700 "watch", UINT64_MAX);
e9179ce1
AK
1701}
1702
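/* Start of a memory topology update for an address space: allocate a fresh
   AddressSpaceDispatch that mem_add() will populate and mem_commit() will
   install. */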
ac1970fb 1703static void mem_begin(MemoryListener *listener)
00752703
PB
1704{
1705 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1706 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1707
1708 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1709 d->as = as;
1710 as->next_dispatch = d;
1711}
1712
1713static void mem_commit(MemoryListener *listener)
ac1970fb 1714{
89ae337a 1715 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1716 AddressSpaceDispatch *cur = as->dispatch;
1717 AddressSpaceDispatch *next = as->next_dispatch;
1718
1719 next->nodes = next_map.nodes;
1720 next->sections = next_map.sections;
ac1970fb 1721
0475d94f
PB
1722 as->dispatch = next;
1723 g_free(cur);
ac1970fb
AK
1724}
1725
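/* Start of a global topology update: stash the current PhysPageMap (freed in
   core_commit) and begin a new one, re-creating the fixed dummy sections in
   their well-known slots. */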
50c1e149
AK
1726static void core_begin(MemoryListener *listener)
1727{
b41aac4f
LPF
1728 uint16_t n;
1729
6092666e
PB
1730 prev_map = g_new(PhysPageMap, 1);
1731 *prev_map = next_map;
1732
9affd6fc 1733 memset(&next_map, 0, sizeof(next_map));
b41aac4f
LPF
1734 n = dummy_section(&io_mem_unassigned);
1735 assert(n == PHYS_SECTION_UNASSIGNED);
1736 n = dummy_section(&io_mem_notdirty);
1737 assert(n == PHYS_SECTION_NOTDIRTY);
1738 n = dummy_section(&io_mem_rom);
1739 assert(n == PHYS_SECTION_ROM);
1740 n = dummy_section(&io_mem_watch);
1741 assert(n == PHYS_SECTION_WATCH);
50c1e149
AK
1742}
1743
9affd6fc
PB
1744/* This listener's commit run after the other AddressSpaceDispatch listeners'.
1745 * All AddressSpaceDispatch instances have switched to the next map.
1746 */
1747static void core_commit(MemoryListener *listener)
1748{
6092666e 1749 phys_sections_free(prev_map);
9affd6fc
PB
1750}
1751
1d71148e 1752static void tcg_commit(MemoryListener *listener)
50c1e149 1753{
182735ef 1754 CPUState *cpu;
117712c3
AK
1755
1756 /* since each CPU stores ram addresses in its TLB cache, we must
1757 reset the modified entries */
1758 /* XXX: slow ! */
182735ef
AF
1759 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1760 CPUArchState *env = cpu->env_ptr;
1761
117712c3
AK
1762 tlb_flush(env, 1);
1763 }
50c1e149
AK
1764}
1765
93632747
AK
1766static void core_log_global_start(MemoryListener *listener)
1767{
1768 cpu_physical_memory_set_dirty_tracking(1);
1769}
1770
1771static void core_log_global_stop(MemoryListener *listener)
1772{
1773 cpu_physical_memory_set_dirty_tracking(0);
1774}
1775
93632747 1776static MemoryListener core_memory_listener = {
50c1e149 1777 .begin = core_begin,
9affd6fc 1778 .commit = core_commit,
93632747
AK
1779 .log_global_start = core_log_global_start,
1780 .log_global_stop = core_log_global_stop,
ac1970fb 1781 .priority = 1,
93632747
AK
1782};
1783
1d71148e
AK
1784static MemoryListener tcg_memory_listener = {
1785 .commit = tcg_commit,
1786};
1787
ac1970fb
AK
1788void address_space_init_dispatch(AddressSpace *as)
1789{
00752703 1790 as->dispatch = NULL;
89ae337a 1791 as->dispatch_listener = (MemoryListener) {
ac1970fb 1792 .begin = mem_begin,
00752703 1793 .commit = mem_commit,
ac1970fb
AK
1794 .region_add = mem_add,
1795 .region_nop = mem_add,
1796 .priority = 0,
1797 };
89ae337a 1798 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1799}
1800
83f3c251
AK
1801void address_space_destroy_dispatch(AddressSpace *as)
1802{
1803 AddressSpaceDispatch *d = as->dispatch;
1804
89ae337a 1805 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1806 g_free(d);
1807 as->dispatch = NULL;
1808}
1809
62152b8a
AK
1810static void memory_map_init(void)
1811{
7267c094 1812 system_memory = g_malloc(sizeof(*system_memory));
2c9b15ca 1813 memory_region_init(system_memory, NULL, "system", INT64_MAX);
7dca8043 1814 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1815
7267c094 1816 system_io = g_malloc(sizeof(*system_io));
2c9b15ca 1817 memory_region_init(system_io, NULL, "io", 65536);
7dca8043 1818 address_space_init(&address_space_io, system_io, "I/O");
93632747 1819
f6790af6 1820 memory_listener_register(&core_memory_listener, &address_space_memory);
f6790af6 1821 memory_listener_register(&tcg_memory_listener, &address_space_memory);
62152b8a
AK
1822}
1823
1824MemoryRegion *get_system_memory(void)
1825{
1826 return system_memory;
1827}
1828
309cb471
AK
1829MemoryRegion *get_system_io(void)
1830{
1831 return system_io;
1832}
1833
e2eef170
PB
1834#endif /* !defined(CONFIG_USER_ONLY) */
1835
13eb76e0
FB
1836/* physical memory access (slow version, mainly for debug) */
1837#if defined(CONFIG_USER_ONLY)
f17ec444 1838int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 1839 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1840{
1841 int l, flags;
1842 target_ulong page;
53a5960a 1843 void * p;
13eb76e0
FB
1844
1845 while (len > 0) {
1846 page = addr & TARGET_PAGE_MASK;
1847 l = (page + TARGET_PAGE_SIZE) - addr;
1848 if (l > len)
1849 l = len;
1850 flags = page_get_flags(page);
1851 if (!(flags & PAGE_VALID))
a68fe89c 1852 return -1;
13eb76e0
FB
1853 if (is_write) {
1854 if (!(flags & PAGE_WRITE))
a68fe89c 1855 return -1;
579a97f7 1856 /* XXX: this code should not depend on lock_user */
72fb7daa 1857 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1858 return -1;
72fb7daa
AJ
1859 memcpy(p, buf, l);
1860 unlock_user(p, addr, l);
13eb76e0
FB
1861 } else {
1862 if (!(flags & PAGE_READ))
a68fe89c 1863 return -1;
579a97f7 1864 /* XXX: this code should not depend on lock_user */
72fb7daa 1865 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1866 return -1;
72fb7daa 1867 memcpy(buf, p, l);
5b257578 1868 unlock_user(p, addr, 0);
13eb76e0
FB
1869 }
1870 len -= l;
1871 buf += l;
1872 addr += l;
1873 }
a68fe89c 1874 return 0;
13eb76e0 1875}
8df1cd07 1876
13eb76e0 1877#else
51d7a9eb 1878
a8170e5e
AK
1879static void invalidate_and_set_dirty(hwaddr addr,
1880 hwaddr length)
51d7a9eb
AP
1881{
1882 if (!cpu_physical_memory_is_dirty(addr)) {
1883 /* invalidate code */
1884 tb_invalidate_phys_page_range(addr, addr + length, 0);
1885 /* set dirty bit */
1886 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1887 }
e226939d 1888 xen_modified_memory(addr, length);
51d7a9eb
AP
1889}
1890
2bbfa05d
PB
1891static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1892{
1893 if (memory_region_is_ram(mr)) {
1894 return !(is_write && mr->readonly);
1895 }
1896 if (memory_region_is_romd(mr)) {
1897 return !is_write;
1898 }
1899
1900 return false;
1901}
1902
23326164 1903static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 1904{
e1622f4b 1905 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
1906
1907 /* Regions are assumed to support 1-4 byte accesses unless
1908 otherwise specified. */
23326164
RH
1909 if (access_size_max == 0) {
1910 access_size_max = 4;
1911 }
1912
1913 /* Bound the maximum access by the alignment of the address. */
1914 if (!mr->ops->impl.unaligned) {
1915 unsigned align_size_max = addr & -addr;
1916 if (align_size_max != 0 && align_size_max < access_size_max) {
1917 access_size_max = align_size_max;
1918 }
82f2563f 1919 }
23326164
RH
1920
1921 /* Don't attempt accesses larger than the maximum. */
1922 if (l > access_size_max) {
1923 l = access_size_max;
82f2563f 1924 }
23326164
RH
1925
1926 return l;
82f2563f
PB
1927}
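/*
 * Worked example (editor's note, not in the original source): for an access
 * at addr = 0x1006, addr & -addr == 0x2, so even a region that allows 4-byte
 * accesses is split into 2-byte pieces at that offset; at addr = 0x1000 the
 * expression yields 0x1000 and the region's valid.max_access_size (or the
 * 4-byte default) stays the effective bound.
 */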
1928
fd8aaa76 1929bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1930 int len, bool is_write)
13eb76e0 1931{
149f54b5 1932 hwaddr l;
13eb76e0 1933 uint8_t *ptr;
791af8c8 1934 uint64_t val;
149f54b5 1935 hwaddr addr1;
5c8a00ce 1936 MemoryRegion *mr;
fd8aaa76 1937 bool error = false;
3b46e624 1938
13eb76e0 1939 while (len > 0) {
149f54b5 1940 l = len;
5c8a00ce 1941 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 1942
13eb76e0 1943 if (is_write) {
5c8a00ce
PB
1944 if (!memory_access_is_direct(mr, is_write)) {
1945 l = memory_access_size(mr, l, addr1);
4917cf44 1946 /* XXX: could force current_cpu to NULL to avoid
6a00d601 1947 potential bugs */
23326164
RH
1948 switch (l) {
1949 case 8:
1950 /* 64 bit write access */
1951 val = ldq_p(buf);
1952 error |= io_mem_write(mr, addr1, val, 8);
1953 break;
1954 case 4:
1c213d19 1955 /* 32 bit write access */
c27004ec 1956 val = ldl_p(buf);
5c8a00ce 1957 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
1958 break;
1959 case 2:
1c213d19 1960 /* 16 bit write access */
c27004ec 1961 val = lduw_p(buf);
5c8a00ce 1962 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
1963 break;
1964 case 1:
1c213d19 1965 /* 8 bit write access */
c27004ec 1966 val = ldub_p(buf);
5c8a00ce 1967 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
1968 break;
1969 default:
1970 abort();
13eb76e0 1971 }
2bbfa05d 1972 } else {
5c8a00ce 1973 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 1974 /* RAM case */
5579c7f3 1975 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1976 memcpy(ptr, buf, l);
51d7a9eb 1977 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1978 }
1979 } else {
5c8a00ce 1980 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 1981 /* I/O case */
5c8a00ce 1982 l = memory_access_size(mr, l, addr1);
23326164
RH
1983 switch (l) {
1984 case 8:
1985 /* 64 bit read access */
1986 error |= io_mem_read(mr, addr1, &val, 8);
1987 stq_p(buf, val);
1988 break;
1989 case 4:
13eb76e0 1990 /* 32 bit read access */
5c8a00ce 1991 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 1992 stl_p(buf, val);
23326164
RH
1993 break;
1994 case 2:
13eb76e0 1995 /* 16 bit read access */
5c8a00ce 1996 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 1997 stw_p(buf, val);
23326164
RH
1998 break;
1999 case 1:
1c213d19 2000 /* 8 bit read access */
5c8a00ce 2001 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2002 stb_p(buf, val);
23326164
RH
2003 break;
2004 default:
2005 abort();
13eb76e0
FB
2006 }
2007 } else {
2008 /* RAM case */
5c8a00ce 2009 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2010 memcpy(buf, ptr, l);
13eb76e0
FB
2011 }
2012 }
2013 len -= l;
2014 buf += l;
2015 addr += l;
2016 }
fd8aaa76
PB
2017
2018 return error;
13eb76e0 2019}
8df1cd07 2020
fd8aaa76 2021bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2022 const uint8_t *buf, int len)
2023{
fd8aaa76 2024 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2025}
2026
fd8aaa76 2027bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2028{
fd8aaa76 2029 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2030}
2031
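/*
 * Illustrative sketch (editor's addition, not part of exec.c): a minimal use
 * of the accessors above to copy a few bytes to and from a guest-physical
 * address.  "guest_buf" is a hypothetical guest-RAM address supplied by the
 * caller; a true return value from address_space_write() means part of the
 * access failed.
 */
static void example_phys_copy(hwaddr guest_buf)
{
    uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];

    if (address_space_write(&address_space_memory, guest_buf, out,
                            sizeof(out))) {
        return; /* access failed, e.g. unassigned or read-only region */
    }
    address_space_read(&address_space_memory, guest_buf, in, sizeof(in));
}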
2032
a8170e5e 2033void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2034 int len, int is_write)
2035{
fd8aaa76 2036 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2037}
2038
d0ecd2aa 2039/* used for ROM loading: can write in RAM and ROM */
a8170e5e 2040void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
2041 const uint8_t *buf, int len)
2042{
149f54b5 2043 hwaddr l;
d0ecd2aa 2044 uint8_t *ptr;
149f54b5 2045 hwaddr addr1;
5c8a00ce 2046 MemoryRegion *mr;
3b46e624 2047
d0ecd2aa 2048 while (len > 0) {
149f54b5 2049 l = len;
5c8a00ce
PB
2050 mr = address_space_translate(&address_space_memory,
2051 addr, &addr1, &l, true);
3b46e624 2052
5c8a00ce
PB
2053 if (!(memory_region_is_ram(mr) ||
2054 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2055 /* do nothing */
2056 } else {
5c8a00ce 2057 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2058 /* ROM/RAM case */
5579c7f3 2059 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2060 memcpy(ptr, buf, l);
51d7a9eb 2061 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2062 }
2063 len -= l;
2064 buf += l;
2065 addr += l;
2066 }
2067}
2068
6d16c2f8 2069typedef struct {
d3e71559 2070 MemoryRegion *mr;
6d16c2f8 2071 void *buffer;
a8170e5e
AK
2072 hwaddr addr;
2073 hwaddr len;
6d16c2f8
AL
2074} BounceBuffer;
2075
2076static BounceBuffer bounce;
2077
ba223c29
AL
2078typedef struct MapClient {
2079 void *opaque;
2080 void (*callback)(void *opaque);
72cf2d4f 2081 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2082} MapClient;
2083
72cf2d4f
BS
2084static QLIST_HEAD(map_client_list, MapClient) map_client_list
2085 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2086
2087void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2088{
7267c094 2089 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2090
2091 client->opaque = opaque;
2092 client->callback = callback;
72cf2d4f 2093 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2094 return client;
2095}
2096
8b9c99d9 2097static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2098{
2099 MapClient *client = (MapClient *)_client;
2100
72cf2d4f 2101 QLIST_REMOVE(client, link);
7267c094 2102 g_free(client);
ba223c29
AL
2103}
2104
2105static void cpu_notify_map_clients(void)
2106{
2107 MapClient *client;
2108
72cf2d4f
BS
2109 while (!QLIST_EMPTY(&map_client_list)) {
2110 client = QLIST_FIRST(&map_client_list);
ba223c29 2111 client->callback(client->opaque);
34d5e948 2112 cpu_unregister_map_client(client);
ba223c29
AL
2113 }
2114}
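/*
 * Illustrative sketch (editor's addition): the retry pattern the map-client
 * list supports.  When address_space_map() returns NULL because the single
 * bounce buffer is busy, a caller can register a callback here; it is invoked
 * from cpu_notify_map_clients() once address_space_unmap() releases the
 * buffer.  "example_retry_dma" and its opaque state are hypothetical.
 */
static void example_map_retry_cb(void *opaque)
{
    /* reissue the failed mapping with whatever state was stashed in opaque */
    /* example_retry_dma(opaque); */
}

static void example_wait_for_bounce_buffer(void *dma_state)
{
    cpu_register_map_client(dma_state, example_map_retry_cb);
}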
2115
51644ab7
PB
2116bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2117{
5c8a00ce 2118 MemoryRegion *mr;
51644ab7
PB
2119 hwaddr l, xlat;
2120
2121 while (len > 0) {
2122 l = len;
5c8a00ce
PB
2123 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2124 if (!memory_access_is_direct(mr, is_write)) {
2125 l = memory_access_size(mr, l, addr);
2126 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2127 return false;
2128 }
2129 }
2130
2131 len -= l;
2132 addr += l;
2133 }
2134 return true;
2135}
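/*
 * Illustrative sketch (editor's addition): a device model might probe a
 * guest-physical range before reading it so it can fail a request cleanly
 * instead of touching an unassigned or access-checked region.
 */
static bool example_range_is_readable(hwaddr addr, int len)
{
    return address_space_access_valid(&address_space_memory, addr, len,
                                      false);
}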
2136
6d16c2f8
AL
2137/* Map a physical memory region into a host virtual address.
2138 * May map a subset of the requested range, given by and returned in *plen.
2139 * May return NULL if resources needed to perform the mapping are exhausted.
2140 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2141 * Use cpu_register_map_client() to know when retrying the map operation is
2142 * likely to succeed.
6d16c2f8 2143 */
ac1970fb 2144void *address_space_map(AddressSpace *as,
a8170e5e
AK
2145 hwaddr addr,
2146 hwaddr *plen,
ac1970fb 2147 bool is_write)
6d16c2f8 2148{
a8170e5e 2149 hwaddr len = *plen;
e3127ae0
PB
2150 hwaddr done = 0;
2151 hwaddr l, xlat, base;
2152 MemoryRegion *mr, *this_mr;
2153 ram_addr_t raddr;
6d16c2f8 2154
e3127ae0
PB
2155 if (len == 0) {
2156 return NULL;
2157 }
38bee5dc 2158
e3127ae0
PB
2159 l = len;
2160 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2161 if (!memory_access_is_direct(mr, is_write)) {
2162 if (bounce.buffer) {
2163 return NULL;
6d16c2f8 2164 }
e3127ae0
PB
2165 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2166 bounce.addr = addr;
2167 bounce.len = l;
d3e71559
PB
2168
2169 memory_region_ref(mr);
2170 bounce.mr = mr;
e3127ae0
PB
2171 if (!is_write) {
2172 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2173 }
6d16c2f8 2174
e3127ae0
PB
2175 *plen = l;
2176 return bounce.buffer;
2177 }
2178
2179 base = xlat;
2180 raddr = memory_region_get_ram_addr(mr);
2181
2182 for (;;) {
6d16c2f8
AL
2183 len -= l;
2184 addr += l;
e3127ae0
PB
2185 done += l;
2186 if (len == 0) {
2187 break;
2188 }
2189
2190 l = len;
2191 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2192 if (this_mr != mr || xlat != base + done) {
2193 break;
2194 }
6d16c2f8 2195 }
e3127ae0 2196
d3e71559 2197 memory_region_ref(mr);
e3127ae0
PB
2198 *plen = done;
2199 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2200}
2201
ac1970fb 2202/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2203 * Will also mark the memory as dirty if is_write == 1. access_len gives
2204 * the amount of memory that was actually read or written by the caller.
2205 */
a8170e5e
AK
2206void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2207 int is_write, hwaddr access_len)
6d16c2f8
AL
2208{
2209 if (buffer != bounce.buffer) {
d3e71559
PB
2210 MemoryRegion *mr;
2211 ram_addr_t addr1;
2212
2213 mr = qemu_ram_addr_from_host(buffer, &addr1);
2214 assert(mr != NULL);
6d16c2f8 2215 if (is_write) {
6d16c2f8
AL
2216 while (access_len) {
2217 unsigned l;
2218 l = TARGET_PAGE_SIZE;
2219 if (l > access_len)
2220 l = access_len;
51d7a9eb 2221 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2222 addr1 += l;
2223 access_len -= l;
2224 }
2225 }
868bb33f 2226 if (xen_enabled()) {
e41d7c69 2227 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2228 }
d3e71559 2229 memory_region_unref(mr);
6d16c2f8
AL
2230 return;
2231 }
2232 if (is_write) {
ac1970fb 2233 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2234 }
f8a83245 2235 qemu_vfree(bounce.buffer);
6d16c2f8 2236 bounce.buffer = NULL;
d3e71559 2237 memory_region_unref(bounce.mr);
ba223c29 2238 cpu_notify_map_clients();
6d16c2f8 2239}
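/*
 * Illustrative sketch (editor's addition): the usual pairing of
 * address_space_map() and address_space_unmap() for a zero-copy read.  The
 * mapping may cover less than the requested length, and it may fail outright
 * while the single bounce buffer is in use (see cpu_register_map_client()).
 */
static void example_map_and_read(hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *p = address_space_map(&address_space_memory, addr, &plen, false);

    if (!p) {
        return; /* try again after the map-client callback fires */
    }
    /* ... consume up to plen bytes at p ... */
    address_space_unmap(&address_space_memory, p, plen, false, plen);
}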
d0ecd2aa 2240
a8170e5e
AK
2241void *cpu_physical_memory_map(hwaddr addr,
2242 hwaddr *plen,
ac1970fb
AK
2243 int is_write)
2244{
2245 return address_space_map(&address_space_memory, addr, plen, is_write);
2246}
2247
a8170e5e
AK
2248void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2249 int is_write, hwaddr access_len)
ac1970fb
AK
2250{
2251 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2252}
2253
8df1cd07 2254/* warning: addr must be aligned */
a8170e5e 2255static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2256 enum device_endian endian)
8df1cd07 2257{
8df1cd07 2258 uint8_t *ptr;
791af8c8 2259 uint64_t val;
5c8a00ce 2260 MemoryRegion *mr;
149f54b5
PB
2261 hwaddr l = 4;
2262 hwaddr addr1;
8df1cd07 2263
5c8a00ce
PB
2264 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2265 false);
2266 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2267 /* I/O case */
5c8a00ce 2268 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2269#if defined(TARGET_WORDS_BIGENDIAN)
2270 if (endian == DEVICE_LITTLE_ENDIAN) {
2271 val = bswap32(val);
2272 }
2273#else
2274 if (endian == DEVICE_BIG_ENDIAN) {
2275 val = bswap32(val);
2276 }
2277#endif
8df1cd07
FB
2278 } else {
2279 /* RAM case */
5c8a00ce 2280 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2281 & TARGET_PAGE_MASK)
149f54b5 2282 + addr1);
1e78bcc1
AG
2283 switch (endian) {
2284 case DEVICE_LITTLE_ENDIAN:
2285 val = ldl_le_p(ptr);
2286 break;
2287 case DEVICE_BIG_ENDIAN:
2288 val = ldl_be_p(ptr);
2289 break;
2290 default:
2291 val = ldl_p(ptr);
2292 break;
2293 }
8df1cd07
FB
2294 }
2295 return val;
2296}
2297
a8170e5e 2298uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2299{
2300 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2301}
2302
a8170e5e 2303uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2304{
2305 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2306}
2307
a8170e5e 2308uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2309{
2310 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2311}
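/*
 * Illustrative sketch (editor's addition): reading guest-physical fields with
 * an explicit byte order, independent of host and target endianness.
 * "desc_addr" is a hypothetical, 4-byte-aligned guest address.
 */
static void example_read_descriptor(hwaddr desc_addr,
                                    uint32_t *le_field, uint32_t *be_field)
{
    *le_field = ldl_le_phys(desc_addr);     /* field stored little-endian */
    *be_field = ldl_be_phys(desc_addr + 4); /* field stored big-endian */
}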
2312
84b7b8e7 2313/* warning: addr must be aligned */
a8170e5e 2314static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2315 enum device_endian endian)
84b7b8e7 2316{
84b7b8e7
FB
2317 uint8_t *ptr;
2318 uint64_t val;
5c8a00ce 2319 MemoryRegion *mr;
149f54b5
PB
2320 hwaddr l = 8;
2321 hwaddr addr1;
84b7b8e7 2322
5c8a00ce
PB
2323 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2324 false);
2325 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2326 /* I/O case */
5c8a00ce 2327 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2328#if defined(TARGET_WORDS_BIGENDIAN)
2329 if (endian == DEVICE_LITTLE_ENDIAN) {
2330 val = bswap64(val);
2331 }
2332#else
2333 if (endian == DEVICE_BIG_ENDIAN) {
2334 val = bswap64(val);
2335 }
84b7b8e7
FB
2336#endif
2337 } else {
2338 /* RAM case */
5c8a00ce 2339 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2340 & TARGET_PAGE_MASK)
149f54b5 2341 + addr1);
1e78bcc1
AG
2342 switch (endian) {
2343 case DEVICE_LITTLE_ENDIAN:
2344 val = ldq_le_p(ptr);
2345 break;
2346 case DEVICE_BIG_ENDIAN:
2347 val = ldq_be_p(ptr);
2348 break;
2349 default:
2350 val = ldq_p(ptr);
2351 break;
2352 }
84b7b8e7
FB
2353 }
2354 return val;
2355}
2356
a8170e5e 2357uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2358{
2359 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2360}
2361
a8170e5e 2362uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2363{
2364 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2365}
2366
a8170e5e 2367uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2368{
2369 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2370}
2371
aab33094 2372/* XXX: optimize */
a8170e5e 2373uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2374{
2375 uint8_t val;
2376 cpu_physical_memory_read(addr, &val, 1);
2377 return val;
2378}
2379
733f0b02 2380/* warning: addr must be aligned */
a8170e5e 2381static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2382 enum device_endian endian)
aab33094 2383{
733f0b02
MT
2384 uint8_t *ptr;
2385 uint64_t val;
5c8a00ce 2386 MemoryRegion *mr;
149f54b5
PB
2387 hwaddr l = 2;
2388 hwaddr addr1;
733f0b02 2389
5c8a00ce
PB
2390 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2391 false);
2392 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2393 /* I/O case */
5c8a00ce 2394 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2395#if defined(TARGET_WORDS_BIGENDIAN)
2396 if (endian == DEVICE_LITTLE_ENDIAN) {
2397 val = bswap16(val);
2398 }
2399#else
2400 if (endian == DEVICE_BIG_ENDIAN) {
2401 val = bswap16(val);
2402 }
2403#endif
733f0b02
MT
2404 } else {
2405 /* RAM case */
5c8a00ce 2406 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2407 & TARGET_PAGE_MASK)
149f54b5 2408 + addr1);
1e78bcc1
AG
2409 switch (endian) {
2410 case DEVICE_LITTLE_ENDIAN:
2411 val = lduw_le_p(ptr);
2412 break;
2413 case DEVICE_BIG_ENDIAN:
2414 val = lduw_be_p(ptr);
2415 break;
2416 default:
2417 val = lduw_p(ptr);
2418 break;
2419 }
733f0b02
MT
2420 }
2421 return val;
aab33094
FB
2422}
2423
a8170e5e 2424uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2425{
2426 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2427}
2428
a8170e5e 2429uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2430{
2431 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2432}
2433
a8170e5e 2434uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2435{
2436 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2437}
2438
8df1cd07
FB
2439/* warning: addr must be aligned. The ram page is not marked as dirty
2440 and the code inside is not invalidated. It is useful if the dirty
2441 bits are used to track modified PTEs */
a8170e5e 2442void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2443{
8df1cd07 2444 uint8_t *ptr;
5c8a00ce 2445 MemoryRegion *mr;
149f54b5
PB
2446 hwaddr l = 4;
2447 hwaddr addr1;
8df1cd07 2448
5c8a00ce
PB
2449 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2450 true);
2451 if (l < 4 || !memory_access_is_direct(mr, true)) {
2452 io_mem_write(mr, addr1, val, 4);
8df1cd07 2453 } else {
5c8a00ce 2454 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2455 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2456 stl_p(ptr, val);
74576198
AL
2457
2458 if (unlikely(in_migration)) {
2459 if (!cpu_physical_memory_is_dirty(addr1)) {
2460 /* invalidate code */
2461 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2462 /* set dirty bit */
f7c11b53
YT
2463 cpu_physical_memory_set_dirty_flags(
2464 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2465 }
2466 }
8df1cd07
FB
2467 }
2468}
2469
2470/* warning: addr must be aligned */
a8170e5e 2471static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2472 enum device_endian endian)
8df1cd07 2473{
8df1cd07 2474 uint8_t *ptr;
5c8a00ce 2475 MemoryRegion *mr;
149f54b5
PB
2476 hwaddr l = 4;
2477 hwaddr addr1;
8df1cd07 2478
5c8a00ce
PB
2479 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2480 true);
2481 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2482#if defined(TARGET_WORDS_BIGENDIAN)
2483 if (endian == DEVICE_LITTLE_ENDIAN) {
2484 val = bswap32(val);
2485 }
2486#else
2487 if (endian == DEVICE_BIG_ENDIAN) {
2488 val = bswap32(val);
2489 }
2490#endif
5c8a00ce 2491 io_mem_write(mr, addr1, val, 4);
8df1cd07 2492 } else {
8df1cd07 2493 /* RAM case */
5c8a00ce 2494 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2495 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2496 switch (endian) {
2497 case DEVICE_LITTLE_ENDIAN:
2498 stl_le_p(ptr, val);
2499 break;
2500 case DEVICE_BIG_ENDIAN:
2501 stl_be_p(ptr, val);
2502 break;
2503 default:
2504 stl_p(ptr, val);
2505 break;
2506 }
51d7a9eb 2507 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2508 }
2509}
2510
a8170e5e 2511void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2512{
2513 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2514}
2515
a8170e5e 2516void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2517{
2518 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2519}
2520
a8170e5e 2521void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2522{
2523 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2524}
2525
aab33094 2526/* XXX: optimize */
a8170e5e 2527void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2528{
2529 uint8_t v = val;
2530 cpu_physical_memory_write(addr, &v, 1);
2531}
2532
733f0b02 2533/* warning: addr must be aligned */
a8170e5e 2534static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2535 enum device_endian endian)
aab33094 2536{
733f0b02 2537 uint8_t *ptr;
5c8a00ce 2538 MemoryRegion *mr;
149f54b5
PB
2539 hwaddr l = 2;
2540 hwaddr addr1;
733f0b02 2541
5c8a00ce
PB
2542 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2543 true);
2544 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2545#if defined(TARGET_WORDS_BIGENDIAN)
2546 if (endian == DEVICE_LITTLE_ENDIAN) {
2547 val = bswap16(val);
2548 }
2549#else
2550 if (endian == DEVICE_BIG_ENDIAN) {
2551 val = bswap16(val);
2552 }
2553#endif
5c8a00ce 2554 io_mem_write(mr, addr1, val, 2);
733f0b02 2555 } else {
733f0b02 2556 /* RAM case */
5c8a00ce 2557 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2558 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2559 switch (endian) {
2560 case DEVICE_LITTLE_ENDIAN:
2561 stw_le_p(ptr, val);
2562 break;
2563 case DEVICE_BIG_ENDIAN:
2564 stw_be_p(ptr, val);
2565 break;
2566 default:
2567 stw_p(ptr, val);
2568 break;
2569 }
51d7a9eb 2570 invalidate_and_set_dirty(addr1, 2);
733f0b02 2571 }
aab33094
FB
2572}
2573
a8170e5e 2574void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2575{
2576 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2577}
2578
a8170e5e 2579void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2580{
2581 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2582}
2583
a8170e5e 2584void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2585{
2586 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2587}
2588
aab33094 2589/* XXX: optimize */
a8170e5e 2590void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2591{
2592 val = tswap64(val);
71d2b725 2593 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2594}
2595
a8170e5e 2596void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2597{
2598 val = cpu_to_le64(val);
2599 cpu_physical_memory_write(addr, &val, 8);
2600}
2601
a8170e5e 2602void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2603{
2604 val = cpu_to_be64(val);
2605 cpu_physical_memory_write(addr, &val, 8);
2606}
2607
5e2972fd 2608/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2609int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2610 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2611{
2612 int l;
a8170e5e 2613 hwaddr phys_addr;
9b3c35e0 2614 target_ulong page;
13eb76e0
FB
2615
2616 while (len > 0) {
2617 page = addr & TARGET_PAGE_MASK;
f17ec444 2618 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2619 /* if no physical page mapped, return an error */
2620 if (phys_addr == -1)
2621 return -1;
2622 l = (page + TARGET_PAGE_SIZE) - addr;
2623 if (l > len)
2624 l = len;
5e2972fd 2625 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2626 if (is_write)
2627 cpu_physical_memory_write_rom(phys_addr, buf, l);
2628 else
5e2972fd 2629 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2630 len -= l;
2631 buf += l;
2632 addr += l;
2633 }
2634 return 0;
2635}
a68fe89c 2636#endif
13eb76e0 2637
8e4a424b
BS
2638#if !defined(CONFIG_USER_ONLY)
2639
2640/*
2641 * A helper function for the _utterly broken_ virtio device model to find out if
2642 * it's running on a big endian machine. Don't do this at home kids!
2643 */
2644bool virtio_is_big_endian(void);
2645bool virtio_is_big_endian(void)
2646{
2647#if defined(TARGET_WORDS_BIGENDIAN)
2648 return true;
2649#else
2650 return false;
2651#endif
2652}
2653
2654#endif
2655
76f35538 2656#ifndef CONFIG_USER_ONLY
a8170e5e 2657bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2658{
5c8a00ce 2659 MemoryRegion *mr;
149f54b5 2660 hwaddr l = 1;
76f35538 2661
5c8a00ce
PB
2662 mr = address_space_translate(&address_space_memory,
2663 phys_addr, &phys_addr, &l, false);
76f35538 2664
5c8a00ce
PB
2665 return !(memory_region_is_ram(mr) ||
2666 memory_region_is_romd(mr));
76f35538 2667}
bd2fa51f
MH
2668
2669void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2670{
2671 RAMBlock *block;
2672
2673 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2674 func(block->host, block->offset, block->length, opaque);
2675 }
2676}
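/*
 * Illustrative sketch (editor's addition): a RAMBlockIterFunc callback,
 * assuming the callback signature matches the call above (host pointer,
 * block offset, block length, opaque).  It just sums the guest RAM size
 * into a caller-provided counter, e.g.
 *   ram_addr_t total = 0;
 *   qemu_ram_foreach_block(example_sum_ram, &total);
 */
static void example_sum_ram(void *host, ram_addr_t offset,
                            ram_addr_t length, void *opaque)
{
    *(ram_addr_t *)opaque += length;
}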
ec3f8c99 2677#endif