/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

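/* The phys_map field of an AddressSpaceDispatch (below) is the root of a
 * small radix tree over physical page numbers: non-leaf entries point at
 * further Nodes, leaf entries index into the sections table.  Each level
 * consumes L2_BITS of the page number (for example, with L2_BITS == 10 a
 * Node would cover 1024 slots); the exact width and depth are build-time
 * constants, see L2_BITS and P_L2_LEVELS in the headers. */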
struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2, 16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

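/* Translate a physical address for an access of up to *plen bytes,
 * following IOMMU indirection: each iteration resolves the section for
 * the current address and, if the region has iommu_ops, applies the
 * IOMMU translation and continues in the IOMMU's target address space.
 * The loop also narrows *plen so the result never spans a translation
 * boundary, and a permission miss falls back to io_mem_unassigned. */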
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
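/* Watchpoints are stored with a length mask rather than a length: for a
 * power-of-2 len, len_mask = ~(len - 1), so a hit can be tested with a
 * single AND against the access address (see check_watchpoint() below). */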
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: the copy's list heads must be re-initialized, since memcpy
       left them pointing into the source CPU's lists.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

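/* Dirty page bookkeeping: migration and TCG both consume the dirty
 * bitmap, so clearing a range has two steps - mask the bits in
 * ram_list.phys_dirty, then (for TCG only) re-arm the TLB entries so
 * the next write to the range sets the bits again. */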
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

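/* Compute the value stored in a TLB entry's iotlb field.  For RAM it is
 * the ram_addr of the page, possibly tagged with the NOTDIRTY or ROM
 * special-section numbers in the low bits; for MMIO it is the section
 * index plus the offset into the section.  The tagging works because
 * sections_nb is asserted to stay below TARGET_PAGE_SIZE in
 * phys_section_add(). */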
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

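/* A MemoryRegionSection handed to the listener may start or end in the
 * middle of a page.  mem_add() therefore splits it into up to three
 * parts - an unaligned head, a page-aligned middle registered via
 * register_multipage(), and an unaligned tail - with the head and tail
 * going through the subpage machinery. */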
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

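/* Pick an offset for a new RAM block using best-fit: scan the gaps
 * between existing blocks and take the smallest gap that still fits the
 * requested size.  This keeps the ram_addr_t space compact even when
 * blocks are freed and reallocated. */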
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

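/* RAM block lookups keep a one-entry MRU cache in ram_list.mru_block;
 * the fast path below hits it for the common case of repeated access to
 * the same block before falling back to a full list walk. */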
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}

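/* Writes to pages that hold translated code land here instead of going
 * straight to RAM: the write first invalidates any TBs derived from the
 * page, then is performed by hand, and the dirty flags are updated.
 * Once the page no longer contains code, the callback removes itself by
 * re-arming the TLB entry as an ordinary RAM mapping. */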
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

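/* Subpage accesses are bounced through address_space_read/write with the
 * subpage's base added back, so a sub-page-sized region behaves exactly
 * as if the dispatch tree had per-byte granularity. */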
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', size, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      size, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL,
                          "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

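/* Dispatch maps are rebuilt, not updated in place: mem_begin() starts a
 * fresh AddressSpaceDispatch for the address space, mem_add() fills it
 * during the listener walk, and mem_commit() swaps it in.  The old map
 * is torn down later by core_commit(), which runs after every address
 * space has switched over. */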
ac1970fb 1707static void mem_begin(MemoryListener *listener)
00752703
PB
1708{
1709 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1710 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1711
1712 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1713 d->as = as;
1714 as->next_dispatch = d;
1715}
1716
1717static void mem_commit(MemoryListener *listener)
ac1970fb 1718{
89ae337a 1719 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1720 AddressSpaceDispatch *cur = as->dispatch;
1721 AddressSpaceDispatch *next = as->next_dispatch;
1722
1723 next->nodes = next_map.nodes;
1724 next->sections = next_map.sections;
ac1970fb 1725
0475d94f
PB
1726 as->dispatch = next;
1727 g_free(cur);
ac1970fb
AK
1728}
1729
static void core_begin(MemoryListener *listener)
{
    uint16_t n;

    prev_map = g_new(PhysPageMap, 1);
    *prev_map = next_map;

    memset(&next_map, 0, sizeof(next_map));
    n = dummy_section(&io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);
}

/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
 * By that point, all AddressSpaceDispatch instances have switched to the next
 * map, so the previous one can be freed.
 */
static void core_commit(MemoryListener *listener)
{
    phys_sections_free(prev_map);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

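/* Illustrative sketch (hypothetical, guarded out): a minimal MemoryListener
 * in the style of tcg_memory_listener above.  Only .commit is hooked, which
 * fires once per memory-map transaction; per the comment on
 * core_memory_listener, a higher .priority means the commit callback runs
 * after lower-priority listeners'. */
#if 0
static void my_commit(MemoryListener *listener)
{
    /* react here to a completed change of the guest memory map */
}

static MemoryListener my_listener = {
    .commit = my_commit,
    .priority = 2,
};

static void my_listener_setup(void)
{
    memory_listener_register(&my_listener, &address_space_memory);
}
#endif
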
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, NULL, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, NULL, "io", 65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

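/* Illustrative sketch (hypothetical board code, guarded out): the usual way
 * the accessors above are consumed, assuming the memory_region_init_ram()
 * signature of this tree (with an owner argument, like the _init_io calls
 * above).  The name "example.ram", the size and the base address 0 are made
 * up for the example. */
#if 0
static void example_board_ram_init(void)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, NULL, "example.ram", 64 * 1024 * 1024);
    /* map the RAM at guest physical address 0 in the system address space */
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif
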
#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Pick the widest access size (up to 4 bytes) that the remaining length,
 * the alignment of 'addr' and the region's unaligned-access support allow. */
static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
{
    if (l >= 4 && (((addr & 3) == 0) || mr->ops->impl.unaligned)) {
        return 4;
    }
    if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
        return 2;
    }
    return 1;
}

bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l == 4) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                } else if (l == 2) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                if (l == 4) {
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                } else if (l == 2) {
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                } else {
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}

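/* Illustrative sketch (hypothetical caller, guarded out): fetching a 32-bit
 * little-endian value from guest physical memory through the API above.  The
 * address 0x1000 is made up; a true return value from address_space_read()
 * means some part of the access hit an invalid region. */
#if 0
static uint32_t example_read_u32(void)
{
    uint8_t buf[4];

    if (address_space_read(&address_space_memory, 0x1000, buf, sizeof(buf))) {
        return 0; /* access failed */
    }
    return ldl_le_p(buf);
}
#endif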

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

/* used for ROM loading: can write to RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(&address_space_memory,
                                     addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

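/* Illustrative sketch (hypothetical device code, guarded out): a client that
 * retries a failed address_space_map() once the bounce buffer is released.
 * cpu_notify_map_clients() above both invokes the callback and unregisters
 * the client, so the callback can simply retry.  MyDeviceState and its
 * fields are made up for the example. */
#if 0
static void my_device_dma_retry(void *opaque);

static void my_device_dma_run(MyDeviceState *s)
{
    hwaddr len = s->dma_len;
    void *p = address_space_map(&address_space_memory, s->dma_addr, &len, true);

    if (!p) {
        /* bounce buffer in use: ask to be called back when it frees up */
        cpu_register_map_client(s, my_device_dma_retry);
        return;
    }
    /* ... fill 'p' (only 'len' bytes valid), then address_space_unmap() ... */
}

static void my_device_dma_retry(void *opaque)
{
    my_device_dma_run(opaque);
}
#endif
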
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}

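/* Illustrative sketch (hypothetical, guarded out): probing whether a DMA
 * window is fully accessible before committing to the transfer.  'addr' and
 * 'size' are made-up parameters. */
#if 0
static bool example_dma_window_ok(hwaddr addr, int size)
{
    return address_space_access_valid(&address_space_memory, addr, size,
                                      true /* is_write */);
}
#endif
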
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

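/* Illustrative sketch (hypothetical caller, guarded out): the usual
 * map -> access -> unmap pattern.  The mapping may come back shorter than
 * requested, so real callers loop over the remainder or fall back to
 * cpu_physical_memory_rw().  'addr' and 'want' are made-up parameters. */
#if 0
static void example_zero_guest_buffer(hwaddr addr, hwaddr want)
{
    hwaddr len = want;
    void *p = cpu_physical_memory_map(addr, &len, 1 /* is_write */);

    if (p) {
        memset(p, 0, len);                    /* only 'len' bytes are mapped */
        cpu_physical_memory_unmap(p, len, 1, len);
    }
}
#endif
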
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

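/* Illustrative sketch (hypothetical, guarded out): the fixed-endian variants
 * keep device emulation independent of TARGET_WORDS_BIGENDIAN.  A
 * little-endian device register is always read with the _le_ accessor;
 * 0xfe000000 is a made-up register address. */
#if 0
static uint32_t example_read_le_reg(void)
{
    return ldl_le_phys(0xfe000000);
}
#endif
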
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
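
/* Illustrative sketch (hypothetical, guarded out): an iterator callback for
 * qemu_ram_foreach_block(), matching the argument order passed above: host
 * pointer, ram_addr_t offset, length, then the caller's opaque pointer. */
#if 0
static void example_sum_block(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    ram_addr_t *total = opaque;

    *total += length;
}

static ram_addr_t example_total_ram(void)
{
    ram_addr_t total = 0;

    qemu_ram_foreach_block(example_sum_block, &total);
    return total;
}
#endif
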
#endif