/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

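/*
 * Illustration, not part of the original code: assuming L2_BITS == 10
 * and P_L2_LEVELS == 3 (the actual values are target-dependent), a page
 * index is consumed 10 bits per level, top level first.  Setting pages
 * [0x400, 0x400 + 0x800) to section S descends to level 0 for the
 * unaligned head; once *index is aligned to a full level-sized step and
 * *nb still covers it, a whole upper-level entry is marked as a leaf
 * without allocating any children, which keeps the tree sparse.
 */
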
static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &phys_sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(as, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

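/*
 * Note on the loop above: each IOMMU hop rewrites the address into the
 * translated page (keeping the low bits covered by addr_mask), clamps
 * the length to the end of that translation unit, and restarts the
 * lookup in the IOMMU's target address space.  A read (is_write == 0)
 * checks bit 0 of iotlb.perm and a write checks bit 1; a permission
 * miss falls through to io_mem_unassigned rather than faulting here.
 */
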
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

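/*
 * Illustration (assumed values): a 4-byte watchpoint at 0x1004 passes
 * the checks above because len is a power of two and the address is
 * 4-byte aligned (len_mask == ~3, and 0x1004 & 3 == 0).  A 4-byte
 * watchpoint at 0x1002 is rejected with -EINVAL because
 * 0x1002 & ~len_mask is nonzero.
 */
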
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
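/*
 * The iotlb value encodes two things at once: for RAM it is the
 * page-aligned ram_addr_t of the page ORed with a phys_section index
 * (notdirty or rom), and for MMIO it is the section index plus the
 * offset within the section.  This packing works because section
 * indexes are asserted to stay below TARGET_PAGE_SIZE in
 * phys_section_add() below.
 */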
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

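/*
 * Illustration (assuming 4 KiB target pages): a section covering
 * [0x1800, 0x5800) is added in three steps - a subpage for the
 * unaligned head [0x1800, 0x2000), one multipage run for the aligned
 * middle [0x2000, 0x5000), and a subpage for the unaligned tail
 * [0x5000, 0x5800).
 */
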
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

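/*
 * Note on the rounding in file_ram_alloc above: the requested size is
 * aligned up to a whole number of huge pages.  For example, assuming a
 * 2 MiB hugepage size, a 101 MiB request is rounded up to 51 huge
 * pages (102 MiB) of backing store.
 */
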
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

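/*
 * Best-fit illustration (assumed values): with blocks at [0, 0x1000)
 * and [0x3000, 0x4000), the candidate gaps are [0x1000, 0x3000) and the
 * unbounded space after 0x4000.  A request for 0x1000 bytes picks the
 * 0x2000-byte gap at offset 0x1000 because it is the smallest gap that
 * still fits, keeping the ram_addr_t space compact.
 */
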
ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

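/*
 * Note: ram_list.phys_dirty holds one byte of dirty flags per target
 * page, so the g_realloc above sizes it as last_ram_offset() shifted
 * down by the page bits.  The new block's pages are then marked fully
 * dirty (0xff) so migration and TCG treat them as needing attention.
 */
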
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

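/*
 * Sequence above, spelled out: a write to a page that still contains
 * translated code first invalidates the affected TBs, then performs
 * the store, then marks the page dirty.  Only once every flag byte is
 * 0xff (including CODE_DIRTY_FLAG) is the TLB entry switched back to a
 * plain RAM mapping, so later writes skip this slow path.
 */
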
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

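/*
 * Note: the two-pass behaviour above relies on env->watchpoint_hit as
 * a marker.  On the first pass the current TB is retranslated so
 * execution stops at the faulting instruction; when this check is
 * re-entered, the pending hit is converted into CPU_INTERRUPT_DEBUG so
 * the debug exception is raised at the right instruction boundary.
 */
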
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', size, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      size, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

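/*
 * Illustration (assuming 4 KiB target pages): mapping a 0x100-byte
 * region at physical address 0x1000040 ends up as
 * subpage_register(mmio, 0x40, 0x13f, section), which points byte
 * offsets 0x40..0x13f of that page's sub_section table at the new
 * section while the rest of the page keeps its previous (typically
 * unassigned) section.
 */
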
acc9d80b 1676static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1677{
c227f099 1678 subpage_t *mmio;
db7b5426 1679
7267c094 1680 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1681
acc9d80b 1682 mmio->as = as;
1eec614b 1683 mmio->base = base;
70c68e44
AK
1684 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1685 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1686 mmio->iomem.subpage = true;
db7b5426 1687#if defined(DEBUG_SUBPAGE)
1eec614b
AL
1688 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
1689 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 1690#endif
0f0cb164 1691 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
1692
1693 return mmio;
1694}
1695
5312bd8b
AK
1696static uint16_t dummy_section(MemoryRegion *mr)
1697{
1698 MemoryRegionSection section = {
1699 .mr = mr,
1700 .offset_within_address_space = 0,
1701 .offset_within_region = 0,
052e87b0 1702 .size = int128_2_64(),
5312bd8b
AK
1703 };
1704
1705 return phys_section_add(&section);
1706}
1707
a8170e5e 1708MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1709{
37ec01d4 1710 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1711}
1712
e9179ce1
AK
1713static void io_mem_init(void)
1714{
bf8d5166 1715 memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
0e0df1e2
AK
1716 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1717 "unassigned", UINT64_MAX);
1718 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1719 "notdirty", UINT64_MAX);
1ec9b909
AK
1720 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1721 "watch", UINT64_MAX);
e9179ce1
AK
1722}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space,
                 int128_get64(section->size));
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space,
                        int128_get64(section->size));
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    d->as = as;
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
{
    if (l >= 4 && (((addr & 3) == 0) || mr->ops->impl.unaligned)) {
        return 4;
    }
    if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
        return 2;
    }
    return 1;
}
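
/*
 * Worked example (editor's sketch, not in the original source): for a
 * 7-byte access at address 0x1001 into a region whose ops lack
 * impl.unaligned, the address is odd, so memory_access_size() clamps the
 * access to a single byte; address_space_rw() below then simply loops
 * until all bytes have been transferred.
 *
 *     l = memory_access_size(mr, 7, 0x1001);   // -> 1
 *     l = memory_access_size(mr, 6, 0x1002);   // -> 2
 *     l = memory_access_size(mr, 4, 0x1004);   // -> 4
 */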

bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l == 4) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                } else if (l == 2) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                if (l == 4) {
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                } else if (l == 2) {
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                } else {
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
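
/*
 * Usage sketch (editor's illustration, not in the original source):
 * writing four bytes of guest memory and reading them back through the
 * system memory address space.  The guest address 0x1000 is arbitrary.
 *
 *     uint8_t out[4] = { 1, 2, 3, 4 }, in[4];
 *     address_space_write(&address_space_memory, 0x1000, out, sizeof(out));
 *     address_space_read(&address_space_memory, 0x1000, in, sizeof(in));
 */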

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(&address_space_memory,
                                     addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
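
/*
 * Usage sketch (editor's illustration): firmware loaders use this helper
 * to populate ROM regions that ordinary writes would bounce off.
 * "rom_base", "blob" and "blob_len" are hypothetical names.
 *
 *     cpu_physical_memory_write_rom(rom_base, blob, blob_len);
 */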

typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
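
/*
 * Illustrative sketch (editor's addition, not in the original source): a
 * device that failed to map guest memory because the bounce buffer was
 * busy can register a callback to retry once it is freed.  "MyDevice"
 * and "mydev_retry_dma" are hypothetical names.
 *
 *     static void mydev_retry_dma(void *opaque)
 *     {
 *         MyDevice *dev = opaque;
 *         // call address_space_map() again and resume the transfer
 *     }
 *
 *     cpu_register_map_client(dev, mydev_retry_dma);
 */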

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
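
/*
 * Usage sketch (editor's illustration): probing a range before starting
 * a DMA write so the device can raise an error instead of faulting
 * midway through the transfer.  "dma_addr" and "dma_len" are
 * hypothetical.
 *
 *     if (!address_space_access_valid(&address_space_memory,
 *                                     dma_addr, dma_len, true)) {
 *         // signal a DMA error to the guest
 *     }
 */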

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr todo = 0;
    hwaddr l, xlat;
    MemoryRegion *mr;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);

        if (!memory_access_is_direct(mr, is_write)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(mr) + xlat;
        } else {
            if (memory_region_get_ram_addr(mr) + xlat != raddr + todo) {
                break;
            }
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
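
/*
 * Usage sketch (editor's illustration, not in the original source):
 * zero-copy access to a run of guest RAM.  Note that *plen may come back
 * smaller than requested, so callers must be prepared to loop or fall
 * back to address_space_rw().
 *
 *     hwaddr mlen = size;
 *     void *p = address_space_map(&address_space_memory, addr, &mlen, true);
 *     if (p) {
 *         memset(p, 0, mlen);
 *         address_space_unmap(&address_space_memory, p, mlen, true, mlen);
 *     }
 */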

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
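
/*
 * Usage sketch (editor's illustration): the endian-specific wrappers
 * differ only in how the loaded value is byte-swapped relative to the
 * host, which matters when device and guest endianness disagree.
 *
 *     uint32_t le = ldl_le_phys(addr);   // value stored little-endian
 *     uint32_t be = ldl_be_phys(addr);   // value stored big-endian
 *     uint32_t na = ldl_phys(addr);      // target-native byte order
 */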

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
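
/*
 * Usage sketch (editor's illustration, not in the original source): a
 * softmmu helper that sets an accessed bit in a guest page-table entry
 * uses the _notdirty variant so the store neither invalidates translated
 * code on that page nor marks it dirty.  "pte_addr" and "PTE_ACCESSED"
 * are hypothetical names.
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
 */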

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
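
/*
 * Illustrative sketch (editor's addition, not in the original source): a
 * callback matching the iteration above, logging every RAM block.
 * "dump_block" is a hypothetical name; the casts keep the format string
 * portable regardless of how ram_addr_t is defined.
 *
 *     static void dump_block(void *host, ram_addr_t offset,
 *                            ram_addr_t length, void *opaque)
 *     {
 *         printf("host %p offset 0x%llx len 0x%llx\n", host,
 *                (unsigned long long)offset, (unsigned long long)length);
 *     }
 *
 *     qemu_ram_foreach_block(dump_block, NULL);
 */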
#endif