54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
0d09e41a 34#include "hw/xen/xen.h"
1de7afc9
PB
35#include "qemu/timer.h"
36#include "qemu/config-file.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
67d95c15 52
db7b5426 53//#define DEBUG_SUBPAGE
1196be37 54
e2eef170 55#if !defined(CONFIG_USER_ONLY)
9fa3e853 56int phys_ram_fd;
74576198 57static int in_migration;
94a6b54f 58
a3161038 59RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
60
61static MemoryRegion *system_memory;
309cb471 62static MemoryRegion *system_io;
62152b8a 63
f6790af6
AK
64AddressSpace address_space_io;
65AddressSpace address_space_memory;
2673a5da 66
0844e007 67MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 68static MemoryRegion io_mem_unassigned;
0e0df1e2 69
e2eef170 70#endif
9fa3e853 71
9349b4f9 72CPUArchState *first_cpu;
6a00d601
FB
73/* current CPU in the current thread. It is only valid inside
74 cpu_exec() */
9349b4f9 75DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 76/* 0 = Do not count executed instructions.
bf20dc07 77 1 = Precise instruction counting.
2e70f6ef 78 2 = Adaptive rate instruction counting. */
5708fc66 79int use_icount;
6a00d601 80
e2eef170 81#if !defined(CONFIG_USER_ONLY)
4346ae3e 82
1db8abb1
PB
83typedef struct PhysPageEntry PhysPageEntry;
84
85struct PhysPageEntry {
86 uint16_t is_leaf : 1;
87 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
88 uint16_t ptr : 15;
89};
90
91struct AddressSpaceDispatch {
92 /* This is a multi-level map on the physical address space.
93 * The bottom level has pointers to MemoryRegionSections.
94 */
95 PhysPageEntry phys_map;
96 MemoryListener listener;
acc9d80b 97 AddressSpace *as;
1db8abb1
PB
98};
99
90260c6c
JK
100#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
101typedef struct subpage_t {
102 MemoryRegion iomem;
acc9d80b 103 AddressSpace *as;
90260c6c
JK
104 hwaddr base;
105 uint16_t sub_section[TARGET_PAGE_SIZE];
106} subpage_t;
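/* Illustration: a subpage covers one target page that is shared by more
 * than one MemoryRegionSection.  sub_section[] holds a phys_sections index
 * per byte offset within the page (see SUBPAGE_IDX), so a sub-page lookup
 * is a single table access; offsets that were never registered keep
 * pointing at the unassigned section. */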
107
5312bd8b
AK
108static MemoryRegionSection *phys_sections;
109static unsigned phys_sections_nb, phys_sections_nb_alloc;
110static uint16_t phys_section_unassigned;
aa102231
AK
111static uint16_t phys_section_notdirty;
112static uint16_t phys_section_rom;
113static uint16_t phys_section_watch;
5312bd8b 114
d6f2ea22
AK
115/* Simple allocator for PhysPageEntry nodes */
116static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
117static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
118
07f07b31 119#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 120
e2eef170 121static void io_mem_init(void);
62152b8a 122static void memory_map_init(void);
8b9c99d9 123static void *qemu_safe_ram_ptr(ram_addr_t addr);
e2eef170 124
1ec9b909 125static MemoryRegion io_mem_watch;
6658ffb8 126#endif
fd6ce8f6 127
6d9a1304 128#if !defined(CONFIG_USER_ONLY)
d6f2ea22 129
f7bf5461 130static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 131{
f7bf5461 132 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
d6f2ea22
AK
133 typedef PhysPageEntry Node[L2_SIZE];
134 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
f7bf5461
AK
135 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
136 phys_map_nodes_nb + nodes);
d6f2ea22
AK
137 phys_map_nodes = g_renew(Node, phys_map_nodes,
138 phys_map_nodes_nb_alloc);
139 }
f7bf5461
AK
140}
141
142static uint16_t phys_map_node_alloc(void)
143{
144 unsigned i;
145 uint16_t ret;
146
147 ret = phys_map_nodes_nb++;
148 assert(ret != PHYS_MAP_NODE_NIL);
149 assert(ret != phys_map_nodes_nb_alloc);
d6f2ea22 150 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 151 phys_map_nodes[ret][i].is_leaf = 0;
c19e8800 152 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 153 }
f7bf5461 154 return ret;
d6f2ea22
AK
155}
156
157static void phys_map_nodes_reset(void)
158{
159 phys_map_nodes_nb = 0;
160}
161
92e873b9 162
a8170e5e
AK
163static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
164 hwaddr *nb, uint16_t leaf,
2999097b 165 int level)
f7bf5461
AK
166{
167 PhysPageEntry *p;
168 int i;
a8170e5e 169 hwaddr step = (hwaddr)1 << (level * L2_BITS);
108c49b8 170
07f07b31 171 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800
AK
172 lp->ptr = phys_map_node_alloc();
173 p = phys_map_nodes[lp->ptr];
f7bf5461
AK
174 if (level == 0) {
175 for (i = 0; i < L2_SIZE; i++) {
07f07b31 176 p[i].is_leaf = 1;
c19e8800 177 p[i].ptr = phys_section_unassigned;
4346ae3e 178 }
67c4d23c 179 }
f7bf5461 180 } else {
c19e8800 181 p = phys_map_nodes[lp->ptr];
92e873b9 182 }
2999097b 183 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 184
2999097b 185 while (*nb && lp < &p[L2_SIZE]) {
07f07b31
AK
186 if ((*index & (step - 1)) == 0 && *nb >= step) {
187 lp->is_leaf = true;
c19e8800 188 lp->ptr = leaf;
07f07b31
AK
189 *index += step;
190 *nb -= step;
2999097b
AK
191 } else {
192 phys_page_set_level(lp, index, nb, leaf, level - 1);
193 }
194 ++lp;
f7bf5461
AK
195 }
196}
197
ac1970fb 198static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 199 hwaddr index, hwaddr nb,
2999097b 200 uint16_t leaf)
f7bf5461 201{
2999097b 202 /* Wildly overreserve - it doesn't matter much. */
07f07b31 203 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 204
ac1970fb 205 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
206}
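/* Worked example (illustrative): phys_page_set(d, 0x100, 0x40, sec) makes
 * guest-physical pages 0x100..0x13f resolve to phys_sections[sec].
 * Interior nodes are allocated lazily from phys_map_nodes, and an aligned
 * run that covers a whole subtree ("step" pages at a given level) is
 * recorded as a single leaf at that level instead of being expanded. */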
207
149f54b5 208static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
92e873b9 209{
ac1970fb 210 PhysPageEntry lp = d->phys_map;
31ab2b4a
AK
211 PhysPageEntry *p;
212 int i;
f1f6e3b8 213
07f07b31 214 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 215 if (lp.ptr == PHYS_MAP_NODE_NIL) {
fd298934 216 return &phys_sections[phys_section_unassigned];
31ab2b4a 217 }
c19e8800 218 p = phys_map_nodes[lp.ptr];
31ab2b4a 219 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 220 }
fd298934 221 return &phys_sections[lp.ptr];
f3705d53
AK
222}
223
e5548617
BS
224bool memory_region_is_unassigned(MemoryRegion *mr)
225{
2a8e7499 226 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 227 && mr != &io_mem_watch;
fd6ce8f6 228}
149f54b5 229
9f029603 230static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
90260c6c
JK
231 hwaddr addr,
232 bool resolve_subpage)
9f029603 233{
90260c6c
JK
234 MemoryRegionSection *section;
235 subpage_t *subpage;
236
237 section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
238 if (resolve_subpage && section->mr->subpage) {
239 subpage = container_of(section->mr, subpage_t, iomem);
240 section = &phys_sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
241 }
242 return section;
9f029603
JK
243}
244
90260c6c
JK
245static MemoryRegionSection *
246address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
247 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
248{
249 MemoryRegionSection *section;
250 Int128 diff;
251
90260c6c 252 section = address_space_lookup_region(as, addr, resolve_subpage);
149f54b5
PB
253 /* Compute offset within MemoryRegionSection */
254 addr -= section->offset_within_address_space;
255
256 /* Compute offset within MemoryRegion */
257 *xlat = addr + section->offset_within_region;
258
259 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 260 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
261 return section;
262}
90260c6c 263
5c8a00ce
PB
264MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
265 hwaddr *xlat, hwaddr *plen,
266 bool is_write)
90260c6c 267{
30951157
AK
268 IOMMUTLBEntry iotlb;
269 MemoryRegionSection *section;
270 MemoryRegion *mr;
271 hwaddr len = *plen;
272
273 for (;;) {
274 section = address_space_translate_internal(as, addr, &addr, plen, true);
275 mr = section->mr;
276
277 if (!mr->iommu_ops) {
278 break;
279 }
280
281 iotlb = mr->iommu_ops->translate(mr, addr);
282 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
283 | (addr & iotlb.addr_mask));
284 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
285 if (!(iotlb.perm & (1 << is_write))) {
286 mr = &io_mem_unassigned;
287 break;
288 }
289
290 as = iotlb.target_as;
291 }
292
293 *plen = len;
294 *xlat = addr;
295 return mr;
90260c6c
JK
296}
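/* Usage sketch (approximate, mirroring what the address_space_rw() path
 * does): translate chunk by chunk and dispatch on the resulting region:
 *
 *     hwaddr xlat, l = len;
 *     MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l,
 *                                                is_write);
 *     if (memory_region_is_ram(mr)) {
 *         direct host access via qemu_get_ram_ptr(ram_addr + xlat)
 *     } else {
 *         MMIO access via io_mem_read()/io_mem_write() for l bytes
 *     }
 *
 * *plen is clamped to the end of the section (and of any IOMMU mapping),
 * so callers must loop until the whole length has been covered. */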
297
298MemoryRegionSection *
299address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
300 hwaddr *plen)
301{
30951157
AK
302 MemoryRegionSection *section;
303 section = address_space_translate_internal(as, addr, xlat, plen, false);
304
305 assert(!section->mr->iommu_ops);
306 return section;
90260c6c 307}
5b6dd868 308#endif
fd6ce8f6 309
5b6dd868 310void cpu_exec_init_all(void)
fdbb84d1 311{
5b6dd868 312#if !defined(CONFIG_USER_ONLY)
b2a8658e 313 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
314 memory_map_init();
315 io_mem_init();
fdbb84d1 316#endif
5b6dd868 317}
fdbb84d1 318
b170fce3 319#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
320
321static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 322{
259186a7 323 CPUState *cpu = opaque;
a513fe19 324
5b6dd868
BS
325 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
326 version_id is increased. */
259186a7
AF
327 cpu->interrupt_request &= ~0x01;
328 tlb_flush(cpu->env_ptr, 1);
5b6dd868
BS
329
330 return 0;
a513fe19 331}
7501267e 332
1a1562f5 333const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
334 .name = "cpu_common",
335 .version_id = 1,
336 .minimum_version_id = 1,
337 .minimum_version_id_old = 1,
338 .post_load = cpu_common_post_load,
339 .fields = (VMStateField []) {
259186a7
AF
340 VMSTATE_UINT32(halted, CPUState),
341 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868
BS
342 VMSTATE_END_OF_LIST()
343 }
344};
1a1562f5 345
5b6dd868 346#endif
ea041c0e 347
38d8f5c8 348CPUState *qemu_get_cpu(int index)
ea041c0e 349{
5b6dd868 350 CPUArchState *env = first_cpu;
38d8f5c8 351 CPUState *cpu = NULL;
ea041c0e 352
5b6dd868 353 while (env) {
55e5c285
AF
354 cpu = ENV_GET_CPU(env);
355 if (cpu->cpu_index == index) {
5b6dd868 356 break;
55e5c285 357 }
5b6dd868 358 env = env->next_cpu;
ea041c0e 359 }
5b6dd868 360
d76fddae 361 return env ? cpu : NULL;
ea041c0e
FB
362}
363
d6b9e0d6
MT
364void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
365{
366 CPUArchState *env = first_cpu;
367
368 while (env) {
369 func(ENV_GET_CPU(env), data);
370 env = env->next_cpu;
371 }
372}
373
5b6dd868 374void cpu_exec_init(CPUArchState *env)
ea041c0e 375{
5b6dd868 376 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 377 CPUClass *cc = CPU_GET_CLASS(cpu);
5b6dd868
BS
378 CPUArchState **penv;
379 int cpu_index;
380
381#if defined(CONFIG_USER_ONLY)
382 cpu_list_lock();
383#endif
384 env->next_cpu = NULL;
385 penv = &first_cpu;
386 cpu_index = 0;
387 while (*penv != NULL) {
388 penv = &(*penv)->next_cpu;
389 cpu_index++;
390 }
55e5c285 391 cpu->cpu_index = cpu_index;
1b1ed8dc 392 cpu->numa_node = 0;
5b6dd868
BS
393 QTAILQ_INIT(&env->breakpoints);
394 QTAILQ_INIT(&env->watchpoints);
395#ifndef CONFIG_USER_ONLY
396 cpu->thread_id = qemu_get_thread_id();
397#endif
398 *penv = env;
399#if defined(CONFIG_USER_ONLY)
400 cpu_list_unlock();
401#endif
259186a7 402 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
5b6dd868 403#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
404 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
405 cpu_save, cpu_load, env);
b170fce3 406 assert(cc->vmsd == NULL);
5b6dd868 407#endif
b170fce3
AF
408 if (cc->vmsd != NULL) {
409 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
410 }
ea041c0e
FB
411}
412
1fddef4b 413#if defined(TARGET_HAS_ICE)
94df27fd 414#if defined(CONFIG_USER_ONLY)
9349b4f9 415static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
94df27fd
PB
416{
417 tb_invalidate_phys_page_range(pc, pc + 1, 0);
418}
419#else
1e7855a5
MF
420static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
421{
9d70c4b7
MF
422 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
423 (pc & ~TARGET_PAGE_MASK));
1e7855a5 424}
c27004ec 425#endif
94df27fd 426#endif /* TARGET_HAS_ICE */
d720b93d 427
c527ee8f 428#if defined(CONFIG_USER_ONLY)
9349b4f9 429void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f
PB
430
431{
432}
433
9349b4f9 434int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f
PB
435 int flags, CPUWatchpoint **watchpoint)
436{
437 return -ENOSYS;
438}
439#else
6658ffb8 440/* Add a watchpoint. */
9349b4f9 441int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 442 int flags, CPUWatchpoint **watchpoint)
6658ffb8 443{
b4051334 444 target_ulong len_mask = ~(len - 1);
c0ce998e 445 CPUWatchpoint *wp;
6658ffb8 446
b4051334 447 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
448 if ((len & (len - 1)) || (addr & ~len_mask) ||
449 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
450 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
451 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
452 return -EINVAL;
453 }
7267c094 454 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
455
456 wp->vaddr = addr;
b4051334 457 wp->len_mask = len_mask;
a1d1bb31
AL
458 wp->flags = flags;
459
2dc9f411 460 /* keep all GDB-injected watchpoints in front */
c0ce998e 461 if (flags & BP_GDB)
72cf2d4f 462 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 463 else
72cf2d4f 464 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 465
6658ffb8 466 tlb_flush_page(env, addr);
a1d1bb31
AL
467
468 if (watchpoint)
469 *watchpoint = wp;
470 return 0;
6658ffb8
PB
471}
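/* Usage sketch: the gdbstub inserts a 4-byte write watchpoint with roughly
 *
 *     cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, NULL);
 *
 * len must be a power of two no larger than TARGET_PAGE_SIZE and addr must
 * be aligned to len, otherwise -EINVAL is returned. */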
472
a1d1bb31 473/* Remove a specific watchpoint. */
9349b4f9 474int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 475 int flags)
6658ffb8 476{
b4051334 477 target_ulong len_mask = ~(len - 1);
a1d1bb31 478 CPUWatchpoint *wp;
6658ffb8 479
72cf2d4f 480 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 481 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 482 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 483 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
484 return 0;
485 }
486 }
a1d1bb31 487 return -ENOENT;
6658ffb8
PB
488}
489
a1d1bb31 490/* Remove a specific watchpoint by reference. */
9349b4f9 491void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 492{
72cf2d4f 493 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 494
a1d1bb31
AL
495 tlb_flush_page(env, watchpoint->vaddr);
496
7267c094 497 g_free(watchpoint);
a1d1bb31
AL
498}
499
500/* Remove all matching watchpoints. */
9349b4f9 501void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 502{
c0ce998e 503 CPUWatchpoint *wp, *next;
a1d1bb31 504
72cf2d4f 505 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
506 if (wp->flags & mask)
507 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 508 }
7d03f82f 509}
c527ee8f 510#endif
7d03f82f 511
a1d1bb31 512/* Add a breakpoint. */
9349b4f9 513int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 514 CPUBreakpoint **breakpoint)
4c3a88a2 515{
1fddef4b 516#if defined(TARGET_HAS_ICE)
c0ce998e 517 CPUBreakpoint *bp;
3b46e624 518
7267c094 519 bp = g_malloc(sizeof(*bp));
4c3a88a2 520
a1d1bb31
AL
521 bp->pc = pc;
522 bp->flags = flags;
523
2dc9f411 524 /* keep all GDB-injected breakpoints in front */
c0ce998e 525 if (flags & BP_GDB)
72cf2d4f 526 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 527 else
72cf2d4f 528 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 529
d720b93d 530 breakpoint_invalidate(env, pc);
a1d1bb31
AL
531
532 if (breakpoint)
533 *breakpoint = bp;
4c3a88a2
FB
534 return 0;
535#else
a1d1bb31 536 return -ENOSYS;
4c3a88a2
FB
537#endif
538}
539
a1d1bb31 540/* Remove a specific breakpoint. */
9349b4f9 541int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 542{
7d03f82f 543#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
544 CPUBreakpoint *bp;
545
72cf2d4f 546 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
547 if (bp->pc == pc && bp->flags == flags) {
548 cpu_breakpoint_remove_by_ref(env, bp);
549 return 0;
550 }
7d03f82f 551 }
a1d1bb31
AL
552 return -ENOENT;
553#else
554 return -ENOSYS;
7d03f82f
EI
555#endif
556}
557
a1d1bb31 558/* Remove a specific breakpoint by reference. */
9349b4f9 559void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 560{
1fddef4b 561#if defined(TARGET_HAS_ICE)
72cf2d4f 562 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 563
a1d1bb31
AL
564 breakpoint_invalidate(env, breakpoint->pc);
565
7267c094 566 g_free(breakpoint);
a1d1bb31
AL
567#endif
568}
569
570/* Remove all matching breakpoints. */
9349b4f9 571void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
572{
573#if defined(TARGET_HAS_ICE)
c0ce998e 574 CPUBreakpoint *bp, *next;
a1d1bb31 575
72cf2d4f 576 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
577 if (bp->flags & mask)
578 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 579 }
4c3a88a2
FB
580#endif
581}
582
c33a346e
FB
583/* enable or disable single step mode. EXCP_DEBUG is returned by the
584 CPU loop after each instruction */
9349b4f9 585void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 586{
1fddef4b 587#if defined(TARGET_HAS_ICE)
c33a346e
FB
588 if (env->singlestep_enabled != enabled) {
589 env->singlestep_enabled = enabled;
e22a25c9
AL
590 if (kvm_enabled())
591 kvm_update_guest_debug(env, 0);
592 else {
ccbb4d44 593 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
594 /* XXX: only flush what is necessary */
595 tb_flush(env);
596 }
c33a346e
FB
597 }
598#endif
599}
600
9349b4f9 601void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
602{
603 va_list ap;
493ae1f0 604 va_list ap2;
7501267e
FB
605
606 va_start(ap, fmt);
493ae1f0 607 va_copy(ap2, ap);
7501267e
FB
608 fprintf(stderr, "qemu: fatal: ");
609 vfprintf(stderr, fmt, ap);
610 fprintf(stderr, "\n");
6fd2a026 611 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
612 if (qemu_log_enabled()) {
613 qemu_log("qemu: fatal: ");
614 qemu_log_vprintf(fmt, ap2);
615 qemu_log("\n");
6fd2a026 616 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 617 qemu_log_flush();
93fcfe39 618 qemu_log_close();
924edcae 619 }
493ae1f0 620 va_end(ap2);
f9373291 621 va_end(ap);
fd052bf6
RV
622#if defined(CONFIG_USER_ONLY)
623 {
624 struct sigaction act;
625 sigfillset(&act.sa_mask);
626 act.sa_handler = SIG_DFL;
627 sigaction(SIGABRT, &act, NULL);
628 }
629#endif
7501267e
FB
630 abort();
631}
632
9349b4f9 633CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 634{
9349b4f9
AF
635 CPUArchState *new_env = cpu_init(env->cpu_model_str);
636 CPUArchState *next_cpu = new_env->next_cpu;
5a38f081
AL
637#if defined(TARGET_HAS_ICE)
638 CPUBreakpoint *bp;
639 CPUWatchpoint *wp;
640#endif
641
9349b4f9 642 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081 643
55e5c285 644 /* Preserve chaining. */
c5be9f08 645 new_env->next_cpu = next_cpu;
5a38f081
AL
646
647 /* Clone all break/watchpoints.
648 Note: Once we support ptrace with hw-debug register access, make sure
649 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
650 QTAILQ_INIT(&env->breakpoints);
651 QTAILQ_INIT(&env->watchpoints);
5a38f081 652#if defined(TARGET_HAS_ICE)
72cf2d4f 653 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
654 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
655 }
72cf2d4f 656 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
657 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
658 wp->flags, NULL);
659 }
660#endif
661
c5be9f08
TS
662 return new_env;
663}
664
0124311e 665#if !defined(CONFIG_USER_ONLY)
d24981d3
JQ
666static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
667 uintptr_t length)
668{
669 uintptr_t start1;
670
671 /* we modify the TLB cache so that the dirty bit will be set again
672 when accessing the range */
673 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
674 /* Check that we don't span multiple blocks - this breaks the
675 address comparisons below. */
676 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
677 != (end - 1) - start) {
678 abort();
679 }
680 cpu_tlb_reset_dirty_all(start1, length);
681
682}
683
5579c7f3 684/* Note: start and end must be within the same ram block. */
c227f099 685void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 686 int dirty_flags)
1ccde1cb 687{
d24981d3 688 uintptr_t length;
1ccde1cb
FB
689
690 start &= TARGET_PAGE_MASK;
691 end = TARGET_PAGE_ALIGN(end);
692
693 length = end - start;
694 if (length == 0)
695 return;
f7c11b53 696 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 697
d24981d3
JQ
698 if (tcg_enabled()) {
699 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 700 }
1ccde1cb
FB
701}
702
8b9c99d9 703static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 704{
f6f3fbca 705 int ret = 0;
74576198 706 in_migration = enable;
f6f3fbca 707 return ret;
74576198
AL
708}
709
a8170e5e 710hwaddr memory_region_section_get_iotlb(CPUArchState *env,
149f54b5
PB
711 MemoryRegionSection *section,
712 target_ulong vaddr,
713 hwaddr paddr, hwaddr xlat,
714 int prot,
715 target_ulong *address)
e5548617 716{
a8170e5e 717 hwaddr iotlb;
e5548617
BS
718 CPUWatchpoint *wp;
719
cc5bea60 720 if (memory_region_is_ram(section->mr)) {
e5548617
BS
721 /* Normal RAM. */
722 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 723 + xlat;
e5548617
BS
724 if (!section->readonly) {
725 iotlb |= phys_section_notdirty;
726 } else {
727 iotlb |= phys_section_rom;
728 }
729 } else {
e5548617 730 iotlb = section - phys_sections;
149f54b5 731 iotlb += xlat;
e5548617
BS
732 }
733
734 /* Make accesses to pages with watchpoints go via the
735 watchpoint trap routines. */
736 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
737 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
738 /* Avoid trapping reads of pages with a write breakpoint. */
739 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
740 iotlb = phys_section_watch + paddr;
741 *address |= TLB_MMIO;
742 break;
743 }
744 }
745 }
746
747 return iotlb;
748}
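/* The returned iotlb value is overloaded: for RAM pages it is the page's
 * ram_addr_t combined with the notdirty or rom section index, for
 * everything else it is the phys_sections index combined with the
 * translated offset.  phys_section_add() keeps the number of sections
 * below TARGET_PAGE_SIZE, so the section index always fits in the
 * sub-page bits and iotlb_to_region() can recover the MemoryRegion. */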
9fa3e853
FB
749#endif /* defined(CONFIG_USER_ONLY) */
750
e2eef170 751#if !defined(CONFIG_USER_ONLY)
8da3ff18 752
c227f099 753static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 754 uint16_t section);
acc9d80b 755static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
5312bd8b 756static void destroy_page_desc(uint16_t section_index)
54688b1e 757{
5312bd8b
AK
758 MemoryRegionSection *section = &phys_sections[section_index];
759 MemoryRegion *mr = section->mr;
54688b1e
AK
760
761 if (mr->subpage) {
762 subpage_t *subpage = container_of(mr, subpage_t, iomem);
763 memory_region_destroy(&subpage->iomem);
764 g_free(subpage);
765 }
766}
767
4346ae3e 768static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
769{
770 unsigned i;
d6f2ea22 771 PhysPageEntry *p;
54688b1e 772
c19e8800 773 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
774 return;
775 }
776
c19e8800 777 p = phys_map_nodes[lp->ptr];
4346ae3e 778 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 779 if (!p[i].is_leaf) {
54688b1e 780 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 781 } else {
c19e8800 782 destroy_page_desc(p[i].ptr);
54688b1e 783 }
54688b1e 784 }
07f07b31 785 lp->is_leaf = 0;
c19e8800 786 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
787}
788
ac1970fb 789static void destroy_all_mappings(AddressSpaceDispatch *d)
54688b1e 790{
ac1970fb 791 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
d6f2ea22 792 phys_map_nodes_reset();
54688b1e
AK
793}
794
5312bd8b
AK
795static uint16_t phys_section_add(MemoryRegionSection *section)
796{
68f3f65b
PB
797 /* The physical section number is ORed with a page-aligned
798 * pointer to produce the iotlb entries. Thus it should
799 * never overflow into the page-aligned value.
800 */
801 assert(phys_sections_nb < TARGET_PAGE_SIZE);
802
5312bd8b
AK
803 if (phys_sections_nb == phys_sections_nb_alloc) {
804 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
805 phys_sections = g_renew(MemoryRegionSection, phys_sections,
806 phys_sections_nb_alloc);
807 }
808 phys_sections[phys_sections_nb] = *section;
809 return phys_sections_nb++;
810}
811
812static void phys_sections_clear(void)
813{
814 phys_sections_nb = 0;
815}
816
ac1970fb 817static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
818{
819 subpage_t *subpage;
a8170e5e 820 hwaddr base = section->offset_within_address_space
0f0cb164 821 & TARGET_PAGE_MASK;
ac1970fb 822 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
0f0cb164
AK
823 MemoryRegionSection subsection = {
824 .offset_within_address_space = base,
052e87b0 825 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 826 };
a8170e5e 827 hwaddr start, end;
0f0cb164 828
f3705d53 829 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 830
f3705d53 831 if (!(existing->mr->subpage)) {
acc9d80b 832 subpage = subpage_init(d->as, base);
0f0cb164 833 subsection.mr = &subpage->iomem;
ac1970fb 834 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 835 phys_section_add(&subsection));
0f0cb164 836 } else {
f3705d53 837 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
838 }
839 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 840 end = start + int128_get64(section->size) - 1;
0f0cb164
AK
841 subpage_register(subpage, start, end, phys_section_add(section));
842}
843
844
052e87b0
PB
845static void register_multipage(AddressSpaceDispatch *d,
846 MemoryRegionSection *section)
33417e70 847{
a8170e5e 848 hwaddr start_addr = section->offset_within_address_space;
5312bd8b 849 uint16_t section_index = phys_section_add(section);
052e87b0
PB
850 uint64_t num_pages = int128_get64(int128_rshift(section->size,
851 TARGET_PAGE_BITS));
dd81124b 852
733d5ef5
PB
853 assert(num_pages);
854 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
855}
856
ac1970fb 857static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 858{
ac1970fb 859 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
99b9cc06 860 MemoryRegionSection now = *section, remain = *section;
052e87b0 861 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 862
733d5ef5
PB
863 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
864 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
865 - now.offset_within_address_space;
866
052e87b0 867 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 868 register_subpage(d, &now);
733d5ef5 869 } else {
052e87b0 870 now.size = int128_zero();
733d5ef5 871 }
052e87b0
PB
872 while (int128_ne(remain.size, now.size)) {
873 remain.size = int128_sub(remain.size, now.size);
874 remain.offset_within_address_space += int128_get64(now.size);
875 remain.offset_within_region += int128_get64(now.size);
69b67646 876 now = remain;
052e87b0 877 if (int128_lt(remain.size, page_size)) {
733d5ef5
PB
878 register_subpage(d, &now);
879 } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
052e87b0 880 now.size = page_size;
ac1970fb 881 register_subpage(d, &now);
69b67646 882 } else {
052e87b0 883 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 884 register_multipage(d, &now);
69b67646 885 }
0f0cb164
AK
886 }
887}
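/* Example (assuming 4 KiB target pages and a region offset with the same
 * sub-page alignment): a section covering guest-physical [0x1234, 0x4234)
 * is split into a leading sub-page piece [0x1234, 0x2000), whole pages
 * [0x2000, 0x4000) registered via register_multipage(), and a trailing
 * sub-page piece [0x4000, 0x4234). */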
888
62a2744c
SY
889void qemu_flush_coalesced_mmio_buffer(void)
890{
891 if (kvm_enabled())
892 kvm_flush_coalesced_mmio_buffer();
893}
894
b2a8658e
UD
895void qemu_mutex_lock_ramlist(void)
896{
897 qemu_mutex_lock(&ram_list.mutex);
898}
899
900void qemu_mutex_unlock_ramlist(void)
901{
902 qemu_mutex_unlock(&ram_list.mutex);
903}
904
c902760f
MT
905#if defined(__linux__) && !defined(TARGET_S390X)
906
907#include <sys/vfs.h>
908
909#define HUGETLBFS_MAGIC 0x958458f6
910
911static long gethugepagesize(const char *path)
912{
913 struct statfs fs;
914 int ret;
915
916 do {
9742bf26 917 ret = statfs(path, &fs);
c902760f
MT
918 } while (ret != 0 && errno == EINTR);
919
920 if (ret != 0) {
9742bf26
YT
921 perror(path);
922 return 0;
c902760f
MT
923 }
924
925 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 926 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
927
928 return fs.f_bsize;
929}
930
04b16653
AW
931static void *file_ram_alloc(RAMBlock *block,
932 ram_addr_t memory,
933 const char *path)
c902760f
MT
934{
935 char *filename;
8ca761f6
PF
936 char *sanitized_name;
937 char *c;
c902760f
MT
938 void *area;
939 int fd;
940#ifdef MAP_POPULATE
941 int flags;
942#endif
943 unsigned long hpagesize;
944
945 hpagesize = gethugepagesize(path);
946 if (!hpagesize) {
9742bf26 947 return NULL;
c902760f
MT
948 }
949
950 if (memory < hpagesize) {
951 return NULL;
952 }
953
954 if (kvm_enabled() && !kvm_has_sync_mmu()) {
955 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
956 return NULL;
957 }
958
8ca761f6
PF
959 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
960 sanitized_name = g_strdup(block->mr->name);
961 for (c = sanitized_name; *c != '\0'; c++) {
962 if (*c == '/')
963 *c = '_';
964 }
965
966 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
967 sanitized_name);
968 g_free(sanitized_name);
c902760f
MT
969
970 fd = mkstemp(filename);
971 if (fd < 0) {
9742bf26 972 perror("unable to create backing store for hugepages");
e4ada482 973 g_free(filename);
9742bf26 974 return NULL;
c902760f
MT
975 }
976 unlink(filename);
e4ada482 977 g_free(filename);
c902760f
MT
978
979 memory = (memory+hpagesize-1) & ~(hpagesize-1);
980
981 /*
982 * ftruncate is not supported by hugetlbfs in older
983 * hosts, so don't bother bailing out on errors.
984 * If anything goes wrong with it under other filesystems,
985 * mmap will fail.
986 */
987 if (ftruncate(fd, memory))
9742bf26 988 perror("ftruncate");
c902760f
MT
989
990#ifdef MAP_POPULATE
991 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
992 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
993 * to sidestep this quirk.
994 */
995 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
996 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
997#else
998 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
999#endif
1000 if (area == MAP_FAILED) {
9742bf26
YT
1001 perror("file_ram_alloc: can't mmap RAM pages");
1002 close(fd);
1003 return (NULL);
c902760f 1004 }
04b16653 1005 block->fd = fd;
c902760f
MT
1006 return area;
1007}
1008#endif
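/* Note on the hugetlbfs path above: the backing file is unlinked as soon
 * as it has been created, so it is cleaned up automatically when QEMU
 * exits, and the requested size is rounded up to a whole number of huge
 * pages, e.g. 100 MiB stays 100 MiB with 2 MiB pages but becomes 1 GiB
 * with 1 GiB pages. */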
1009
d17b5288 1010static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1011{
1012 RAMBlock *block, *next_block;
3e837b2c 1013 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1014
49cd9ac6
SH
1015 assert(size != 0); /* it would hand out the same offset multiple times */
1016
a3161038 1017 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1018 return 0;
1019
a3161038 1020 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1021 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1022
1023 end = block->offset + block->length;
1024
a3161038 1025 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1026 if (next_block->offset >= end) {
1027 next = MIN(next, next_block->offset);
1028 }
1029 }
1030 if (next - end >= size && next - end < mingap) {
3e837b2c 1031 offset = end;
04b16653
AW
1032 mingap = next - end;
1033 }
1034 }
3e837b2c
AW
1035
1036 if (offset == RAM_ADDR_MAX) {
1037 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1038 (uint64_t)size);
1039 abort();
1040 }
1041
04b16653
AW
1042 return offset;
1043}
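/* Example: with blocks occupying [0, 1 GiB) and [1.5 GiB, 4 GiB), a
 * 256 MiB request lands at offset 1 GiB (the smallest gap that still
 * fits), while a 768 MiB request falls through to offset 4 GiB after the
 * last block; if no gap is large enough the function aborts. */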
1044
652d7ec2 1045ram_addr_t last_ram_offset(void)
d17b5288
AW
1046{
1047 RAMBlock *block;
1048 ram_addr_t last = 0;
1049
a3161038 1050 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1051 last = MAX(last, block->offset + block->length);
1052
1053 return last;
1054}
1055
ddb97f1d
JB
1056static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1057{
1058 int ret;
1059 QemuOpts *machine_opts;
1060
1061 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1062 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1063 if (machine_opts &&
1064 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
1065 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1066 if (ret) {
1067 perror("qemu_madvise");
1068 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1069 "but dump_guest_core=off specified\n");
1070 }
1071 }
1072}
1073
c5705a77 1074void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
1075{
1076 RAMBlock *new_block, *block;
1077
c5705a77 1078 new_block = NULL;
a3161038 1079 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77
AK
1080 if (block->offset == addr) {
1081 new_block = block;
1082 break;
1083 }
1084 }
1085 assert(new_block);
1086 assert(!new_block->idstr[0]);
84b89d78 1087
09e5ab63
AL
1088 if (dev) {
1089 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1090 if (id) {
1091 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1092 g_free(id);
84b89d78
CM
1093 }
1094 }
1095 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1096
b2a8658e
UD
1097 /* This assumes the iothread lock is taken here too. */
1098 qemu_mutex_lock_ramlist();
a3161038 1099 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1100 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1101 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1102 new_block->idstr);
1103 abort();
1104 }
1105 }
b2a8658e 1106 qemu_mutex_unlock_ramlist();
c5705a77
AK
1107}
1108
8490fc78
LC
1109static int memory_try_enable_merging(void *addr, size_t len)
1110{
1111 QemuOpts *opts;
1112
1113 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1114 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1115 /* disabled by the user */
1116 return 0;
1117 }
1118
1119 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1120}
1121
c5705a77
AK
1122ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1123 MemoryRegion *mr)
1124{
abb26d63 1125 RAMBlock *block, *new_block;
c5705a77
AK
1126
1127 size = TARGET_PAGE_ALIGN(size);
1128 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1129
b2a8658e
UD
1130 /* This assumes the iothread lock is taken here too. */
1131 qemu_mutex_lock_ramlist();
7c637366 1132 new_block->mr = mr;
432d268c 1133 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1134 if (host) {
1135 new_block->host = host;
cd19cfa2 1136 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
1137 } else {
1138 if (mem_path) {
c902760f 1139#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
1140 new_block->host = file_ram_alloc(new_block, size, mem_path);
1141 if (!new_block->host) {
6eebf958 1142 new_block->host = qemu_anon_ram_alloc(size);
8490fc78 1143 memory_try_enable_merging(new_block->host, size);
6977dfe6 1144 }
c902760f 1145#else
6977dfe6
YT
1146 fprintf(stderr, "-mem-path option unsupported\n");
1147 exit(1);
c902760f 1148#endif
6977dfe6 1149 } else {
868bb33f 1150 if (xen_enabled()) {
fce537d4 1151 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
1152 } else if (kvm_enabled()) {
1153 /* some s390/kvm configurations have special constraints */
6eebf958 1154 new_block->host = kvm_ram_alloc(size);
432d268c 1155 } else {
6eebf958 1156 new_block->host = qemu_anon_ram_alloc(size);
432d268c 1157 }
8490fc78 1158 memory_try_enable_merging(new_block->host, size);
6977dfe6 1159 }
c902760f 1160 }
94a6b54f
PB
1161 new_block->length = size;
1162
abb26d63
PB
1163 /* Keep the list sorted from biggest to smallest block. */
1164 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1165 if (block->length < new_block->length) {
1166 break;
1167 }
1168 }
1169 if (block) {
1170 QTAILQ_INSERT_BEFORE(block, new_block, next);
1171 } else {
1172 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1173 }
0d6d3c87 1174 ram_list.mru_block = NULL;
94a6b54f 1175
f798b07f 1176 ram_list.version++;
b2a8658e 1177 qemu_mutex_unlock_ramlist();
f798b07f 1178
7267c094 1179 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1180 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
1181 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1182 0, size >> TARGET_PAGE_BITS);
1720aeee 1183 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1184
ddb97f1d 1185 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1186 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1187
6f0437e8
JK
1188 if (kvm_enabled())
1189 kvm_setup_guest_memory(new_block->host, size);
1190
94a6b54f
PB
1191 return new_block->offset;
1192}
e9a1ab19 1193
c5705a77 1194ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1195{
c5705a77 1196 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1197}
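/* These allocators are normally reached through the memory API rather
 * than called directly: memory_region_init_ram() ends up in
 * qemu_ram_alloc(), while memory_region_init_ram_ptr() hands a
 * caller-provided host buffer to qemu_ram_alloc_from_ptr(). */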
1198
1f2e98b6
AW
1199void qemu_ram_free_from_ptr(ram_addr_t addr)
1200{
1201 RAMBlock *block;
1202
b2a8658e
UD
1203 /* This assumes the iothread lock is taken here too. */
1204 qemu_mutex_lock_ramlist();
a3161038 1205 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1206 if (addr == block->offset) {
a3161038 1207 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1208 ram_list.mru_block = NULL;
f798b07f 1209 ram_list.version++;
7267c094 1210 g_free(block);
b2a8658e 1211 break;
1f2e98b6
AW
1212 }
1213 }
b2a8658e 1214 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1215}
1216
c227f099 1217void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1218{
04b16653
AW
1219 RAMBlock *block;
1220
b2a8658e
UD
1221 /* This assumes the iothread lock is taken here too. */
1222 qemu_mutex_lock_ramlist();
a3161038 1223 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1224 if (addr == block->offset) {
a3161038 1225 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1226 ram_list.mru_block = NULL;
f798b07f 1227 ram_list.version++;
cd19cfa2
HY
1228 if (block->flags & RAM_PREALLOC_MASK) {
1229 ;
1230 } else if (mem_path) {
04b16653
AW
1231#if defined (__linux__) && !defined(TARGET_S390X)
1232 if (block->fd) {
1233 munmap(block->host, block->length);
1234 close(block->fd);
1235 } else {
e7a09b92 1236 qemu_anon_ram_free(block->host, block->length);
04b16653 1237 }
fd28aa13
JK
1238#else
1239 abort();
04b16653
AW
1240#endif
1241 } else {
868bb33f 1242 if (xen_enabled()) {
e41d7c69 1243 xen_invalidate_map_cache_entry(block->host);
432d268c 1244 } else {
e7a09b92 1245 qemu_anon_ram_free(block->host, block->length);
432d268c 1246 }
04b16653 1247 }
7267c094 1248 g_free(block);
b2a8658e 1249 break;
04b16653
AW
1250 }
1251 }
b2a8658e 1252 qemu_mutex_unlock_ramlist();
04b16653 1253
e9a1ab19
FB
1254}
1255
cd19cfa2
HY
1256#ifndef _WIN32
1257void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1258{
1259 RAMBlock *block;
1260 ram_addr_t offset;
1261 int flags;
1262 void *area, *vaddr;
1263
a3161038 1264 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1265 offset = addr - block->offset;
1266 if (offset < block->length) {
1267 vaddr = block->host + offset;
1268 if (block->flags & RAM_PREALLOC_MASK) {
1269 ;
1270 } else {
1271 flags = MAP_FIXED;
1272 munmap(vaddr, length);
1273 if (mem_path) {
1274#if defined(__linux__) && !defined(TARGET_S390X)
1275 if (block->fd) {
1276#ifdef MAP_POPULATE
1277 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1278 MAP_PRIVATE;
1279#else
1280 flags |= MAP_PRIVATE;
1281#endif
1282 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1283 flags, block->fd, offset);
1284 } else {
1285 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1286 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1287 flags, -1, 0);
1288 }
fd28aa13
JK
1289#else
1290 abort();
cd19cfa2
HY
1291#endif
1292 } else {
1293#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1294 flags |= MAP_SHARED | MAP_ANONYMOUS;
1295 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1296 flags, -1, 0);
1297#else
1298 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1299 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1300 flags, -1, 0);
1301#endif
1302 }
1303 if (area != vaddr) {
f15fbc4b
AP
1304 fprintf(stderr, "Could not remap addr: "
1305 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1306 length, addr);
1307 exit(1);
1308 }
8490fc78 1309 memory_try_enable_merging(vaddr, length);
ddb97f1d 1310 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1311 }
1312 return;
1313 }
1314 }
1315}
1316#endif /* !_WIN32 */
1317
dc828ca1 1318/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
1319 With the exception of the softmmu code in this file, this should
1320 only be used for local memory (e.g. video ram) that the device owns,
1321 and knows it isn't going to access beyond the end of the block.
1322
1323 It should not be used for general purpose DMA.
1324 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1325 */
c227f099 1326void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 1327{
94a6b54f
PB
1328 RAMBlock *block;
1329
b2a8658e 1330 /* The list is protected by the iothread lock here. */
0d6d3c87
PB
1331 block = ram_list.mru_block;
1332 if (block && addr - block->offset < block->length) {
1333 goto found;
1334 }
a3161038 1335 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1336 if (addr - block->offset < block->length) {
0d6d3c87 1337 goto found;
f471a17e 1338 }
94a6b54f 1339 }
f471a17e
AW
1340
1341 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1342 abort();
1343
0d6d3c87
PB
1344found:
1345 ram_list.mru_block = block;
1346 if (xen_enabled()) {
1347 /* We need to check if the requested address is in the RAM
1348 * because we don't want to map the entire memory in QEMU.
1349 * In that case just map until the end of the page.
1350 */
1351 if (block->offset == 0) {
1352 return xen_map_cache(addr, 0, 0);
1353 } else if (block->host == NULL) {
1354 block->host =
1355 xen_map_cache(block->offset, block->length, 1);
1356 }
1357 }
1358 return block->host + (addr - block->offset);
dc828ca1
PB
1359}
1360
0d6d3c87
PB
1361/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1362 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1363 *
1364 * ??? Is this still necessary?
b2e0a138 1365 */
8b9c99d9 1366static void *qemu_safe_ram_ptr(ram_addr_t addr)
b2e0a138
MT
1367{
1368 RAMBlock *block;
1369
b2a8658e 1370 /* The list is protected by the iothread lock here. */
a3161038 1371 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1372 if (addr - block->offset < block->length) {
868bb33f 1373 if (xen_enabled()) {
432d268c
JN
1374 /* We need to check if the requested address is in the RAM
1375 * because we don't want to map the entire memory in QEMU.
712c2b41 1376 * In that case just map until the end of the page.
432d268c
JN
1377 */
1378 if (block->offset == 0) {
e41d7c69 1379 return xen_map_cache(addr, 0, 0);
432d268c 1380 } else if (block->host == NULL) {
e41d7c69
JK
1381 block->host =
1382 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
1383 }
1384 }
b2e0a138
MT
1385 return block->host + (addr - block->offset);
1386 }
1387 }
1388
1389 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1390 abort();
1391
1392 return NULL;
1393}
1394
38bee5dc
SS
1395/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1396 * but takes a size argument */
8b9c99d9 1397static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 1398{
8ab934f9
SS
1399 if (*size == 0) {
1400 return NULL;
1401 }
868bb33f 1402 if (xen_enabled()) {
e41d7c69 1403 return xen_map_cache(addr, *size, 1);
868bb33f 1404 } else {
38bee5dc
SS
1405 RAMBlock *block;
1406
a3161038 1407 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1408 if (addr - block->offset < block->length) {
1409 if (addr - block->offset + *size > block->length)
1410 *size = block->length - addr + block->offset;
1411 return block->host + (addr - block->offset);
1412 }
1413 }
1414
1415 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1416 abort();
38bee5dc
SS
1417 }
1418}
1419
e890261f 1420int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1421{
94a6b54f
PB
1422 RAMBlock *block;
1423 uint8_t *host = ptr;
1424
868bb33f 1425 if (xen_enabled()) {
e41d7c69 1426 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
1427 return 0;
1428 }
1429
a3161038 1430 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
1431 /* This case happens when the block is not mapped. */
1432 if (block->host == NULL) {
1433 continue;
1434 }
f471a17e 1435 if (host - block->host < block->length) {
e890261f
MT
1436 *ram_addr = block->offset + (host - block->host);
1437 return 0;
f471a17e 1438 }
94a6b54f 1439 }
432d268c 1440
e890261f
MT
1441 return -1;
1442}
f471a17e 1443
e890261f
MT
1444/* Some of the softmmu routines need to translate from a host pointer
1445 (typically a TLB entry) back to a ram offset. */
1446ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1447{
1448 ram_addr_t ram_addr;
f471a17e 1449
e890261f
MT
1450 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1451 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1452 abort();
1453 }
1454 return ram_addr;
5579c7f3
PB
1455}
1456
a8170e5e 1457static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1458 uint64_t val, unsigned size)
9fa3e853 1459{
3a7d929e 1460 int dirty_flags;
f7c11b53 1461 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1462 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
0e0df1e2 1463 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1464 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1465 }
0e0df1e2
AK
1466 switch (size) {
1467 case 1:
1468 stb_p(qemu_get_ram_ptr(ram_addr), val);
1469 break;
1470 case 2:
1471 stw_p(qemu_get_ram_ptr(ram_addr), val);
1472 break;
1473 case 4:
1474 stl_p(qemu_get_ram_ptr(ram_addr), val);
1475 break;
1476 default:
1477 abort();
3a7d929e 1478 }
f23db169 1479 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1480 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1481 /* we remove the notdirty callback only if the code has been
1482 flushed */
1483 if (dirty_flags == 0xff)
2e70f6ef 1484 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
1485}
1486
b018ddf6
PB
1487static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1488 unsigned size, bool is_write)
1489{
1490 return is_write;
1491}
1492
0e0df1e2 1493static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1494 .write = notdirty_mem_write,
b018ddf6 1495 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1496 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1497};
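/* notdirty_mem_ops backs RAM pages that are not yet marked fully dirty:
 * the first write traps here so translated code for the page can be
 * invalidated, the write is then performed on the RAM backing, the dirty
 * flags are updated, and once the page is fully dirty (0xff) the slow
 * path is dropped again via tlb_set_dirty(). */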
1498
0f459d16 1499/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1500static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1501{
9349b4f9 1502 CPUArchState *env = cpu_single_env;
06d55cc1 1503 target_ulong pc, cs_base;
0f459d16 1504 target_ulong vaddr;
a1d1bb31 1505 CPUWatchpoint *wp;
06d55cc1 1506 int cpu_flags;
0f459d16 1507
06d55cc1
AL
1508 if (env->watchpoint_hit) {
1509 /* We re-entered the check after replacing the TB. Now raise
1510 * the debug interrupt so that is will trigger after the
1511 * current instruction. */
c3affe56 1512 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1513 return;
1514 }
2e70f6ef 1515 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1516 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1517 if ((vaddr == (wp->vaddr & len_mask) ||
1518 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1519 wp->flags |= BP_WATCHPOINT_HIT;
1520 if (!env->watchpoint_hit) {
1521 env->watchpoint_hit = wp;
5a316526 1522 tb_check_watchpoint(env);
6e140f28
AL
1523 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1524 env->exception_index = EXCP_DEBUG;
488d6577 1525 cpu_loop_exit(env);
6e140f28
AL
1526 } else {
1527 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1528 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1529 cpu_resume_from_signal(env, NULL);
6e140f28 1530 }
06d55cc1 1531 }
6e140f28
AL
1532 } else {
1533 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1534 }
1535 }
1536}
1537
6658ffb8
PB
1538/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1539 so these check for a hit then pass through to the normal out-of-line
1540 phys routines. */
a8170e5e 1541static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1542 unsigned size)
6658ffb8 1543{
1ec9b909
AK
1544 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1545 switch (size) {
1546 case 1: return ldub_phys(addr);
1547 case 2: return lduw_phys(addr);
1548 case 4: return ldl_phys(addr);
1549 default: abort();
1550 }
6658ffb8
PB
1551}
1552
a8170e5e 1553static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1554 uint64_t val, unsigned size)
6658ffb8 1555{
1ec9b909
AK
1556 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1557 switch (size) {
67364150
MF
1558 case 1:
1559 stb_phys(addr, val);
1560 break;
1561 case 2:
1562 stw_phys(addr, val);
1563 break;
1564 case 4:
1565 stl_phys(addr, val);
1566 break;
1ec9b909
AK
1567 default: abort();
1568 }
6658ffb8
PB
1569}
1570
1ec9b909
AK
1571static const MemoryRegionOps watch_mem_ops = {
1572 .read = watch_mem_read,
1573 .write = watch_mem_write,
1574 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1575};
6658ffb8 1576
a8170e5e 1577static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1578 unsigned len)
db7b5426 1579{
acc9d80b
JK
1580 subpage_t *subpage = opaque;
1581 uint8_t buf[4];
791af8c8 1582
db7b5426 1583#if defined(DEBUG_SUBPAGE)
acc9d80b
JK
1584 printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
1585 subpage, len, addr);
db7b5426 1586#endif
acc9d80b
JK
1587 address_space_read(subpage->as, addr + subpage->base, buf, len);
1588 switch (len) {
1589 case 1:
1590 return ldub_p(buf);
1591 case 2:
1592 return lduw_p(buf);
1593 case 4:
1594 return ldl_p(buf);
1595 default:
1596 abort();
1597 }
db7b5426
BS
1598}
1599
a8170e5e 1600static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1601 uint64_t value, unsigned len)
db7b5426 1602{
acc9d80b
JK
1603 subpage_t *subpage = opaque;
1604 uint8_t buf[4];
1605
db7b5426 1606#if defined(DEBUG_SUBPAGE)
70c68e44 1607 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
acc9d80b
JK
1608 " value %"PRIx64"\n",
1609 __func__, subpage, len, addr, value);
db7b5426 1610#endif
acc9d80b
JK
1611 switch (len) {
1612 case 1:
1613 stb_p(buf, value);
1614 break;
1615 case 2:
1616 stw_p(buf, value);
1617 break;
1618 case 4:
1619 stl_p(buf, value);
1620 break;
1621 default:
1622 abort();
1623 }
1624 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1625}
1626
c353e4cc
PB
1627static bool subpage_accepts(void *opaque, hwaddr addr,
1628 unsigned size, bool is_write)
1629{
acc9d80b 1630 subpage_t *subpage = opaque;
c353e4cc 1631#if defined(DEBUG_SUBPAGE)
acc9d80b
JK
1632 printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
1633 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1634#endif
1635
acc9d80b
JK
1636 return address_space_access_valid(subpage->as, addr + subpage->base,
1637 size, is_write);
c353e4cc
PB
1638}
1639
70c68e44
AK
1640static const MemoryRegionOps subpage_ops = {
1641 .read = subpage_read,
1642 .write = subpage_write,
c353e4cc 1643 .valid.accepts = subpage_accepts,
70c68e44 1644 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1645};
1646
c227f099 1647static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1648 uint16_t section)
db7b5426
BS
1649{
1650 int idx, eidx;
1651
1652 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1653 return -1;
1654 idx = SUBPAGE_IDX(start);
1655 eidx = SUBPAGE_IDX(end);
1656#if defined(DEBUG_SUBPAGE)
0bf9e31a 1657 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
1658 mmio, start, end, idx, eidx, section);
1659#endif
db7b5426 1660 for (; idx <= eidx; idx++) {
5312bd8b 1661 mmio->sub_section[idx] = section;
db7b5426
BS
1662 }
1663
1664 return 0;
1665}
1666
acc9d80b 1667static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1668{
c227f099 1669 subpage_t *mmio;
db7b5426 1670
7267c094 1671 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1672
acc9d80b 1673 mmio->as = as;
1eec614b 1674 mmio->base = base;
70c68e44
AK
1675 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1676 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1677 mmio->iomem.subpage = true;
db7b5426 1678#if defined(DEBUG_SUBPAGE)
1eec614b
AL
1679 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
1680 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 1681#endif
0f0cb164 1682 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
1683
1684 return mmio;
1685}
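/* Example: a 512-byte device region mapped at offset 0x200 of an
 * otherwise empty page gets a subpage whose sub_section[0x200..0x3ff]
 * entries point at the device's section, while every other byte offset
 * keeps pointing at phys_section_unassigned. */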
1686
5312bd8b
AK
1687static uint16_t dummy_section(MemoryRegion *mr)
1688{
1689 MemoryRegionSection section = {
1690 .mr = mr,
1691 .offset_within_address_space = 0,
1692 .offset_within_region = 0,
052e87b0 1693 .size = int128_2_64(),
5312bd8b
AK
1694 };
1695
1696 return phys_section_add(&section);
1697}
1698
a8170e5e 1699MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1700{
37ec01d4 1701 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1702}
1703
e9179ce1
AK
1704static void io_mem_init(void)
1705{
bf8d5166 1706 memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
0e0df1e2
AK
1707 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1708 "unassigned", UINT64_MAX);
1709 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1710 "notdirty", UINT64_MAX);
1ec9b909
AK
1711 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1712 "watch", UINT64_MAX);
e9179ce1
AK
1713}
1714
ac1970fb
AK
1715static void mem_begin(MemoryListener *listener)
1716{
1717 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1718
1719 destroy_all_mappings(d);
1720 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1721}
1722
50c1e149
AK
1723static void core_begin(MemoryListener *listener)
1724{
5312bd8b
AK
1725 phys_sections_clear();
1726 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
1727 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1728 phys_section_rom = dummy_section(&io_mem_rom);
1729 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
1730}
1731
1d71148e 1732static void tcg_commit(MemoryListener *listener)
50c1e149 1733{
9349b4f9 1734 CPUArchState *env;
117712c3
AK
1735
1736 /* since each CPU stores ram addresses in its TLB cache, we must
1737 reset the modified entries */
1738 /* XXX: slow ! */
1739 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1740 tlb_flush(env, 1);
1741 }
50c1e149
AK
1742}
1743
93632747
AK
1744static void core_log_global_start(MemoryListener *listener)
1745{
1746 cpu_physical_memory_set_dirty_tracking(1);
1747}
1748
1749static void core_log_global_stop(MemoryListener *listener)
1750{
1751 cpu_physical_memory_set_dirty_tracking(0);
1752}
1753
4855d41a
AK
1754static void io_region_add(MemoryListener *listener,
1755 MemoryRegionSection *section)
1756{
a2d33521
AK
1757 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1758
1759 mrio->mr = section->mr;
1760 mrio->offset = section->offset_within_region;
1761 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
052e87b0
PB
1762 section->offset_within_address_space,
1763 int128_get64(section->size));
a2d33521 1764 ioport_register(&mrio->iorange);
4855d41a
AK
1765}
1766
1767static void io_region_del(MemoryListener *listener,
1768 MemoryRegionSection *section)
1769{
052e87b0
PB
1770 isa_unassign_ioport(section->offset_within_address_space,
1771 int128_get64(section->size));
4855d41a
AK
1772}
1773
93632747 1774static MemoryListener core_memory_listener = {
50c1e149 1775 .begin = core_begin,
93632747
AK
1776 .log_global_start = core_log_global_start,
1777 .log_global_stop = core_log_global_stop,
ac1970fb 1778 .priority = 1,
93632747
AK
1779};
1780
4855d41a
AK
1781static MemoryListener io_memory_listener = {
1782 .region_add = io_region_add,
1783 .region_del = io_region_del,
4855d41a
AK
1784 .priority = 0,
1785};
1786
1d71148e
AK
1787static MemoryListener tcg_memory_listener = {
1788 .commit = tcg_commit,
1789};
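/*
 * The listeners above illustrate the general pattern: fill in only the
 * callbacks a subsystem cares about and register against an address space.
 * A hypothetical sketch ("my_region_add" and "my_memory_listener" are
 * placeholders, not part of this file):
 */
static void my_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    /* react to a region becoming visible in the address space */
}

static MemoryListener my_memory_listener = {
    .region_add = my_region_add,
    .priority = 10, /* higher than the core listener's priority 1 */
};

static void my_listener_setup(void)
{
    memory_listener_register(&my_memory_listener, &address_space_memory);
}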
1790
ac1970fb
AK
1791void address_space_init_dispatch(AddressSpace *as)
1792{
1793 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1794
1795 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1796 d->listener = (MemoryListener) {
1797 .begin = mem_begin,
1798 .region_add = mem_add,
1799 .region_nop = mem_add,
1800 .priority = 0,
1801 };
acc9d80b 1802 d->as = as;
ac1970fb
AK
1803 as->dispatch = d;
1804 memory_listener_register(&d->listener, as);
1805}
1806
83f3c251
AK
1807void address_space_destroy_dispatch(AddressSpace *as)
1808{
1809 AddressSpaceDispatch *d = as->dispatch;
1810
1811 memory_listener_unregister(&d->listener);
1812 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1813 g_free(d);
1814 as->dispatch = NULL;
1815}
1816
62152b8a
AK
1817static void memory_map_init(void)
1818{
7267c094 1819 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 1820 memory_region_init(system_memory, "system", INT64_MAX);
7dca8043 1821 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1822
7267c094 1823 system_io = g_malloc(sizeof(*system_io));
309cb471 1824 memory_region_init(system_io, "io", 65536);
7dca8043 1825 address_space_init(&address_space_io, system_io, "I/O");
93632747 1826
f6790af6
AK
1827 memory_listener_register(&core_memory_listener, &address_space_memory);
1828 memory_listener_register(&io_memory_listener, &address_space_io);
1829 memory_listener_register(&tcg_memory_listener, &address_space_memory);
62152b8a
AK
1830}
1831
1832MemoryRegion *get_system_memory(void)
1833{
1834 return system_memory;
1835}
1836
309cb471
AK
1837MemoryRegion *get_system_io(void)
1838{
1839 return system_io;
1840}
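/*
 * memory_map_init() only creates the empty "system" and "io" containers;
 * board code populates them through the accessors above.  A rough sketch of
 * adding guest RAM (the region name and size are placeholders, and the
 * three-argument memory_region_init_ram() is assumed to match the memory
 * API of this tree):
 */
static void example_board_ram_init(void)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, "example.ram", 64 * 1024 * 1024);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}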
1841
e2eef170
PB
1842#endif /* !defined(CONFIG_USER_ONLY) */
1843
13eb76e0
FB
1844/* physical memory access (slow version, mainly for debug) */
1845#if defined(CONFIG_USER_ONLY)
9349b4f9 1846int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1847 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1848{
1849 int l, flags;
1850 target_ulong page;
53a5960a 1851 void * p;
13eb76e0
FB
1852
1853 while (len > 0) {
1854 page = addr & TARGET_PAGE_MASK;
1855 l = (page + TARGET_PAGE_SIZE) - addr;
1856 if (l > len)
1857 l = len;
1858 flags = page_get_flags(page);
1859 if (!(flags & PAGE_VALID))
a68fe89c 1860 return -1;
13eb76e0
FB
1861 if (is_write) {
1862 if (!(flags & PAGE_WRITE))
a68fe89c 1863 return -1;
579a97f7 1864 /* XXX: this code should not depend on lock_user */
72fb7daa 1865 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1866 return -1;
72fb7daa
AJ
1867 memcpy(p, buf, l);
1868 unlock_user(p, addr, l);
13eb76e0
FB
1869 } else {
1870 if (!(flags & PAGE_READ))
a68fe89c 1871 return -1;
579a97f7 1872 /* XXX: this code should not depend on lock_user */
72fb7daa 1873 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1874 return -1;
72fb7daa 1875 memcpy(buf, p, l);
5b257578 1876 unlock_user(p, addr, 0);
13eb76e0
FB
1877 }
1878 len -= l;
1879 buf += l;
1880 addr += l;
1881 }
a68fe89c 1882 return 0;
13eb76e0 1883}
8df1cd07 1884
13eb76e0 1885#else
51d7a9eb 1886
a8170e5e
AK
1887static void invalidate_and_set_dirty(hwaddr addr,
1888 hwaddr length)
51d7a9eb
AP
1889{
1890 if (!cpu_physical_memory_is_dirty(addr)) {
1891 /* invalidate code */
1892 tb_invalidate_phys_page_range(addr, addr + length, 0);
1893 /* set dirty bit */
1894 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1895 }
e226939d 1896 xen_modified_memory(addr, length);
51d7a9eb
AP
1897}
1898
2bbfa05d
PB
1899static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1900{
1901 if (memory_region_is_ram(mr)) {
1902 return !(is_write && mr->readonly);
1903 }
1904 if (memory_region_is_romd(mr)) {
1905 return !is_write;
1906 }
1907
1908 return false;
1909}
1910
f52cc467 1911static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
82f2563f 1912{
f52cc467 1913 if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) {
82f2563f
PB
1914 return 4;
1915 }
f52cc467 1916 if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
82f2563f
PB
1917 return 2;
1918 }
1919 return 1;
1920}
1921
fd8aaa76 1922bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1923 int len, bool is_write)
13eb76e0 1924{
149f54b5 1925 hwaddr l;
13eb76e0 1926 uint8_t *ptr;
791af8c8 1927 uint64_t val;
149f54b5 1928 hwaddr addr1;
5c8a00ce 1929 MemoryRegion *mr;
fd8aaa76 1930 bool error = false;
3b46e624 1931
13eb76e0 1932 while (len > 0) {
149f54b5 1933 l = len;
5c8a00ce 1934 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 1935
13eb76e0 1936 if (is_write) {
5c8a00ce
PB
1937 if (!memory_access_is_direct(mr, is_write)) {
1938 l = memory_access_size(mr, l, addr1);
6a00d601
FB
1939 /* XXX: could force cpu_single_env to NULL to avoid
1940 potential bugs */
82f2563f 1941 if (l == 4) {
1c213d19 1942 /* 32 bit write access */
c27004ec 1943 val = ldl_p(buf);
5c8a00ce 1944 error |= io_mem_write(mr, addr1, val, 4);
82f2563f 1945 } else if (l == 2) {
1c213d19 1946 /* 16 bit write access */
c27004ec 1947 val = lduw_p(buf);
5c8a00ce 1948 error |= io_mem_write(mr, addr1, val, 2);
13eb76e0 1949 } else {
1c213d19 1950 /* 8 bit write access */
c27004ec 1951 val = ldub_p(buf);
5c8a00ce 1952 error |= io_mem_write(mr, addr1, val, 1);
13eb76e0 1953 }
2bbfa05d 1954 } else {
5c8a00ce 1955 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 1956 /* RAM case */
5579c7f3 1957 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1958 memcpy(ptr, buf, l);
51d7a9eb 1959 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1960 }
1961 } else {
5c8a00ce 1962 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 1963 /* I/O case */
5c8a00ce 1964 l = memory_access_size(mr, l, addr1);
82f2563f 1965 if (l == 4) {
13eb76e0 1966 /* 32 bit read access */
5c8a00ce 1967 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 1968 stl_p(buf, val);
82f2563f 1969 } else if (l == 2) {
13eb76e0 1970 /* 16 bit read access */
5c8a00ce 1971 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 1972 stw_p(buf, val);
13eb76e0 1973 } else {
1c213d19 1974 /* 8 bit read access */
5c8a00ce 1975 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 1976 stb_p(buf, val);
13eb76e0
FB
1977 }
1978 } else {
1979 /* RAM case */
5c8a00ce 1980 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 1981 memcpy(buf, ptr, l);
13eb76e0
FB
1982 }
1983 }
1984 len -= l;
1985 buf += l;
1986 addr += l;
1987 }
fd8aaa76
PB
1988
1989 return error;
13eb76e0 1990}
8df1cd07 1991
fd8aaa76 1992bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1993 const uint8_t *buf, int len)
1994{
fd8aaa76 1995 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
1996}
1997
fd8aaa76 1998bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 1999{
fd8aaa76 2000 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2001}
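/*
 * address_space_write()/address_space_read() fix the direction of
 * address_space_rw(); the return value is true if any I/O access in the
 * transfer failed.  Usage sketch ("guest_addr" is a placeholder guest
 * physical address):
 */
static bool example_rw_roundtrip(hwaddr guest_addr)
{
    uint8_t buf[16] = { 0xaa };
    bool failed;

    failed  = address_space_write(&address_space_memory, guest_addr,
                                  buf, sizeof(buf));
    failed |= address_space_read(&address_space_memory, guest_addr,
                                 buf, sizeof(buf));
    return failed; /* true means at least one access failed */
}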
2002
2003
a8170e5e 2004void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2005 int len, int is_write)
2006{
fd8aaa76 2007 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2008}
2009
d0ecd2aa 2010/* used for ROM loading : can write in RAM and ROM */
a8170e5e 2011void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
2012 const uint8_t *buf, int len)
2013{
149f54b5 2014 hwaddr l;
d0ecd2aa 2015 uint8_t *ptr;
149f54b5 2016 hwaddr addr1;
5c8a00ce 2017 MemoryRegion *mr;
3b46e624 2018
d0ecd2aa 2019 while (len > 0) {
149f54b5 2020 l = len;
5c8a00ce
PB
2021 mr = address_space_translate(&address_space_memory,
2022 addr, &addr1, &l, true);
3b46e624 2023
5c8a00ce
PB
2024 if (!(memory_region_is_ram(mr) ||
2025 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2026 /* do nothing */
2027 } else {
5c8a00ce 2028 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2029 /* ROM/RAM case */
5579c7f3 2030 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2031 memcpy(ptr, buf, l);
51d7a9eb 2032 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2033 }
2034 len -= l;
2035 buf += l;
2036 addr += l;
2037 }
2038}
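/*
 * Typical caller: firmware/option-ROM loading code, which must be able to
 * store into regions that ordinary guest writes cannot modify.  Sketch
 * ("rom_base", "blob" and "blob_size" are placeholders):
 */
static void example_load_rom(hwaddr rom_base, const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(rom_base, blob, blob_size);
}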
2039
6d16c2f8
AL
2040typedef struct {
2041 void *buffer;
a8170e5e
AK
2042 hwaddr addr;
2043 hwaddr len;
6d16c2f8
AL
2044} BounceBuffer;
2045
2046static BounceBuffer bounce;
2047
ba223c29
AL
2048typedef struct MapClient {
2049 void *opaque;
2050 void (*callback)(void *opaque);
72cf2d4f 2051 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2052} MapClient;
2053
72cf2d4f
BS
2054static QLIST_HEAD(map_client_list, MapClient) map_client_list
2055 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2056
2057void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2058{
7267c094 2059 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2060
2061 client->opaque = opaque;
2062 client->callback = callback;
72cf2d4f 2063 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2064 return client;
2065}
2066
8b9c99d9 2067static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2068{
2069 MapClient *client = (MapClient *)_client;
2070
72cf2d4f 2071 QLIST_REMOVE(client, link);
7267c094 2072 g_free(client);
ba223c29
AL
2073}
2074
2075static void cpu_notify_map_clients(void)
2076{
2077 MapClient *client;
2078
72cf2d4f
BS
2079 while (!QLIST_EMPTY(&map_client_list)) {
2080 client = QLIST_FIRST(&map_client_list);
ba223c29 2081 client->callback(client->opaque);
34d5e948 2082 cpu_unregister_map_client(client);
ba223c29
AL
2083 }
2084}
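/*
 * The map-client list lets a caller of address_space_map() be notified when
 * the single bounce buffer is free again; cpu_notify_map_clients() invokes
 * the callback and drops the registration automatically.  Sketch (the DMA
 * job type and the retry helper are placeholders):
 */
typedef struct ExampleDMAJob ExampleDMAJob;      /* placeholder job state */
void example_dma_continue(ExampleDMAJob *job);   /* placeholder: retries the map */

static void example_dma_retry_cb(void *opaque)
{
    ExampleDMAJob *job = opaque;

    example_dma_continue(job);
}

static void example_dma_on_map_busy(ExampleDMAJob *job)
{
    /* address_space_map() returned NULL: ask to be called back when the
     * bounce buffer is released, then simply return */
    cpu_register_map_client(job, example_dma_retry_cb);
}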
2085
51644ab7
PB
2086bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2087{
5c8a00ce 2088 MemoryRegion *mr;
51644ab7
PB
2089 hwaddr l, xlat;
2090
2091 while (len > 0) {
2092 l = len;
5c8a00ce
PB
2093 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2094 if (!memory_access_is_direct(mr, is_write)) {
2095 l = memory_access_size(mr, l, addr);
2096 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2097 return false;
2098 }
2099 }
2100
2101 len -= l;
2102 addr += l;
2103 }
2104 return true;
2105}
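/*
 * A device model can validate a guest-programmed address before committing
 * to a transfer.  Sketch ("addr" and "len" are placeholders for values read
 * from a guest-visible register):
 */
static bool example_dma_addr_ok(hwaddr addr, int len)
{
    return address_space_access_valid(&address_space_memory, addr, len, true);
}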
2106
6d16c2f8
AL
2107/* Map a physical memory region into a host virtual address.
2108 * May map a subset of the requested range, given by and returned in *plen.
2109 * May return NULL if resources needed to perform the mapping are exhausted.
2110 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2111 * Use cpu_register_map_client() to know when retrying the map operation is
2112 * likely to succeed.
6d16c2f8 2113 */
ac1970fb 2114void *address_space_map(AddressSpace *as,
a8170e5e
AK
2115 hwaddr addr,
2116 hwaddr *plen,
ac1970fb 2117 bool is_write)
6d16c2f8 2118{
a8170e5e
AK
2119 hwaddr len = *plen;
2120 hwaddr todo = 0;
149f54b5 2121 hwaddr l, xlat;
5c8a00ce 2122 MemoryRegion *mr;
f15fbc4b 2123 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2124 ram_addr_t rlen;
2125 void *ret;
6d16c2f8
AL
2126
2127 while (len > 0) {
149f54b5 2128 l = len;
5c8a00ce 2129 mr = address_space_translate(as, addr, &xlat, &l, is_write);
6d16c2f8 2130
5c8a00ce 2131 if (!memory_access_is_direct(mr, is_write)) {
38bee5dc 2132 if (todo || bounce.buffer) {
6d16c2f8
AL
2133 break;
2134 }
2135 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2136 bounce.addr = addr;
2137 bounce.len = l;
2138 if (!is_write) {
ac1970fb 2139 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2140 }
38bee5dc
SS
2141
2142 *plen = l;
2143 return bounce.buffer;
6d16c2f8 2144 }
8ab934f9 2145 if (!todo) {
5c8a00ce 2146 raddr = memory_region_get_ram_addr(mr) + xlat;
149f54b5 2147 } else {
5c8a00ce 2148 if (memory_region_get_ram_addr(mr) + xlat != raddr + todo) {
149f54b5
PB
2149 break;
2150 }
8ab934f9 2151 }
6d16c2f8
AL
2152
2153 len -= l;
2154 addr += l;
38bee5dc 2155 todo += l;
6d16c2f8 2156 }
8ab934f9
SS
2157 rlen = todo;
2158 ret = qemu_ram_ptr_length(raddr, &rlen);
2159 *plen = rlen;
2160 return ret;
6d16c2f8
AL
2161}
2162
ac1970fb 2163/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2164 * Will also mark the memory as dirty if is_write == 1. access_len gives
2165 * the amount of memory that was actually read or written by the caller.
2166 */
a8170e5e
AK
2167void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2168 int is_write, hwaddr access_len)
6d16c2f8
AL
2169{
2170 if (buffer != bounce.buffer) {
2171 if (is_write) {
e890261f 2172 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2173 while (access_len) {
2174 unsigned l;
2175 l = TARGET_PAGE_SIZE;
2176 if (l > access_len)
2177 l = access_len;
51d7a9eb 2178 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2179 addr1 += l;
2180 access_len -= l;
2181 }
2182 }
868bb33f 2183 if (xen_enabled()) {
e41d7c69 2184 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2185 }
6d16c2f8
AL
2186 return;
2187 }
2188 if (is_write) {
ac1970fb 2189 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2190 }
f8a83245 2191 qemu_vfree(bounce.buffer);
6d16c2f8 2192 bounce.buffer = NULL;
ba223c29 2193 cpu_notify_map_clients();
6d16c2f8 2194}
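/*
 * Putting map and unmap together: the usual zero-copy read of guest memory
 * into a host buffer.  Note that *plen may come back smaller than requested.
 * ("dst" is a placeholder destination buffer.)
 */
static bool example_read_mapped(hwaddr addr, void *dst, hwaddr len)
{
    hwaddr maplen = len;
    void *p = address_space_map(&address_space_memory, addr, &maplen, false);

    if (!p) {
        /* resources exhausted: fall back to address_space_read() or
         * register a map client and retry later */
        return false;
    }
    memcpy(dst, p, maplen);
    address_space_unmap(&address_space_memory, p, maplen, false, maplen);
    return maplen == len; /* caller must cope with a partial mapping */
}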
d0ecd2aa 2195
a8170e5e
AK
2196void *cpu_physical_memory_map(hwaddr addr,
2197 hwaddr *plen,
ac1970fb
AK
2198 int is_write)
2199{
2200 return address_space_map(&address_space_memory, addr, plen, is_write);
2201}
2202
a8170e5e
AK
2203void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2204 int is_write, hwaddr access_len)
ac1970fb
AK
2205{
2206 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2207}
2208
8df1cd07 2209/* warning: addr must be aligned */
a8170e5e 2210static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2211 enum device_endian endian)
8df1cd07 2212{
8df1cd07 2213 uint8_t *ptr;
791af8c8 2214 uint64_t val;
5c8a00ce 2215 MemoryRegion *mr;
149f54b5
PB
2216 hwaddr l = 4;
2217 hwaddr addr1;
8df1cd07 2218
5c8a00ce
PB
2219 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2220 false);
2221 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2222 /* I/O case */
5c8a00ce 2223 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2224#if defined(TARGET_WORDS_BIGENDIAN)
2225 if (endian == DEVICE_LITTLE_ENDIAN) {
2226 val = bswap32(val);
2227 }
2228#else
2229 if (endian == DEVICE_BIG_ENDIAN) {
2230 val = bswap32(val);
2231 }
2232#endif
8df1cd07
FB
2233 } else {
2234 /* RAM case */
5c8a00ce 2235 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2236 & TARGET_PAGE_MASK)
149f54b5 2237 + addr1);
1e78bcc1
AG
2238 switch (endian) {
2239 case DEVICE_LITTLE_ENDIAN:
2240 val = ldl_le_p(ptr);
2241 break;
2242 case DEVICE_BIG_ENDIAN:
2243 val = ldl_be_p(ptr);
2244 break;
2245 default:
2246 val = ldl_p(ptr);
2247 break;
2248 }
8df1cd07
FB
2249 }
2250 return val;
2251}
2252
a8170e5e 2253uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2254{
2255 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2256}
2257
a8170e5e 2258uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2259{
2260 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2261}
2262
a8170e5e 2263uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2264{
2265 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2266}
2267
84b7b8e7 2268/* warning: addr must be aligned */
a8170e5e 2269static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2270 enum device_endian endian)
84b7b8e7 2271{
84b7b8e7
FB
2272 uint8_t *ptr;
2273 uint64_t val;
5c8a00ce 2274 MemoryRegion *mr;
149f54b5
PB
2275 hwaddr l = 8;
2276 hwaddr addr1;
84b7b8e7 2277
5c8a00ce
PB
2278 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2279 false);
2280 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2281 /* I/O case */
5c8a00ce 2282 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2283#if defined(TARGET_WORDS_BIGENDIAN)
2284 if (endian == DEVICE_LITTLE_ENDIAN) {
2285 val = bswap64(val);
2286 }
2287#else
2288 if (endian == DEVICE_BIG_ENDIAN) {
2289 val = bswap64(val);
2290 }
84b7b8e7
FB
2291#endif
2292 } else {
2293 /* RAM case */
5c8a00ce 2294 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2295 & TARGET_PAGE_MASK)
149f54b5 2296 + addr1);
1e78bcc1
AG
2297 switch (endian) {
2298 case DEVICE_LITTLE_ENDIAN:
2299 val = ldq_le_p(ptr);
2300 break;
2301 case DEVICE_BIG_ENDIAN:
2302 val = ldq_be_p(ptr);
2303 break;
2304 default:
2305 val = ldq_p(ptr);
2306 break;
2307 }
84b7b8e7
FB
2308 }
2309 return val;
2310}
2311
a8170e5e 2312uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2313{
2314 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2315}
2316
a8170e5e 2317uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2318{
2319 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2320}
2321
a8170e5e 2322uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2323{
2324 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2325}
2326
aab33094 2327/* XXX: optimize */
a8170e5e 2328uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2329{
2330 uint8_t val;
2331 cpu_physical_memory_read(addr, &val, 1);
2332 return val;
2333}
2334
733f0b02 2335/* warning: addr must be aligned */
a8170e5e 2336static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2337 enum device_endian endian)
aab33094 2338{
733f0b02
MT
2339 uint8_t *ptr;
2340 uint64_t val;
5c8a00ce 2341 MemoryRegion *mr;
149f54b5
PB
2342 hwaddr l = 2;
2343 hwaddr addr1;
733f0b02 2344
5c8a00ce
PB
2345 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2346 false);
2347 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2348 /* I/O case */
5c8a00ce 2349 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2350#if defined(TARGET_WORDS_BIGENDIAN)
2351 if (endian == DEVICE_LITTLE_ENDIAN) {
2352 val = bswap16(val);
2353 }
2354#else
2355 if (endian == DEVICE_BIG_ENDIAN) {
2356 val = bswap16(val);
2357 }
2358#endif
733f0b02
MT
2359 } else {
2360 /* RAM case */
5c8a00ce 2361 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2362 & TARGET_PAGE_MASK)
149f54b5 2363 + addr1);
1e78bcc1
AG
2364 switch (endian) {
2365 case DEVICE_LITTLE_ENDIAN:
2366 val = lduw_le_p(ptr);
2367 break;
2368 case DEVICE_BIG_ENDIAN:
2369 val = lduw_be_p(ptr);
2370 break;
2371 default:
2372 val = lduw_p(ptr);
2373 break;
2374 }
733f0b02
MT
2375 }
2376 return val;
aab33094
FB
2377}
2378
a8170e5e 2379uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2380{
2381 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2382}
2383
a8170e5e 2384uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2385{
2386 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2387}
2388
a8170e5e 2389uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2390{
2391 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2392}
2393
8df1cd07
FB
2394/* warning: addr must be aligned. The RAM page is not marked as dirty
2395 and the code inside is not invalidated. It is useful if the dirty
2396 bits are used to track modified PTEs */
a8170e5e 2397void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2398{
8df1cd07 2399 uint8_t *ptr;
5c8a00ce 2400 MemoryRegion *mr;
149f54b5
PB
2401 hwaddr l = 4;
2402 hwaddr addr1;
8df1cd07 2403
5c8a00ce
PB
2404 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2405 true);
2406 if (l < 4 || !memory_access_is_direct(mr, true)) {
2407 io_mem_write(mr, addr1, val, 4);
8df1cd07 2408 } else {
5c8a00ce 2409 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2410 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2411 stl_p(ptr, val);
74576198
AL
2412
2413 if (unlikely(in_migration)) {
2414 if (!cpu_physical_memory_is_dirty(addr1)) {
2415 /* invalidate code */
2416 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2417 /* set dirty bit */
f7c11b53
YT
2418 cpu_physical_memory_set_dirty_flags(
2419 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2420 }
2421 }
8df1cd07
FB
2422 }
2423}
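/*
 * Target MMU code is the intended user: it can update accessed/dirty bits
 * inside a guest page table entry without the store being treated as
 * self-modifying code.  Sketch ("pte_addr" and "accessed_bit" are
 * placeholders):
 */
static void example_set_pte_accessed(hwaddr pte_addr, uint32_t accessed_bit)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & accessed_bit)) {
        stl_phys_notdirty(pte_addr, pte | accessed_bit);
    }
}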
2424
2425/* warning: addr must be aligned */
a8170e5e 2426static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2427 enum device_endian endian)
8df1cd07 2428{
8df1cd07 2429 uint8_t *ptr;
5c8a00ce 2430 MemoryRegion *mr;
149f54b5
PB
2431 hwaddr l = 4;
2432 hwaddr addr1;
8df1cd07 2433
5c8a00ce
PB
2434 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2435 true);
2436 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2437#if defined(TARGET_WORDS_BIGENDIAN)
2438 if (endian == DEVICE_LITTLE_ENDIAN) {
2439 val = bswap32(val);
2440 }
2441#else
2442 if (endian == DEVICE_BIG_ENDIAN) {
2443 val = bswap32(val);
2444 }
2445#endif
5c8a00ce 2446 io_mem_write(mr, addr1, val, 4);
8df1cd07 2447 } else {
8df1cd07 2448 /* RAM case */
5c8a00ce 2449 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2450 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2451 switch (endian) {
2452 case DEVICE_LITTLE_ENDIAN:
2453 stl_le_p(ptr, val);
2454 break;
2455 case DEVICE_BIG_ENDIAN:
2456 stl_be_p(ptr, val);
2457 break;
2458 default:
2459 stl_p(ptr, val);
2460 break;
2461 }
51d7a9eb 2462 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2463 }
2464}
2465
a8170e5e 2466void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2467{
2468 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2469}
2470
a8170e5e 2471void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2472{
2473 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2474}
2475
a8170e5e 2476void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2477{
2478 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2479}
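/*
 * The fixed-endian variants let a device model access in-memory descriptors
 * in the guest's layout regardless of the host.  Sketch for a little-endian
 * descriptor ("desc_addr" and "done_bit" are placeholders):
 */
static void example_complete_descriptor(hwaddr desc_addr, uint32_t done_bit)
{
    uint32_t status = ldl_le_phys(desc_addr);

    stl_le_phys(desc_addr, status | done_bit);
}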
2480
aab33094 2481/* XXX: optimize */
a8170e5e 2482void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2483{
2484 uint8_t v = val;
2485 cpu_physical_memory_write(addr, &v, 1);
2486}
2487
733f0b02 2488/* warning: addr must be aligned */
a8170e5e 2489static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2490 enum device_endian endian)
aab33094 2491{
733f0b02 2492 uint8_t *ptr;
5c8a00ce 2493 MemoryRegion *mr;
149f54b5
PB
2494 hwaddr l = 2;
2495 hwaddr addr1;
733f0b02 2496
5c8a00ce
PB
2497 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2498 true);
2499 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2500#if defined(TARGET_WORDS_BIGENDIAN)
2501 if (endian == DEVICE_LITTLE_ENDIAN) {
2502 val = bswap16(val);
2503 }
2504#else
2505 if (endian == DEVICE_BIG_ENDIAN) {
2506 val = bswap16(val);
2507 }
2508#endif
5c8a00ce 2509 io_mem_write(mr, addr1, val, 2);
733f0b02 2510 } else {
733f0b02 2511 /* RAM case */
5c8a00ce 2512 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2513 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2514 switch (endian) {
2515 case DEVICE_LITTLE_ENDIAN:
2516 stw_le_p(ptr, val);
2517 break;
2518 case DEVICE_BIG_ENDIAN:
2519 stw_be_p(ptr, val);
2520 break;
2521 default:
2522 stw_p(ptr, val);
2523 break;
2524 }
51d7a9eb 2525 invalidate_and_set_dirty(addr1, 2);
733f0b02 2526 }
aab33094
FB
2527}
2528
a8170e5e 2529void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2530{
2531 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2532}
2533
a8170e5e 2534void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2535{
2536 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2537}
2538
a8170e5e 2539void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2540{
2541 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2542}
2543
aab33094 2544/* XXX: optimize */
a8170e5e 2545void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2546{
2547 val = tswap64(val);
71d2b725 2548 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2549}
2550
a8170e5e 2551void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2552{
2553 val = cpu_to_le64(val);
2554 cpu_physical_memory_write(addr, &val, 8);
2555}
2556
a8170e5e 2557void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2558{
2559 val = cpu_to_be64(val);
2560 cpu_physical_memory_write(addr, &val, 8);
2561}
2562
5e2972fd 2563/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2564int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2565 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2566{
2567 int l;
a8170e5e 2568 hwaddr phys_addr;
9b3c35e0 2569 target_ulong page;
13eb76e0
FB
2570
2571 while (len > 0) {
2572 page = addr & TARGET_PAGE_MASK;
2573 phys_addr = cpu_get_phys_page_debug(env, page);
2574 /* if no physical page mapped, return an error */
2575 if (phys_addr == -1)
2576 return -1;
2577 l = (page + TARGET_PAGE_SIZE) - addr;
2578 if (l > len)
2579 l = len;
5e2972fd 2580 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2581 if (is_write)
2582 cpu_physical_memory_write_rom(phys_addr, buf, l);
2583 else
5e2972fd 2584 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2585 len -= l;
2586 buf += l;
2587 addr += l;
2588 }
2589 return 0;
2590}
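/*
 * This is the path the gdbstub and the monitor use to peek and poke guest
 * virtual memory.  Sketch ("env" and "guest_va" are placeholders):
 */
static bool example_read_guest_word(CPUArchState *env, target_ulong guest_va,
                                    uint32_t *out)
{
    return cpu_memory_rw_debug(env, guest_va, (uint8_t *)out,
                               sizeof(*out), 0) == 0;
}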
a68fe89c 2591#endif
13eb76e0 2592
8e4a424b
BS
2593#if !defined(CONFIG_USER_ONLY)
2594
2595/*
 2596 * A helper function for the _utterly broken_ virtio device model to find out
 2597 * whether it's running on a big-endian machine. Don't do this at home, kids!
2598 */
2599bool virtio_is_big_endian(void);
2600bool virtio_is_big_endian(void)
2601{
2602#if defined(TARGET_WORDS_BIGENDIAN)
2603 return true;
2604#else
2605 return false;
2606#endif
2607}
2608
2609#endif
2610
76f35538 2611#ifndef CONFIG_USER_ONLY
a8170e5e 2612bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2613{
5c8a00ce 2614 MemoryRegion *mr;
149f54b5 2615 hwaddr l = 1;
76f35538 2616
5c8a00ce
PB
2617 mr = address_space_translate(&address_space_memory,
2618 phys_addr, &phys_addr, &l, false);
76f35538 2619
5c8a00ce
PB
2620 return !(memory_region_is_ram(mr) ||
2621 memory_region_is_romd(mr));
76f35538 2622}
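/*
 * Sketch: refuse to treat an address as host-backed RAM when it is actually
 * MMIO or unassigned ("paddr" is a placeholder).
 */
static bool example_addr_is_plain_ram(hwaddr paddr)
{
    return !cpu_physical_memory_is_io(paddr);
}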
bd2fa51f
MH
2623
2624void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2625{
2626 RAMBlock *block;
2627
2628 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2629 func(block->host, block->offset, block->length, opaque);
2630 }
2631}
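/*
 * The iterator hands each block's host pointer, ram_addr_t offset and length
 * to the callback.  Sketch that totals up registered RAM (the helper names
 * are made up):
 */
static void example_sum_block(void *host, ram_addr_t offset, ram_addr_t length,
                              void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_block, &total);
    return total;
}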
ec3f8c99 2632#endif