/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

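/* The physical address space is tracked as a radix tree of PhysPageEntry
   nodes: interior nodes index into phys_map_nodes, leaves index into
   phys_sections.  phys_page_set_level() descends the tree, allocating
   nodes on demand, and marks whole aligned sub-trees as leaves when the
   range being mapped covers them completely.  */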
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &phys_sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(as, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

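/* Translate an address-space address into the target MemoryRegion and
   offset, iterating through any IOMMU regions on the way.  Each IOMMU
   lookup may redirect the access into another address space; translation
   stops at the first non-IOMMU region, or at a permission failure, which
   yields io_mem_unassigned.  */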
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

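/* Link a freshly created CPU into the global first_cpu list, assign its
   cpu_index, and register its VMState so that it is saved and migrated
   along with the machine.  */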
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);

}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

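/* Build the iotlb value stored in a TLB entry for the given section: for
   RAM it is the ram_addr_t of the page, ORed with the notdirty or ROM
   section index so that writes can be trapped; for MMIO it is the index
   of the section itself.  Pages covered by a watchpoint are redirected to
   the watch section instead.  */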
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

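/* mem_add() is the MemoryListener region_add/region_nop hook: it chops a
   MemoryRegionSection into page-aligned chunks.  Unaligned head and tail
   fragments are registered through subpages, full pages through
   register_multipage().  */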
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

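/* Find a gap in the ram_addr_t space large enough for a new block of the
   given size, preferring the smallest gap that fits so that the address
   space stays compact.  */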
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

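/* Allocate a new RAM block of the given size.  The backing memory comes
   either from a caller-supplied host pointer, from a hugetlbfs file when
   -mem-path is in use, or from Xen/KVM/anonymous mappings.  The block is
   inserted into ram_list sorted by size (biggest first) and its dirty
   bitmap is initialised to all-dirty.  */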
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case append when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

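/* Writes to pages that are not marked fully dirty (typically pages holding
   translated code) are routed through this "notdirty" region: the write
   invalidates any TBs on the page, is applied to the underlying RAM, and
   the dirty flags are updated so that later writes can hit RAM directly.  */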
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

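/* Subpage handling: when a page is shared by more than one memory region,
   accesses go through a subpage_t which records, per sub-page index, the
   section that owns it.  The read/write callbacks simply bounce the access
   back into the owning AddressSpace at the subpage's base offset.  */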
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      size, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

ac1970fb
AK
1766void address_space_init_dispatch(AddressSpace *as)
1767{
1768 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1769
1770 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1771 d->listener = (MemoryListener) {
1772 .begin = mem_begin,
1773 .region_add = mem_add,
1774 .region_nop = mem_add,
1775 .priority = 0,
1776 };
acc9d80b 1777 d->as = as;
ac1970fb
AK
1778 as->dispatch = d;
1779 memory_listener_register(&d->listener, as);
1780}
1781
83f3c251
AK
1782void address_space_destroy_dispatch(AddressSpace *as)
1783{
1784 AddressSpaceDispatch *d = as->dispatch;
1785
1786 memory_listener_unregister(&d->listener);
1787 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1788 g_free(d);
1789 as->dispatch = NULL;
1790}
1791
62152b8a
AK
1792static void memory_map_init(void)
1793{
7267c094 1794 system_memory = g_malloc(sizeof(*system_memory));
2c9b15ca 1795 memory_region_init(system_memory, NULL, "system", INT64_MAX);
7dca8043 1796 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1797
7267c094 1798 system_io = g_malloc(sizeof(*system_io));
2c9b15ca 1799 memory_region_init(system_io, NULL, "io", 65536);
7dca8043 1800 address_space_init(&address_space_io, system_io, "I/O");
93632747 1801
f6790af6 1802 memory_listener_register(&core_memory_listener, &address_space_memory);
f6790af6 1803 memory_listener_register(&tcg_memory_listener, &address_space_memory);
62152b8a
AK
1804}
1805
1806MemoryRegion *get_system_memory(void)
1807{
1808 return system_memory;
1809}
1810
309cb471
AK
1811MemoryRegion *get_system_io(void)
1812{
1813 return system_io;
1814}
1815
e2eef170
PB
1816#endif /* !defined(CONFIG_USER_ONLY) */
1817
13eb76e0
FB
1818/* physical memory access (slow version, mainly for debug) */
1819#if defined(CONFIG_USER_ONLY)
9349b4f9 1820int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1821 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1822{
1823 int l, flags;
1824 target_ulong page;
53a5960a 1825 void * p;
13eb76e0
FB
1826
1827 while (len > 0) {
1828 page = addr & TARGET_PAGE_MASK;
1829 l = (page + TARGET_PAGE_SIZE) - addr;
1830 if (l > len)
1831 l = len;
1832 flags = page_get_flags(page);
1833 if (!(flags & PAGE_VALID))
a68fe89c 1834 return -1;
13eb76e0
FB
1835 if (is_write) {
1836 if (!(flags & PAGE_WRITE))
a68fe89c 1837 return -1;
579a97f7 1838 /* XXX: this code should not depend on lock_user */
72fb7daa 1839 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1840 return -1;
72fb7daa
AJ
1841 memcpy(p, buf, l);
1842 unlock_user(p, addr, l);
13eb76e0
FB
1843 } else {
1844 if (!(flags & PAGE_READ))
a68fe89c 1845 return -1;
579a97f7 1846 /* XXX: this code should not depend on lock_user */
72fb7daa 1847 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1848 return -1;
72fb7daa 1849 memcpy(buf, p, l);
5b257578 1850 unlock_user(p, addr, 0);
13eb76e0
FB
1851 }
1852 len -= l;
1853 buf += l;
1854 addr += l;
1855 }
a68fe89c 1856 return 0;
13eb76e0 1857}
8df1cd07 1858
13eb76e0 1859#else
51d7a9eb 1860
a8170e5e
AK
1861static void invalidate_and_set_dirty(hwaddr addr,
1862 hwaddr length)
51d7a9eb
AP
1863{
1864 if (!cpu_physical_memory_is_dirty(addr)) {
1865 /* invalidate code */
1866 tb_invalidate_phys_page_range(addr, addr + length, 0);
1867 /* set dirty bit */
1868 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1869 }
e226939d 1870 xen_modified_memory(addr, length);
51d7a9eb
AP
1871}
1872
2bbfa05d
PB
1873static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1874{
1875 if (memory_region_is_ram(mr)) {
1876 return !(is_write && mr->readonly);
1877 }
1878 if (memory_region_is_romd(mr)) {
1879 return !is_write;
1880 }
1881
1882 return false;
1883}
1884
f52cc467 1885static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
82f2563f 1886{
f52cc467 1887 if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) {
82f2563f
PB
1888 return 4;
1889 }
f52cc467 1890 if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
82f2563f
PB
1891 return 2;
1892 }
1893 return 1;
1894}
1895
fd8aaa76 1896bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1897 int len, bool is_write)
13eb76e0 1898{
149f54b5 1899 hwaddr l;
13eb76e0 1900 uint8_t *ptr;
791af8c8 1901 uint64_t val;
149f54b5 1902 hwaddr addr1;
5c8a00ce 1903 MemoryRegion *mr;
fd8aaa76 1904 bool error = false;
3b46e624 1905
13eb76e0 1906 while (len > 0) {
149f54b5 1907 l = len;
5c8a00ce 1908 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 1909
13eb76e0 1910 if (is_write) {
5c8a00ce
PB
1911 if (!memory_access_is_direct(mr, is_write)) {
1912 l = memory_access_size(mr, l, addr1);
6a00d601
FB
1913 /* XXX: could force cpu_single_env to NULL to avoid
1914 potential bugs */
82f2563f 1915 if (l == 4) {
1c213d19 1916 /* 32 bit write access */
c27004ec 1917 val = ldl_p(buf);
5c8a00ce 1918 error |= io_mem_write(mr, addr1, val, 4);
82f2563f 1919 } else if (l == 2) {
1c213d19 1920 /* 16 bit write access */
c27004ec 1921 val = lduw_p(buf);
5c8a00ce 1922 error |= io_mem_write(mr, addr1, val, 2);
13eb76e0 1923 } else {
1c213d19 1924 /* 8 bit write access */
c27004ec 1925 val = ldub_p(buf);
5c8a00ce 1926 error |= io_mem_write(mr, addr1, val, 1);
13eb76e0 1927 }
2bbfa05d 1928 } else {
5c8a00ce 1929 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 1930 /* RAM case */
5579c7f3 1931 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1932 memcpy(ptr, buf, l);
51d7a9eb 1933 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1934 }
1935 } else {
5c8a00ce 1936 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 1937 /* I/O case */
5c8a00ce 1938 l = memory_access_size(mr, l, addr1);
82f2563f 1939 if (l == 4) {
13eb76e0 1940 /* 32 bit read access */
5c8a00ce 1941 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 1942 stl_p(buf, val);
82f2563f 1943 } else if (l == 2) {
13eb76e0 1944 /* 16 bit read access */
5c8a00ce 1945 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 1946 stw_p(buf, val);
13eb76e0 1947 } else {
1c213d19 1948 /* 8 bit read access */
5c8a00ce 1949 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 1950 stb_p(buf, val);
13eb76e0
FB
1951 }
1952 } else {
1953 /* RAM case */
5c8a00ce 1954 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 1955 memcpy(buf, ptr, l);
13eb76e0
FB
1956 }
1957 }
1958 len -= l;
1959 buf += l;
1960 addr += l;
1961 }
fd8aaa76
PB
1962
1963 return error;
13eb76e0 1964}
8df1cd07 1965
fd8aaa76 1966bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1967 const uint8_t *buf, int len)
1968{
fd8aaa76 1969 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
1970}
1971
fd8aaa76 1972bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 1973{
fd8aaa76 1974 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
1975}
1976
1977
a8170e5e 1978void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1979 int len, int is_write)
1980{
fd8aaa76 1981 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
1982}
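/* Illustrative sketch (not part of the original file): how a caller might use
 * the accessors above to copy a small buffer into guest-physical memory and
 * read it back.  The address 0x1000 and the function name are placeholders;
 * address_space_write()/address_space_rw() return true if any part of the
 * access hit an unassigned or erroring region.
 */
static void example_phys_copy(void)
{
    uint8_t out[16] = "hello, guest";
    uint8_t in[16];

    if (address_space_write(&address_space_memory, 0x1000, out, sizeof(out))) {
        /* part of the range was not backed by RAM or a device */
        return;
    }
    address_space_read(&address_space_memory, 0x1000, in, sizeof(in));

    /* the legacy helper is equivalent for the system address space */
    cpu_physical_memory_rw(0x1000, in, sizeof(in), 0);
}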
1983
d0ecd2aa 1984/* used for ROM loading : can write in RAM and ROM */
a8170e5e 1985void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1986 const uint8_t *buf, int len)
1987{
149f54b5 1988 hwaddr l;
d0ecd2aa 1989 uint8_t *ptr;
149f54b5 1990 hwaddr addr1;
5c8a00ce 1991 MemoryRegion *mr;
3b46e624 1992
d0ecd2aa 1993 while (len > 0) {
149f54b5 1994 l = len;
5c8a00ce
PB
1995 mr = address_space_translate(&address_space_memory,
1996 addr, &addr1, &l, true);
3b46e624 1997
5c8a00ce
PB
1998 if (!(memory_region_is_ram(mr) ||
1999 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2000 /* do nothing */
2001 } else {
5c8a00ce 2002 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2003 /* ROM/RAM case */
5579c7f3 2004 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2005 memcpy(ptr, buf, l);
51d7a9eb 2006 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2007 }
2008 len -= l;
2009 buf += l;
2010 addr += l;
2011 }
2012}
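/* Illustrative sketch (not part of the original file): firmware loaders go
 * through cpu_physical_memory_write_rom() because it also patches memory that
 * sits behind a ROM or ROM-device region, which a plain write would silently
 * skip.  The blob pointer and load address are placeholders.
 */
static void example_load_firmware(const uint8_t *blob, int size, hwaddr load_addr)
{
    cpu_physical_memory_write_rom(load_addr, blob, size);
}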
2013
6d16c2f8
AL
2014typedef struct {
2015 void *buffer;
a8170e5e
AK
2016 hwaddr addr;
2017 hwaddr len;
6d16c2f8
AL
2018} BounceBuffer;
2019
2020static BounceBuffer bounce;
2021
ba223c29
AL
2022typedef struct MapClient {
2023 void *opaque;
2024 void (*callback)(void *opaque);
72cf2d4f 2025 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2026} MapClient;
2027
72cf2d4f
BS
2028static QLIST_HEAD(map_client_list, MapClient) map_client_list
2029 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2030
2031void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2032{
7267c094 2033 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2034
2035 client->opaque = opaque;
2036 client->callback = callback;
72cf2d4f 2037 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2038 return client;
2039}
2040
8b9c99d9 2041static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2042{
2043 MapClient *client = (MapClient *)_client;
2044
72cf2d4f 2045 QLIST_REMOVE(client, link);
7267c094 2046 g_free(client);
ba223c29
AL
2047}
2048
2049static void cpu_notify_map_clients(void)
2050{
2051 MapClient *client;
2052
72cf2d4f
BS
2053 while (!QLIST_EMPTY(&map_client_list)) {
2054 client = QLIST_FIRST(&map_client_list);
ba223c29 2055 client->callback(client->opaque);
34d5e948 2056 cpu_unregister_map_client(client);
ba223c29
AL
2057 }
2058}
2059
51644ab7
PB
2060bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2061{
5c8a00ce 2062 MemoryRegion *mr;
51644ab7
PB
2063 hwaddr l, xlat;
2064
2065 while (len > 0) {
2066 l = len;
5c8a00ce
PB
2067 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2068 if (!memory_access_is_direct(mr, is_write)) {
2069 l = memory_access_size(mr, l, addr);
2070 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2071 return false;
2072 }
2073 }
2074
2075 len -= l;
2076 addr += l;
2077 }
2078 return true;
2079}
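/* Illustrative sketch (not part of the original file): a device model can
 * probe whether a DMA window is fully accessible before touching it, so it
 * can raise its own error condition instead of partially completing the
 * transfer.  The function name is hypothetical.
 */
static bool example_dma_write(AddressSpace *as, hwaddr dma_addr,
                              const uint8_t *buf, int len)
{
    if (!address_space_access_valid(as, dma_addr, len, true)) {
        return false;           /* e.g. report a DMA fault to the guest */
    }
    return !address_space_write(as, dma_addr, buf, len);
}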
2080
6d16c2f8
AL
2081/* Map a physical memory region into a host virtual address.
2082 * May map a subset of the requested range, given by and returned in *plen.
2083 * May return NULL if resources needed to perform the mapping are exhausted.
2084 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2085 * Use cpu_register_map_client() to know when retrying the map operation is
2086 * likely to succeed.
6d16c2f8 2087 */
ac1970fb 2088void *address_space_map(AddressSpace *as,
a8170e5e
AK
2089 hwaddr addr,
2090 hwaddr *plen,
ac1970fb 2091 bool is_write)
6d16c2f8 2092{
a8170e5e
AK
2093 hwaddr len = *plen;
2094 hwaddr todo = 0;
149f54b5 2095 hwaddr l, xlat;
5c8a00ce 2096 MemoryRegion *mr;
f15fbc4b 2097 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
2098 ram_addr_t rlen;
2099 void *ret;
6d16c2f8
AL
2100
2101 while (len > 0) {
149f54b5 2102 l = len;
5c8a00ce 2103 mr = address_space_translate(as, addr, &xlat, &l, is_write);
6d16c2f8 2104
5c8a00ce 2105 if (!memory_access_is_direct(mr, is_write)) {
38bee5dc 2106 if (todo || bounce.buffer) {
6d16c2f8
AL
2107 break;
2108 }
2109 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2110 bounce.addr = addr;
2111 bounce.len = l;
2112 if (!is_write) {
ac1970fb 2113 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 2114 }
38bee5dc
SS
2115
2116 *plen = l;
2117 return bounce.buffer;
6d16c2f8 2118 }
8ab934f9 2119 if (!todo) {
5c8a00ce 2120 raddr = memory_region_get_ram_addr(mr) + xlat;
149f54b5 2121 } else {
5c8a00ce 2122 if (memory_region_get_ram_addr(mr) + xlat != raddr + todo) {
149f54b5
PB
2123 break;
2124 }
8ab934f9 2125 }
6d16c2f8
AL
2126
2127 len -= l;
2128 addr += l;
38bee5dc 2129 todo += l;
6d16c2f8 2130 }
8ab934f9
SS
2131 rlen = todo;
2132 ret = qemu_ram_ptr_length(raddr, &rlen);
2133 *plen = rlen;
2134 return ret;
6d16c2f8
AL
2135}
2136
ac1970fb 2137/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2138 * Will also mark the memory as dirty if is_write == 1. access_len gives
2139 * the amount of memory that was actually read or written by the caller.
2140 */
a8170e5e
AK
2141void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2142 int is_write, hwaddr access_len)
6d16c2f8
AL
2143{
2144 if (buffer != bounce.buffer) {
2145 if (is_write) {
e890261f 2146 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
2147 while (access_len) {
2148 unsigned l;
2149 l = TARGET_PAGE_SIZE;
2150 if (l > access_len)
2151 l = access_len;
51d7a9eb 2152 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2153 addr1 += l;
2154 access_len -= l;
2155 }
2156 }
868bb33f 2157 if (xen_enabled()) {
e41d7c69 2158 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2159 }
6d16c2f8
AL
2160 return;
2161 }
2162 if (is_write) {
ac1970fb 2163 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2164 }
f8a83245 2165 qemu_vfree(bounce.buffer);
6d16c2f8 2166 bounce.buffer = NULL;
ba223c29 2167 cpu_notify_map_clients();
6d16c2f8 2168}
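/* Illustrative sketch (not part of the original file): the usual zero-copy
 * pattern built on the two functions above.  address_space_map() may shorten
 * *plen or return NULL while the single bounce buffer is busy;
 * cpu_register_map_client() arranges a callback (the hypothetical retry_cb)
 * for when a retry is likely to succeed.
 */
static void example_map_and_fill(AddressSpace *as, hwaddr addr, hwaddr size,
                                 void (*retry_cb)(void *opaque), void *opaque)
{
    hwaddr plen = size;
    void *host = address_space_map(as, addr, &plen, true);

    if (!host) {
        cpu_register_map_client(opaque, retry_cb);   /* try again later */
        return;
    }
    memset(host, 0, plen);                           /* only plen bytes are mapped */
    address_space_unmap(as, host, plen, true, plen);
}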
d0ecd2aa 2169
a8170e5e
AK
2170void *cpu_physical_memory_map(hwaddr addr,
2171 hwaddr *plen,
ac1970fb
AK
2172 int is_write)
2173{
2174 return address_space_map(&address_space_memory, addr, plen, is_write);
2175}
2176
a8170e5e
AK
2177void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2178 int is_write, hwaddr access_len)
ac1970fb
AK
2179{
2180 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2181}
2182
8df1cd07 2183/* warning: addr must be aligned */
a8170e5e 2184static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2185 enum device_endian endian)
8df1cd07 2186{
8df1cd07 2187 uint8_t *ptr;
791af8c8 2188 uint64_t val;
5c8a00ce 2189 MemoryRegion *mr;
149f54b5
PB
2190 hwaddr l = 4;
2191 hwaddr addr1;
8df1cd07 2192
5c8a00ce
PB
2193 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2194 false);
2195 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2196 /* I/O case */
5c8a00ce 2197 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2198#if defined(TARGET_WORDS_BIGENDIAN)
2199 if (endian == DEVICE_LITTLE_ENDIAN) {
2200 val = bswap32(val);
2201 }
2202#else
2203 if (endian == DEVICE_BIG_ENDIAN) {
2204 val = bswap32(val);
2205 }
2206#endif
8df1cd07
FB
2207 } else {
2208 /* RAM case */
5c8a00ce 2209 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2210 & TARGET_PAGE_MASK)
149f54b5 2211 + addr1);
1e78bcc1
AG
2212 switch (endian) {
2213 case DEVICE_LITTLE_ENDIAN:
2214 val = ldl_le_p(ptr);
2215 break;
2216 case DEVICE_BIG_ENDIAN:
2217 val = ldl_be_p(ptr);
2218 break;
2219 default:
2220 val = ldl_p(ptr);
2221 break;
2222 }
8df1cd07
FB
2223 }
2224 return val;
2225}
2226
a8170e5e 2227uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2228{
2229 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2230}
2231
a8170e5e 2232uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2233{
2234 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2235}
2236
a8170e5e 2237uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2238{
2239 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2240}
2241
84b7b8e7 2242/* warning: addr must be aligned */
a8170e5e 2243static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2244 enum device_endian endian)
84b7b8e7 2245{
84b7b8e7
FB
2246 uint8_t *ptr;
2247 uint64_t val;
5c8a00ce 2248 MemoryRegion *mr;
149f54b5
PB
2249 hwaddr l = 8;
2250 hwaddr addr1;
84b7b8e7 2251
5c8a00ce
PB
2252 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2253 false);
2254 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2255 /* I/O case */
5c8a00ce 2256 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2257#if defined(TARGET_WORDS_BIGENDIAN)
2258 if (endian == DEVICE_LITTLE_ENDIAN) {
2259 val = bswap64(val);
2260 }
2261#else
2262 if (endian == DEVICE_BIG_ENDIAN) {
2263 val = bswap64(val);
2264 }
84b7b8e7
FB
2265#endif
2266 } else {
2267 /* RAM case */
5c8a00ce 2268 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2269 & TARGET_PAGE_MASK)
149f54b5 2270 + addr1);
1e78bcc1
AG
2271 switch (endian) {
2272 case DEVICE_LITTLE_ENDIAN:
2273 val = ldq_le_p(ptr);
2274 break;
2275 case DEVICE_BIG_ENDIAN:
2276 val = ldq_be_p(ptr);
2277 break;
2278 default:
2279 val = ldq_p(ptr);
2280 break;
2281 }
84b7b8e7
FB
2282 }
2283 return val;
2284}
2285
a8170e5e 2286uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2287{
2288 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2289}
2290
a8170e5e 2291uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2292{
2293 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2294}
2295
a8170e5e 2296uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2297{
2298 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2299}
2300
aab33094 2301/* XXX: optimize */
a8170e5e 2302uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2303{
2304 uint8_t val;
2305 cpu_physical_memory_read(addr, &val, 1);
2306 return val;
2307}
2308
733f0b02 2309/* warning: addr must be aligned */
a8170e5e 2310static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2311 enum device_endian endian)
aab33094 2312{
733f0b02
MT
2313 uint8_t *ptr;
2314 uint64_t val;
5c8a00ce 2315 MemoryRegion *mr;
149f54b5
PB
2316 hwaddr l = 2;
2317 hwaddr addr1;
733f0b02 2318
5c8a00ce
PB
2319 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2320 false);
2321 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2322 /* I/O case */
5c8a00ce 2323 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2324#if defined(TARGET_WORDS_BIGENDIAN)
2325 if (endian == DEVICE_LITTLE_ENDIAN) {
2326 val = bswap16(val);
2327 }
2328#else
2329 if (endian == DEVICE_BIG_ENDIAN) {
2330 val = bswap16(val);
2331 }
2332#endif
733f0b02
MT
2333 } else {
2334 /* RAM case */
5c8a00ce 2335 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2336 & TARGET_PAGE_MASK)
149f54b5 2337 + addr1);
1e78bcc1
AG
2338 switch (endian) {
2339 case DEVICE_LITTLE_ENDIAN:
2340 val = lduw_le_p(ptr);
2341 break;
2342 case DEVICE_BIG_ENDIAN:
2343 val = lduw_be_p(ptr);
2344 break;
2345 default:
2346 val = lduw_p(ptr);
2347 break;
2348 }
733f0b02
MT
2349 }
2350 return val;
aab33094
FB
2351}
2352
a8170e5e 2353uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2354{
2355 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2356}
2357
a8170e5e 2358uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2359{
2360 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2361}
2362
a8170e5e 2363uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2364{
2365 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2366}
2367
8df1cd07
FB
2368/* warning: addr must be aligned. The ram page is not marked as dirty
2369 and the code inside is not invalidated. It is useful if the dirty
2370 bits are used to track modified PTEs */
a8170e5e 2371void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2372{
8df1cd07 2373 uint8_t *ptr;
5c8a00ce 2374 MemoryRegion *mr;
149f54b5
PB
2375 hwaddr l = 4;
2376 hwaddr addr1;
8df1cd07 2377
5c8a00ce
PB
2378 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2379 true);
2380 if (l < 4 || !memory_access_is_direct(mr, true)) {
2381 io_mem_write(mr, addr1, val, 4);
8df1cd07 2382 } else {
5c8a00ce 2383 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2384 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2385 stl_p(ptr, val);
74576198
AL
2386
2387 if (unlikely(in_migration)) {
2388 if (!cpu_physical_memory_is_dirty(addr1)) {
2389 /* invalidate code */
2390 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2391 /* set dirty bit */
f7c11b53
YT
2392 cpu_physical_memory_set_dirty_flags(
2393 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2394 }
2395 }
8df1cd07
FB
2396 }
2397}
2398
2399/* warning: addr must be aligned */
a8170e5e 2400static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2401 enum device_endian endian)
8df1cd07 2402{
8df1cd07 2403 uint8_t *ptr;
5c8a00ce 2404 MemoryRegion *mr;
149f54b5
PB
2405 hwaddr l = 4;
2406 hwaddr addr1;
8df1cd07 2407
5c8a00ce
PB
2408 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2409 true);
2410 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2411#if defined(TARGET_WORDS_BIGENDIAN)
2412 if (endian == DEVICE_LITTLE_ENDIAN) {
2413 val = bswap32(val);
2414 }
2415#else
2416 if (endian == DEVICE_BIG_ENDIAN) {
2417 val = bswap32(val);
2418 }
2419#endif
5c8a00ce 2420 io_mem_write(mr, addr1, val, 4);
8df1cd07 2421 } else {
8df1cd07 2422 /* RAM case */
5c8a00ce 2423 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2424 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2425 switch (endian) {
2426 case DEVICE_LITTLE_ENDIAN:
2427 stl_le_p(ptr, val);
2428 break;
2429 case DEVICE_BIG_ENDIAN:
2430 stl_be_p(ptr, val);
2431 break;
2432 default:
2433 stl_p(ptr, val);
2434 break;
2435 }
51d7a9eb 2436 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2437 }
2438}
2439
a8170e5e 2440void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2441{
2442 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2443}
2444
a8170e5e 2445void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2446{
2447 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2448}
2449
a8170e5e 2450void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2451{
2452 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2453}
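/* Illustrative sketch (not part of the original file): device registers with
 * a fixed byte order should go through the explicit-endian helpers, which
 * byte-swap as needed regardless of host and target endianness.  The register
 * address and function name are placeholders.
 */
static void example_le_register_update(hwaddr reg_addr, uint32_t set_bits)
{
    uint32_t v = ldl_le_phys(reg_addr);

    stl_le_phys(reg_addr, v | set_bits);
}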
2454
aab33094 2455/* XXX: optimize */
a8170e5e 2456void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2457{
2458 uint8_t v = val;
2459 cpu_physical_memory_write(addr, &v, 1);
2460}
2461
733f0b02 2462/* warning: addr must be aligned */
a8170e5e 2463static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2464 enum device_endian endian)
aab33094 2465{
733f0b02 2466 uint8_t *ptr;
5c8a00ce 2467 MemoryRegion *mr;
149f54b5
PB
2468 hwaddr l = 2;
2469 hwaddr addr1;
733f0b02 2470
5c8a00ce
PB
2471 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2472 true);
2473 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2474#if defined(TARGET_WORDS_BIGENDIAN)
2475 if (endian == DEVICE_LITTLE_ENDIAN) {
2476 val = bswap16(val);
2477 }
2478#else
2479 if (endian == DEVICE_BIG_ENDIAN) {
2480 val = bswap16(val);
2481 }
2482#endif
5c8a00ce 2483 io_mem_write(mr, addr1, val, 2);
733f0b02 2484 } else {
733f0b02 2485 /* RAM case */
5c8a00ce 2486 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2487 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2488 switch (endian) {
2489 case DEVICE_LITTLE_ENDIAN:
2490 stw_le_p(ptr, val);
2491 break;
2492 case DEVICE_BIG_ENDIAN:
2493 stw_be_p(ptr, val);
2494 break;
2495 default:
2496 stw_p(ptr, val);
2497 break;
2498 }
51d7a9eb 2499 invalidate_and_set_dirty(addr1, 2);
733f0b02 2500 }
aab33094
FB
2501}
2502
a8170e5e 2503void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2504{
2505 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2506}
2507
a8170e5e 2508void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2509{
2510 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2511}
2512
a8170e5e 2513void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2514{
2515 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2516}
2517
aab33094 2518/* XXX: optimize */
a8170e5e 2519void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2520{
2521 val = tswap64(val);
71d2b725 2522 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2523}
2524
a8170e5e 2525void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2526{
2527 val = cpu_to_le64(val);
2528 cpu_physical_memory_write(addr, &val, 8);
2529}
2530
a8170e5e 2531void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2532{
2533 val = cpu_to_be64(val);
2534 cpu_physical_memory_write(addr, &val, 8);
2535}
2536
5e2972fd 2537/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2538int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2539 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2540{
2541 int l;
a8170e5e 2542 hwaddr phys_addr;
9b3c35e0 2543 target_ulong page;
13eb76e0
FB
2544
2545 while (len > 0) {
2546 page = addr & TARGET_PAGE_MASK;
2547 phys_addr = cpu_get_phys_page_debug(env, page);
2548 /* if no physical page mapped, return an error */
2549 if (phys_addr == -1)
2550 return -1;
2551 l = (page + TARGET_PAGE_SIZE) - addr;
2552 if (l > len)
2553 l = len;
5e2972fd 2554 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2555 if (is_write)
2556 cpu_physical_memory_write_rom(phys_addr, buf, l);
2557 else
5e2972fd 2558 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2559 len -= l;
2560 buf += l;
2561 addr += l;
2562 }
2563 return 0;
2564}
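/* Illustrative sketch (not part of the original file): debugger-style code
 * (a gdb stub or monitor command, say) reads guest-virtual memory through
 * cpu_memory_rw_debug(), which walks the guest page tables page by page and
 * returns -1 on an unmapped page.  The function name is hypothetical.
 */
static int example_read_guest_string(CPUArchState *env, target_ulong vaddr,
                                     char *buf, int buf_size)
{
    if (cpu_memory_rw_debug(env, vaddr, (uint8_t *)buf, buf_size - 1, 0) < 0) {
        return -1;
    }
    buf[buf_size - 1] = '\0';   /* the guest string may not be NUL-terminated */
    return 0;
}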
a68fe89c 2565#endif
13eb76e0 2566
8e4a424b
BS
2567#if !defined(CONFIG_USER_ONLY)
2568
2569/*
2570 * A helper function for the _utterly broken_ virtio device model to find out if
2571 * it's running on a big-endian machine. Don't do this at home, kids!
2572 */
2573bool virtio_is_big_endian(void);
2574bool virtio_is_big_endian(void)
2575{
2576#if defined(TARGET_WORDS_BIGENDIAN)
2577 return true;
2578#else
2579 return false;
2580#endif
2581}
2582
2583#endif
2584
76f35538 2585#ifndef CONFIG_USER_ONLY
a8170e5e 2586bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2587{
5c8a00ce 2588 MemoryRegion *mr;
149f54b5 2589 hwaddr l = 1;
76f35538 2590
5c8a00ce
PB
2591 mr = address_space_translate(&address_space_memory,
2592 phys_addr, &phys_addr, &l, false);
76f35538 2593
5c8a00ce
PB
2594 return !(memory_region_is_ram(mr) ||
2595 memory_region_is_romd(mr));
76f35538 2596}
bd2fa51f
MH
2597
2598void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2599{
2600 RAMBlock *block;
2601
2602 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2603 func(block->host, block->offset, block->length, opaque);
2604 }
2605}
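/* Illustrative sketch (not part of the original file): a callback for
 * qemu_ram_foreach_block(), matching the arguments passed above (host
 * pointer, ram_addr_t offset, length, opaque).  Here it merely accumulates
 * the total RAM size into *(ram_addr_t *)opaque; the names are placeholders.
 */
static void example_count_ram(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    ram_addr_t *total = opaque;

    (void)host_addr;
    (void)offset;
    *total += length;
}

/* usage: ram_addr_t total = 0; qemu_ram_foreach_block(example_count_ram, &total); */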
ec3f8c99 2606#endif