qemu.git / exec.c (blame at commit "exec: remove cur_map")
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
1de7afc9 32#include "qemu/osdep.h"
9c17d615 33#include "sysemu/kvm.h"
0d09e41a 34#include "hw/xen/xen.h"
1de7afc9
PB
35#include "qemu/timer.h"
36#include "qemu/config-file.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
67d95c15 52
db7b5426 53//#define DEBUG_SUBPAGE
1196be37 54
e2eef170 55#if !defined(CONFIG_USER_ONLY)
9fa3e853 56int phys_ram_fd;
74576198 57static int in_migration;
94a6b54f 58
a3161038 59RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
60
61static MemoryRegion *system_memory;
309cb471 62static MemoryRegion *system_io;
62152b8a 63
f6790af6
AK
64AddressSpace address_space_io;
65AddressSpace address_space_memory;
2673a5da 66
0844e007 67MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 68static MemoryRegion io_mem_unassigned;
0e0df1e2 69
e2eef170 70#endif
9fa3e853 71
9349b4f9 72CPUArchState *first_cpu;
6a00d601
FB
73/* current CPU in the current thread. It is only valid inside
74 cpu_exec() */
9349b4f9 75DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 76/* 0 = Do not count executed instructions.
bf20dc07 77 1 = Precise instruction counting.
2e70f6ef 78 2 = Adaptive rate instruction counting. */
5708fc66 79int use_icount;
6a00d601 80
e2eef170 81#if !defined(CONFIG_USER_ONLY)
4346ae3e 82
1db8abb1
PB
83typedef struct PhysPageEntry PhysPageEntry;
84
85struct PhysPageEntry {
86 uint16_t is_leaf : 1;
87 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
88 uint16_t ptr : 15;
89};
90
0475d94f
PB
91typedef PhysPageEntry Node[L2_SIZE];
92
1db8abb1
PB
93struct AddressSpaceDispatch {
94 /* This is a multi-level map on the physical address space.
95 * The bottom level has pointers to MemoryRegionSections.
96 */
97 PhysPageEntry phys_map;
0475d94f
PB
98 Node *nodes;
99 MemoryRegionSection *sections;
acc9d80b 100 AddressSpace *as;
1db8abb1
PB
101};
102
90260c6c
JK
103#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
104typedef struct subpage_t {
105 MemoryRegion iomem;
acc9d80b 106 AddressSpace *as;
90260c6c
JK
107 hwaddr base;
108 uint16_t sub_section[TARGET_PAGE_SIZE];
109} subpage_t;
110
b41aac4f
LPF
111#define PHYS_SECTION_UNASSIGNED 0
112#define PHYS_SECTION_NOTDIRTY 1
113#define PHYS_SECTION_ROM 2
114#define PHYS_SECTION_WATCH 3
5312bd8b 115
9affd6fc
PB
116typedef struct PhysPageMap {
117 unsigned sections_nb;
118 unsigned sections_nb_alloc;
119 unsigned nodes_nb;
120 unsigned nodes_nb_alloc;
121 Node *nodes;
122 MemoryRegionSection *sections;
123} PhysPageMap;
124
6092666e 125static PhysPageMap *prev_map;
9affd6fc 126static PhysPageMap next_map;
d6f2ea22 127
07f07b31 128#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 129
e2eef170 130static void io_mem_init(void);
62152b8a 131static void memory_map_init(void);
8b9c99d9 132static void *qemu_safe_ram_ptr(ram_addr_t addr);
e2eef170 133
1ec9b909 134static MemoryRegion io_mem_watch;
6658ffb8 135#endif
fd6ce8f6 136
6d9a1304 137#if !defined(CONFIG_USER_ONLY)
d6f2ea22 138
f7bf5461 139static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 140{
9affd6fc
PB
141 if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
142 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
143 16);
144 next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
145 next_map.nodes_nb + nodes);
146 next_map.nodes = g_renew(Node, next_map.nodes,
147 next_map.nodes_nb_alloc);
d6f2ea22 148 }
f7bf5461
AK
149}
150
151static uint16_t phys_map_node_alloc(void)
152{
153 unsigned i;
154 uint16_t ret;
155
9affd6fc 156 ret = next_map.nodes_nb++;
f7bf5461 157 assert(ret != PHYS_MAP_NODE_NIL);
9affd6fc 158 assert(ret != next_map.nodes_nb_alloc);
d6f2ea22 159 for (i = 0; i < L2_SIZE; ++i) {
9affd6fc
PB
160 next_map.nodes[ret][i].is_leaf = 0;
161 next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 162 }
f7bf5461 163 return ret;
d6f2ea22
AK
164}
165
a8170e5e
AK
166static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
167 hwaddr *nb, uint16_t leaf,
2999097b 168 int level)
f7bf5461
AK
169{
170 PhysPageEntry *p;
171 int i;
a8170e5e 172 hwaddr step = (hwaddr)1 << (level * L2_BITS);
108c49b8 173
07f07b31 174 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800 175 lp->ptr = phys_map_node_alloc();
9affd6fc 176 p = next_map.nodes[lp->ptr];
f7bf5461
AK
177 if (level == 0) {
178 for (i = 0; i < L2_SIZE; i++) {
07f07b31 179 p[i].is_leaf = 1;
b41aac4f 180 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 181 }
67c4d23c 182 }
f7bf5461 183 } else {
9affd6fc 184 p = next_map.nodes[lp->ptr];
92e873b9 185 }
2999097b 186 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 187
2999097b 188 while (*nb && lp < &p[L2_SIZE]) {
07f07b31
AK
189 if ((*index & (step - 1)) == 0 && *nb >= step) {
190 lp->is_leaf = true;
c19e8800 191 lp->ptr = leaf;
07f07b31
AK
192 *index += step;
193 *nb -= step;
2999097b
AK
194 } else {
195 phys_page_set_level(lp, index, nb, leaf, level - 1);
196 }
197 ++lp;
f7bf5461
AK
198 }
199}
200
ac1970fb 201static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 202 hwaddr index, hwaddr nb,
2999097b 203 uint16_t leaf)
f7bf5461 204{
2999097b 205 /* Wildly overreserve - it doesn't matter much. */
07f07b31 206 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 207
ac1970fb 208 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
209}
210
9affd6fc
PB
211static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
212 Node *nodes, MemoryRegionSection *sections)
92e873b9 213{
31ab2b4a
AK
214 PhysPageEntry *p;
215 int i;
f1f6e3b8 216
07f07b31 217 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 218 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 219 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 220 }
9affd6fc 221 p = nodes[lp.ptr];
31ab2b4a 222 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 223 }
9affd6fc 224 return &sections[lp.ptr];
f3705d53
AK
225}
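/* Illustrative sketch (not part of the build): how a lookup walks the
 * phys_map radix tree through an AddressSpaceDispatch.  Each non-leaf level
 * consumes L2_BITS bits of the page index, from level P_L2_LEVELS - 1 down
 * to 0; a non-leaf ptr indexes into the nodes array, a leaf ptr indexes into
 * the sections array.  Equivalent in spirit to phys_page_find() above.
 */
#if 0   /* example only */
static MemoryRegionSection *example_phys_page_lookup(AddressSpaceDispatch *d,
                                                     hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    int level;

    for (level = P_L2_LEVELS - 1; level >= 0 && !lp.is_leaf; level--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &d->sections[PHYS_SECTION_UNASSIGNED];
        }
        lp = d->nodes[lp.ptr][(index >> (level * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &d->sections[lp.ptr];
}
#endif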
226
e5548617
BS
227bool memory_region_is_unassigned(MemoryRegion *mr)
228{
2a8e7499 229 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 230 && mr != &io_mem_watch;
fd6ce8f6 231}
149f54b5 232
9f029603 233static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
90260c6c
JK
234 hwaddr addr,
235 bool resolve_subpage)
9f029603 236{
0475d94f 237 AddressSpaceDispatch *d = as->dispatch;
90260c6c
JK
238 MemoryRegionSection *section;
239 subpage_t *subpage;
240
0475d94f
PB
241 section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
242 d->nodes, d->sections);
90260c6c
JK
243 if (resolve_subpage && section->mr->subpage) {
244 subpage = container_of(section->mr, subpage_t, iomem);
0475d94f 245 section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
246 }
247 return section;
9f029603
JK
248}
249
90260c6c
JK
250static MemoryRegionSection *
251address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
252 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
253{
254 MemoryRegionSection *section;
255 Int128 diff;
256
90260c6c 257 section = address_space_lookup_region(as, addr, resolve_subpage);
149f54b5
PB
258 /* Compute offset within MemoryRegionSection */
259 addr -= section->offset_within_address_space;
260
261 /* Compute offset within MemoryRegion */
262 *xlat = addr + section->offset_within_region;
263
264 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 265 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
266 return section;
267}
90260c6c 268
5c8a00ce
PB
269MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
270 hwaddr *xlat, hwaddr *plen,
271 bool is_write)
90260c6c 272{
30951157
AK
273 IOMMUTLBEntry iotlb;
274 MemoryRegionSection *section;
275 MemoryRegion *mr;
276 hwaddr len = *plen;
277
278 for (;;) {
279 section = address_space_translate_internal(as, addr, &addr, plen, true);
280 mr = section->mr;
281
282 if (!mr->iommu_ops) {
283 break;
284 }
285
286 iotlb = mr->iommu_ops->translate(mr, addr);
287 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
288 | (addr & iotlb.addr_mask));
289 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
290 if (!(iotlb.perm & (1 << is_write))) {
291 mr = &io_mem_unassigned;
292 break;
293 }
294
295 as = iotlb.target_as;
296 }
297
298 *plen = len;
299 *xlat = addr;
300 return mr;
90260c6c
JK
301}
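/* Illustrative sketch (not part of the build): how an I/O path typically
 * drives address_space_translate().  The caller passes the length it wants;
 * on return *plen is clamped to what the returned MemoryRegion covers
 * contiguously and *xlat is the offset within that region.
 */
#if 0   /* example only */
static void example_translate_loop(AddressSpace *as, hwaddr addr,
                                   uint8_t *buf, hwaddr len, bool is_write)
{
    while (len > 0) {
        hwaddr xlat, l = len;
        MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l,
                                                   is_write);

        if (memory_region_is_ram(mr)) {
            /* RAM: access qemu_get_ram_ptr(...) + xlat directly */
        } else {
            /* MMIO: go through the MemoryRegion's read/write callbacks */
        }
        len -= l;
        addr += l;
        buf += l;
    }
}
#endif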
302
303MemoryRegionSection *
304address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
305 hwaddr *plen)
306{
30951157
AK
307 MemoryRegionSection *section;
308 section = address_space_translate_internal(as, addr, xlat, plen, false);
309
310 assert(!section->mr->iommu_ops);
311 return section;
90260c6c 312}
5b6dd868 313#endif
fd6ce8f6 314
5b6dd868 315void cpu_exec_init_all(void)
fdbb84d1 316{
5b6dd868 317#if !defined(CONFIG_USER_ONLY)
b2a8658e 318 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
319 memory_map_init();
320 io_mem_init();
fdbb84d1 321#endif
5b6dd868 322}
fdbb84d1 323
b170fce3 324#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
325
326static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 327{
259186a7 328 CPUState *cpu = opaque;
a513fe19 329
5b6dd868
BS
330 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
331 version_id is increased. */
259186a7
AF
332 cpu->interrupt_request &= ~0x01;
333 tlb_flush(cpu->env_ptr, 1);
5b6dd868
BS
334
335 return 0;
a513fe19 336}
7501267e 337
1a1562f5 338const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
339 .name = "cpu_common",
340 .version_id = 1,
341 .minimum_version_id = 1,
342 .minimum_version_id_old = 1,
343 .post_load = cpu_common_post_load,
344 .fields = (VMStateField []) {
259186a7
AF
345 VMSTATE_UINT32(halted, CPUState),
346 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868
BS
347 VMSTATE_END_OF_LIST()
348 }
349};
1a1562f5 350
5b6dd868 351#endif
ea041c0e 352
38d8f5c8 353CPUState *qemu_get_cpu(int index)
ea041c0e 354{
5b6dd868 355 CPUArchState *env = first_cpu;
38d8f5c8 356 CPUState *cpu = NULL;
ea041c0e 357
5b6dd868 358 while (env) {
55e5c285
AF
359 cpu = ENV_GET_CPU(env);
360 if (cpu->cpu_index == index) {
5b6dd868 361 break;
55e5c285 362 }
5b6dd868 363 env = env->next_cpu;
ea041c0e 364 }
5b6dd868 365
d76fddae 366 return env ? cpu : NULL;
ea041c0e
FB
367}
368
d6b9e0d6
MT
369void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
370{
371 CPUArchState *env = first_cpu;
372
373 while (env) {
374 func(ENV_GET_CPU(env), data);
375 env = env->next_cpu;
376 }
377}
378
5b6dd868 379void cpu_exec_init(CPUArchState *env)
ea041c0e 380{
5b6dd868 381 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 382 CPUClass *cc = CPU_GET_CLASS(cpu);
5b6dd868
BS
383 CPUArchState **penv;
384 int cpu_index;
385
386#if defined(CONFIG_USER_ONLY)
387 cpu_list_lock();
388#endif
389 env->next_cpu = NULL;
390 penv = &first_cpu;
391 cpu_index = 0;
392 while (*penv != NULL) {
393 penv = &(*penv)->next_cpu;
394 cpu_index++;
395 }
55e5c285 396 cpu->cpu_index = cpu_index;
1b1ed8dc 397 cpu->numa_node = 0;
5b6dd868
BS
398 QTAILQ_INIT(&env->breakpoints);
399 QTAILQ_INIT(&env->watchpoints);
400#ifndef CONFIG_USER_ONLY
401 cpu->thread_id = qemu_get_thread_id();
402#endif
403 *penv = env;
404#if defined(CONFIG_USER_ONLY)
405 cpu_list_unlock();
406#endif
259186a7 407 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
5b6dd868 408#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
409 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
410 cpu_save, cpu_load, env);
b170fce3 411 assert(cc->vmsd == NULL);
5b6dd868 412#endif
b170fce3
AF
413 if (cc->vmsd != NULL) {
414 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
415 }
ea041c0e
FB
416}
417
1fddef4b 418#if defined(TARGET_HAS_ICE)
94df27fd 419#if defined(CONFIG_USER_ONLY)
9349b4f9 420static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
94df27fd
PB
421{
422 tb_invalidate_phys_page_range(pc, pc + 1, 0);
423}
424#else
1e7855a5
MF
425static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
426{
9d70c4b7
MF
427 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
428 (pc & ~TARGET_PAGE_MASK));
1e7855a5 429}
c27004ec 430#endif
94df27fd 431#endif /* TARGET_HAS_ICE */
d720b93d 432
c527ee8f 433#if defined(CONFIG_USER_ONLY)
9349b4f9 434void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f
PB
435
436{
437}
438
9349b4f9 439int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f
PB
440 int flags, CPUWatchpoint **watchpoint)
441{
442 return -ENOSYS;
443}
444#else
6658ffb8 445/* Add a watchpoint. */
9349b4f9 446int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 447 int flags, CPUWatchpoint **watchpoint)
6658ffb8 448{
b4051334 449 target_ulong len_mask = ~(len - 1);
c0ce998e 450 CPUWatchpoint *wp;
6658ffb8 451
b4051334 452 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
453 if ((len & (len - 1)) || (addr & ~len_mask) ||
454 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
455 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
456 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
457 return -EINVAL;
458 }
7267c094 459 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
460
461 wp->vaddr = addr;
b4051334 462 wp->len_mask = len_mask;
a1d1bb31
AL
463 wp->flags = flags;
464
2dc9f411 465 /* keep all GDB-injected watchpoints in front */
c0ce998e 466 if (flags & BP_GDB)
72cf2d4f 467 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 468 else
72cf2d4f 469 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 470
6658ffb8 471 tlb_flush_page(env, addr);
a1d1bb31
AL
472
473 if (watchpoint)
474 *watchpoint = wp;
475 return 0;
6658ffb8
PB
476}
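/* Illustrative sketch (not part of the build): inserting and removing a
 * 4-byte write watchpoint; addr is assumed to be 4-byte aligned, as required
 * by the sanity checks above.
 */
#if 0   /* example only */
static void example_watchpoint(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) == 0) {
        /* ... run the guest, then drop the watchpoint again ... */
        cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif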
477
a1d1bb31 478/* Remove a specific watchpoint. */
9349b4f9 479int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 480 int flags)
6658ffb8 481{
b4051334 482 target_ulong len_mask = ~(len - 1);
a1d1bb31 483 CPUWatchpoint *wp;
6658ffb8 484
72cf2d4f 485 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 486 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 487 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 488 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
489 return 0;
490 }
491 }
a1d1bb31 492 return -ENOENT;
6658ffb8
PB
493}
494
a1d1bb31 495/* Remove a specific watchpoint by reference. */
9349b4f9 496void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 497{
72cf2d4f 498 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 499
a1d1bb31
AL
500 tlb_flush_page(env, watchpoint->vaddr);
501
7267c094 502 g_free(watchpoint);
a1d1bb31
AL
503}
504
505/* Remove all matching watchpoints. */
9349b4f9 506void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 507{
c0ce998e 508 CPUWatchpoint *wp, *next;
a1d1bb31 509
72cf2d4f 510 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
511 if (wp->flags & mask)
512 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 513 }
7d03f82f 514}
c527ee8f 515#endif
7d03f82f 516
a1d1bb31 517/* Add a breakpoint. */
9349b4f9 518int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 519 CPUBreakpoint **breakpoint)
4c3a88a2 520{
1fddef4b 521#if defined(TARGET_HAS_ICE)
c0ce998e 522 CPUBreakpoint *bp;
3b46e624 523
7267c094 524 bp = g_malloc(sizeof(*bp));
4c3a88a2 525
a1d1bb31
AL
526 bp->pc = pc;
527 bp->flags = flags;
528
2dc9f411 529 /* keep all GDB-injected breakpoints in front */
c0ce998e 530 if (flags & BP_GDB)
72cf2d4f 531 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 532 else
72cf2d4f 533 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 534
d720b93d 535 breakpoint_invalidate(env, pc);
a1d1bb31
AL
536
537 if (breakpoint)
538 *breakpoint = bp;
4c3a88a2
FB
539 return 0;
540#else
a1d1bb31 541 return -ENOSYS;
4c3a88a2
FB
542#endif
543}
544
a1d1bb31 545/* Remove a specific breakpoint. */
9349b4f9 546int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 547{
7d03f82f 548#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
549 CPUBreakpoint *bp;
550
72cf2d4f 551 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
552 if (bp->pc == pc && bp->flags == flags) {
553 cpu_breakpoint_remove_by_ref(env, bp);
554 return 0;
555 }
7d03f82f 556 }
a1d1bb31
AL
557 return -ENOENT;
558#else
559 return -ENOSYS;
7d03f82f
EI
560#endif
561}
562
a1d1bb31 563/* Remove a specific breakpoint by reference. */
9349b4f9 564void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 565{
1fddef4b 566#if defined(TARGET_HAS_ICE)
72cf2d4f 567 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 568
a1d1bb31
AL
569 breakpoint_invalidate(env, breakpoint->pc);
570
7267c094 571 g_free(breakpoint);
a1d1bb31
AL
572#endif
573}
574
575/* Remove all matching breakpoints. */
9349b4f9 576void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
577{
578#if defined(TARGET_HAS_ICE)
c0ce998e 579 CPUBreakpoint *bp, *next;
a1d1bb31 580
72cf2d4f 581 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
582 if (bp->flags & mask)
583 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 584 }
4c3a88a2
FB
585#endif
586}
587
c33a346e
FB
588/* enable or disable single step mode. EXCP_DEBUG is returned by the
589 CPU loop after each instruction */
9349b4f9 590void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 591{
1fddef4b 592#if defined(TARGET_HAS_ICE)
c33a346e
FB
593 if (env->singlestep_enabled != enabled) {
594 env->singlestep_enabled = enabled;
e22a25c9
AL
595 if (kvm_enabled())
596 kvm_update_guest_debug(env, 0);
597 else {
ccbb4d44 598 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
599 /* XXX: only flush what is necessary */
600 tb_flush(env);
601 }
c33a346e
FB
602 }
603#endif
604}
605
9349b4f9 606void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e 607{
878096ee 608 CPUState *cpu = ENV_GET_CPU(env);
7501267e 609 va_list ap;
493ae1f0 610 va_list ap2;
7501267e
FB
611
612 va_start(ap, fmt);
493ae1f0 613 va_copy(ap2, ap);
7501267e
FB
614 fprintf(stderr, "qemu: fatal: ");
615 vfprintf(stderr, fmt, ap);
616 fprintf(stderr, "\n");
878096ee 617 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
618 if (qemu_log_enabled()) {
619 qemu_log("qemu: fatal: ");
620 qemu_log_vprintf(fmt, ap2);
621 qemu_log("\n");
6fd2a026 622 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 623 qemu_log_flush();
93fcfe39 624 qemu_log_close();
924edcae 625 }
493ae1f0 626 va_end(ap2);
f9373291 627 va_end(ap);
fd052bf6
RV
628#if defined(CONFIG_USER_ONLY)
629 {
630 struct sigaction act;
631 sigfillset(&act.sa_mask);
632 act.sa_handler = SIG_DFL;
633 sigaction(SIGABRT, &act, NULL);
634 }
635#endif
7501267e
FB
636 abort();
637}
638
9349b4f9 639CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 640{
9349b4f9
AF
641 CPUArchState *new_env = cpu_init(env->cpu_model_str);
642 CPUArchState *next_cpu = new_env->next_cpu;
5a38f081
AL
643#if defined(TARGET_HAS_ICE)
644 CPUBreakpoint *bp;
645 CPUWatchpoint *wp;
646#endif
647
9349b4f9 648 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081 649
55e5c285 650 /* Preserve chaining. */
c5be9f08 651 new_env->next_cpu = next_cpu;
5a38f081
AL
652
653 /* Clone all break/watchpoints.
654 Note: Once we support ptrace with hw-debug register access, make sure
655 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
656 QTAILQ_INIT(&env->breakpoints);
657 QTAILQ_INIT(&env->watchpoints);
5a38f081 658#if defined(TARGET_HAS_ICE)
72cf2d4f 659 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
660 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
661 }
72cf2d4f 662 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
663 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
664 wp->flags, NULL);
665 }
666#endif
667
c5be9f08
TS
668 return new_env;
669}
670
0124311e 671#if !defined(CONFIG_USER_ONLY)
d24981d3
JQ
672static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
673 uintptr_t length)
674{
675 uintptr_t start1;
676
677 /* we modify the TLB cache so that the dirty bit will be set again
678 when accessing the range */
679 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
680 /* Check that we don't span multiple blocks - this breaks the
681 address comparisons below. */
682 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
683 != (end - 1) - start) {
684 abort();
685 }
686 cpu_tlb_reset_dirty_all(start1, length);
687
688}
689
5579c7f3 690/* Note: start and end must be within the same ram block. */
c227f099 691void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 692 int dirty_flags)
1ccde1cb 693{
d24981d3 694 uintptr_t length;
1ccde1cb
FB
695
696 start &= TARGET_PAGE_MASK;
697 end = TARGET_PAGE_ALIGN(end);
698
699 length = end - start;
700 if (length == 0)
701 return;
f7c11b53 702 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 703
d24981d3
JQ
704 if (tcg_enabled()) {
705 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 706 }
1ccde1cb
FB
707}
708
8b9c99d9 709static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 710{
f6f3fbca 711 int ret = 0;
74576198 712 in_migration = enable;
f6f3fbca 713 return ret;
74576198
AL
714}
715
a8170e5e 716hwaddr memory_region_section_get_iotlb(CPUArchState *env,
149f54b5
PB
717 MemoryRegionSection *section,
718 target_ulong vaddr,
719 hwaddr paddr, hwaddr xlat,
720 int prot,
721 target_ulong *address)
e5548617 722{
a8170e5e 723 hwaddr iotlb;
e5548617
BS
724 CPUWatchpoint *wp;
725
cc5bea60 726 if (memory_region_is_ram(section->mr)) {
e5548617
BS
727 /* Normal RAM. */
728 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 729 + xlat;
e5548617 730 if (!section->readonly) {
b41aac4f 731 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 732 } else {
b41aac4f 733 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
734 }
735 } else {
0475d94f 736 iotlb = section - address_space_memory.dispatch->sections;
149f54b5 737 iotlb += xlat;
e5548617
BS
738 }
739
740 /* Make accesses to pages with watchpoints go via the
741 watchpoint trap routines. */
742 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
743 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
744 /* Avoid trapping reads of pages with a write breakpoint. */
745 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 746 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
747 *address |= TLB_MMIO;
748 break;
749 }
750 }
751 }
752
753 return iotlb;
754}
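/* Note (illustration only): the value built here is decoded later by
 * iotlb_to_region(), which masks with ~TARGET_PAGE_MASK to recover the
 * section index.  This is why phys_section_add() asserts that the number of
 * sections stays below TARGET_PAGE_SIZE: a section index must fit in the
 * sub-page bits of an iotlb value.
 */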
9fa3e853
FB
755#endif /* defined(CONFIG_USER_ONLY) */
756
e2eef170 757#if !defined(CONFIG_USER_ONLY)
8da3ff18 758
c227f099 759static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 760 uint16_t section);
acc9d80b 761static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 762
5312bd8b
AK
763static uint16_t phys_section_add(MemoryRegionSection *section)
764{
68f3f65b
PB
765 /* The physical section number is ORed with a page-aligned
766 * pointer to produce the iotlb entries. Thus it should
767 * never overflow into the page-aligned value.
768 */
9affd6fc 769 assert(next_map.sections_nb < TARGET_PAGE_SIZE);
68f3f65b 770
9affd6fc
PB
771 if (next_map.sections_nb == next_map.sections_nb_alloc) {
772 next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
773 16);
774 next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
775 next_map.sections_nb_alloc);
5312bd8b 776 }
9affd6fc 777 next_map.sections[next_map.sections_nb] = *section;
dfde4e6e 778 memory_region_ref(section->mr);
9affd6fc 779 return next_map.sections_nb++;
5312bd8b
AK
780}
781
058bc4b5
PB
782static void phys_section_destroy(MemoryRegion *mr)
783{
dfde4e6e
PB
784 memory_region_unref(mr);
785
058bc4b5
PB
786 if (mr->subpage) {
787 subpage_t *subpage = container_of(mr, subpage_t, iomem);
788 memory_region_destroy(&subpage->iomem);
789 g_free(subpage);
790 }
791}
792
6092666e 793static void phys_sections_free(PhysPageMap *map)
5312bd8b 794{
9affd6fc
PB
795 while (map->sections_nb > 0) {
796 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
797 phys_section_destroy(section->mr);
798 }
9affd6fc
PB
799 g_free(map->sections);
800 g_free(map->nodes);
6092666e 801 g_free(map);
5312bd8b
AK
802}
803
ac1970fb 804static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
805{
806 subpage_t *subpage;
a8170e5e 807 hwaddr base = section->offset_within_address_space
0f0cb164 808 & TARGET_PAGE_MASK;
9affd6fc
PB
809 MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
810 next_map.nodes, next_map.sections);
0f0cb164
AK
811 MemoryRegionSection subsection = {
812 .offset_within_address_space = base,
052e87b0 813 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 814 };
a8170e5e 815 hwaddr start, end;
0f0cb164 816
f3705d53 817 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 818
f3705d53 819 if (!(existing->mr->subpage)) {
acc9d80b 820 subpage = subpage_init(d->as, base);
0f0cb164 821 subsection.mr = &subpage->iomem;
ac1970fb 822 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 823 phys_section_add(&subsection));
0f0cb164 824 } else {
f3705d53 825 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
826 }
827 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 828 end = start + int128_get64(section->size) - 1;
0f0cb164
AK
829 subpage_register(subpage, start, end, phys_section_add(section));
830}
831
832
052e87b0
PB
833static void register_multipage(AddressSpaceDispatch *d,
834 MemoryRegionSection *section)
33417e70 835{
a8170e5e 836 hwaddr start_addr = section->offset_within_address_space;
5312bd8b 837 uint16_t section_index = phys_section_add(section);
052e87b0
PB
838 uint64_t num_pages = int128_get64(int128_rshift(section->size,
839 TARGET_PAGE_BITS));
dd81124b 840
733d5ef5
PB
841 assert(num_pages);
842 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
843}
844
ac1970fb 845static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 846{
89ae337a 847 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 848 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 849 MemoryRegionSection now = *section, remain = *section;
052e87b0 850 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 851
733d5ef5
PB
852 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
853 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
854 - now.offset_within_address_space;
855
052e87b0 856 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 857 register_subpage(d, &now);
733d5ef5 858 } else {
052e87b0 859 now.size = int128_zero();
733d5ef5 860 }
052e87b0
PB
861 while (int128_ne(remain.size, now.size)) {
862 remain.size = int128_sub(remain.size, now.size);
863 remain.offset_within_address_space += int128_get64(now.size);
864 remain.offset_within_region += int128_get64(now.size);
69b67646 865 now = remain;
052e87b0 866 if (int128_lt(remain.size, page_size)) {
733d5ef5
PB
867 register_subpage(d, &now);
868 } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
052e87b0 869 now.size = page_size;
ac1970fb 870 register_subpage(d, &now);
69b67646 871 } else {
052e87b0 872 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 873 register_multipage(d, &now);
69b67646 874 }
0f0cb164
AK
875 }
876}
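/* Worked example (illustration only, assuming TARGET_PAGE_SIZE is 0x1000):
 * a page-aligned section at address 0x2000 with size 0x2800 is registered as
 * one register_multipage() call for the two full pages [0x2000, 0x4000) plus
 * one register_subpage() call for the 0x800-byte tail; a section whose start
 * is not page aligned additionally gets a subpage head covering the bytes up
 * to the next page boundary.
 */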
877
62a2744c
SY
878void qemu_flush_coalesced_mmio_buffer(void)
879{
880 if (kvm_enabled())
881 kvm_flush_coalesced_mmio_buffer();
882}
883
b2a8658e
UD
884void qemu_mutex_lock_ramlist(void)
885{
886 qemu_mutex_lock(&ram_list.mutex);
887}
888
889void qemu_mutex_unlock_ramlist(void)
890{
891 qemu_mutex_unlock(&ram_list.mutex);
892}
893
c902760f
MT
894#if defined(__linux__) && !defined(TARGET_S390X)
895
896#include <sys/vfs.h>
897
898#define HUGETLBFS_MAGIC 0x958458f6
899
900static long gethugepagesize(const char *path)
901{
902 struct statfs fs;
903 int ret;
904
905 do {
9742bf26 906 ret = statfs(path, &fs);
c902760f
MT
907 } while (ret != 0 && errno == EINTR);
908
909 if (ret != 0) {
9742bf26
YT
910 perror(path);
911 return 0;
c902760f
MT
912 }
913
914 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 915 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
916
917 return fs.f_bsize;
918}
919
04b16653
AW
920static void *file_ram_alloc(RAMBlock *block,
921 ram_addr_t memory,
922 const char *path)
c902760f
MT
923{
924 char *filename;
8ca761f6
PF
925 char *sanitized_name;
926 char *c;
c902760f
MT
927 void *area;
928 int fd;
929#ifdef MAP_POPULATE
930 int flags;
931#endif
932 unsigned long hpagesize;
933
934 hpagesize = gethugepagesize(path);
935 if (!hpagesize) {
9742bf26 936 return NULL;
c902760f
MT
937 }
938
939 if (memory < hpagesize) {
940 return NULL;
941 }
942
943 if (kvm_enabled() && !kvm_has_sync_mmu()) {
944 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
945 return NULL;
946 }
947
8ca761f6
PF
948 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
949 sanitized_name = g_strdup(block->mr->name);
950 for (c = sanitized_name; *c != '\0'; c++) {
951 if (*c == '/')
952 *c = '_';
953 }
954
955 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
956 sanitized_name);
957 g_free(sanitized_name);
c902760f
MT
958
959 fd = mkstemp(filename);
960 if (fd < 0) {
9742bf26 961 perror("unable to create backing store for hugepages");
e4ada482 962 g_free(filename);
9742bf26 963 return NULL;
c902760f
MT
964 }
965 unlink(filename);
e4ada482 966 g_free(filename);
c902760f
MT
967
968 memory = (memory+hpagesize-1) & ~(hpagesize-1);
969
970 /*
971 * ftruncate is not supported by hugetlbfs in older
972 * hosts, so don't bother bailing out on errors.
973 * If anything goes wrong with it under other filesystems,
974 * mmap will fail.
975 */
976 if (ftruncate(fd, memory))
9742bf26 977 perror("ftruncate");
c902760f
MT
978
979#ifdef MAP_POPULATE
980 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
981 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
982 * to sidestep this quirk.
983 */
984 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
985 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
986#else
987 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
988#endif
989 if (area == MAP_FAILED) {
9742bf26
YT
990 perror("file_ram_alloc: can't mmap RAM pages");
991 close(fd);
992 return (NULL);
c902760f 993 }
04b16653 994 block->fd = fd;
c902760f
MT
995 return area;
996}
997#endif
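/* Usage note (illustration only): file_ram_alloc() is reached when guest RAM
 * is backed by a hugetlbfs mount, e.g. when QEMU is started with
 * "-mem-path /dev/hugepages" (the mount point is just an example); with
 * mem_prealloc set, the mmap() above uses MAP_POPULATE | MAP_SHARED so the
 * huge pages are faulted in up front.
 */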
998
d17b5288 999static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1000{
1001 RAMBlock *block, *next_block;
3e837b2c 1002 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1003
49cd9ac6
SH
1004 assert(size != 0); /* it would hand out same offset multiple times */
1005
a3161038 1006 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1007 return 0;
1008
a3161038 1009 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1010 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1011
1012 end = block->offset + block->length;
1013
a3161038 1014 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1015 if (next_block->offset >= end) {
1016 next = MIN(next, next_block->offset);
1017 }
1018 }
1019 if (next - end >= size && next - end < mingap) {
3e837b2c 1020 offset = end;
04b16653
AW
1021 mingap = next - end;
1022 }
1023 }
3e837b2c
AW
1024
1025 if (offset == RAM_ADDR_MAX) {
1026 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1027 (uint64_t)size);
1028 abort();
1029 }
1030
04b16653
AW
1031 return offset;
1032}
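/* Worked example (illustration only): with blocks occupying [0, 0x100000)
 * and [0x200000, 0x300000), a request for 0x80000 bytes returns 0x100000,
 * since the 1 MiB hole is the smallest gap that fits, while a request for
 * 0x180000 bytes returns 0x300000, i.e. past the last block.
 */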
1033
652d7ec2 1034ram_addr_t last_ram_offset(void)
d17b5288
AW
1035{
1036 RAMBlock *block;
1037 ram_addr_t last = 0;
1038
a3161038 1039 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1040 last = MAX(last, block->offset + block->length);
1041
1042 return last;
1043}
1044
ddb97f1d
JB
1045static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1046{
1047 int ret;
1048 QemuOpts *machine_opts;
1049
 1050 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1051 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1052 if (machine_opts &&
1053 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
1054 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1055 if (ret) {
1056 perror("qemu_madvise");
1057 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1058 "but dump_guest_core=off specified\n");
1059 }
1060 }
1061}
1062
c5705a77 1063void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
1064{
1065 RAMBlock *new_block, *block;
1066
c5705a77 1067 new_block = NULL;
a3161038 1068 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77
AK
1069 if (block->offset == addr) {
1070 new_block = block;
1071 break;
1072 }
1073 }
1074 assert(new_block);
1075 assert(!new_block->idstr[0]);
84b89d78 1076
09e5ab63
AL
1077 if (dev) {
1078 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1079 if (id) {
1080 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1081 g_free(id);
84b89d78
CM
1082 }
1083 }
1084 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1085
b2a8658e
UD
1086 /* This assumes the iothread lock is taken here too. */
1087 qemu_mutex_lock_ramlist();
a3161038 1088 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1089 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1090 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1091 new_block->idstr);
1092 abort();
1093 }
1094 }
b2a8658e 1095 qemu_mutex_unlock_ramlist();
c5705a77
AK
1096}
1097
8490fc78
LC
1098static int memory_try_enable_merging(void *addr, size_t len)
1099{
1100 QemuOpts *opts;
1101
1102 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1103 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1104 /* disabled by the user */
1105 return 0;
1106 }
1107
1108 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1109}
1110
c5705a77
AK
1111ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1112 MemoryRegion *mr)
1113{
abb26d63 1114 RAMBlock *block, *new_block;
c5705a77
AK
1115
1116 size = TARGET_PAGE_ALIGN(size);
1117 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1118
b2a8658e
UD
1119 /* This assumes the iothread lock is taken here too. */
1120 qemu_mutex_lock_ramlist();
7c637366 1121 new_block->mr = mr;
432d268c 1122 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1123 if (host) {
1124 new_block->host = host;
cd19cfa2 1125 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
1126 } else {
1127 if (mem_path) {
c902760f 1128#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
1129 new_block->host = file_ram_alloc(new_block, size, mem_path);
1130 if (!new_block->host) {
6eebf958 1131 new_block->host = qemu_anon_ram_alloc(size);
8490fc78 1132 memory_try_enable_merging(new_block->host, size);
6977dfe6 1133 }
c902760f 1134#else
6977dfe6
YT
1135 fprintf(stderr, "-mem-path option unsupported\n");
1136 exit(1);
c902760f 1137#endif
6977dfe6 1138 } else {
868bb33f 1139 if (xen_enabled()) {
fce537d4 1140 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
1141 } else if (kvm_enabled()) {
1142 /* some s390/kvm configurations have special constraints */
6eebf958 1143 new_block->host = kvm_ram_alloc(size);
432d268c 1144 } else {
6eebf958 1145 new_block->host = qemu_anon_ram_alloc(size);
432d268c 1146 }
8490fc78 1147 memory_try_enable_merging(new_block->host, size);
6977dfe6 1148 }
c902760f 1149 }
94a6b54f
PB
1150 new_block->length = size;
1151
abb26d63
PB
1152 /* Keep the list sorted from biggest to smallest block. */
1153 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1154 if (block->length < new_block->length) {
1155 break;
1156 }
1157 }
1158 if (block) {
1159 QTAILQ_INSERT_BEFORE(block, new_block, next);
1160 } else {
1161 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1162 }
0d6d3c87 1163 ram_list.mru_block = NULL;
94a6b54f 1164
f798b07f 1165 ram_list.version++;
b2a8658e 1166 qemu_mutex_unlock_ramlist();
f798b07f 1167
7267c094 1168 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1169 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
1170 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1171 0, size >> TARGET_PAGE_BITS);
1720aeee 1172 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1173
ddb97f1d 1174 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1175 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1176
6f0437e8
JK
1177 if (kvm_enabled())
1178 kvm_setup_guest_memory(new_block->host, size);
1179
94a6b54f
PB
1180 return new_block->offset;
1181}
e9a1ab19 1182
c5705a77 1183ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1184{
c5705a77 1185 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1186}
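/* Illustrative sketch (not part of the build): a device-model style use of
 * qemu_ram_alloc().  The size is arbitrary; real code usually goes through
 * memory_region_init_ram(), which ends up calling qemu_ram_alloc() itself.
 */
#if 0   /* example only */
static void example_ram_alloc(MemoryRegion *mr)
{
    ram_addr_t offset = qemu_ram_alloc(1024 * 1024, mr);
    void *host = qemu_get_ram_ptr(offset);

    memset(host, 0, 1024 * 1024);   /* the block is directly host-addressable */
}
#endif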
1187
1f2e98b6
AW
1188void qemu_ram_free_from_ptr(ram_addr_t addr)
1189{
1190 RAMBlock *block;
1191
b2a8658e
UD
1192 /* This assumes the iothread lock is taken here too. */
1193 qemu_mutex_lock_ramlist();
a3161038 1194 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1195 if (addr == block->offset) {
a3161038 1196 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1197 ram_list.mru_block = NULL;
f798b07f 1198 ram_list.version++;
7267c094 1199 g_free(block);
b2a8658e 1200 break;
1f2e98b6
AW
1201 }
1202 }
b2a8658e 1203 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1204}
1205
c227f099 1206void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1207{
04b16653
AW
1208 RAMBlock *block;
1209
b2a8658e
UD
1210 /* This assumes the iothread lock is taken here too. */
1211 qemu_mutex_lock_ramlist();
a3161038 1212 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1213 if (addr == block->offset) {
a3161038 1214 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1215 ram_list.mru_block = NULL;
f798b07f 1216 ram_list.version++;
cd19cfa2
HY
1217 if (block->flags & RAM_PREALLOC_MASK) {
1218 ;
1219 } else if (mem_path) {
04b16653
AW
1220#if defined (__linux__) && !defined(TARGET_S390X)
1221 if (block->fd) {
1222 munmap(block->host, block->length);
1223 close(block->fd);
1224 } else {
e7a09b92 1225 qemu_anon_ram_free(block->host, block->length);
04b16653 1226 }
fd28aa13
JK
1227#else
1228 abort();
04b16653
AW
1229#endif
1230 } else {
868bb33f 1231 if (xen_enabled()) {
e41d7c69 1232 xen_invalidate_map_cache_entry(block->host);
432d268c 1233 } else {
e7a09b92 1234 qemu_anon_ram_free(block->host, block->length);
432d268c 1235 }
04b16653 1236 }
7267c094 1237 g_free(block);
b2a8658e 1238 break;
04b16653
AW
1239 }
1240 }
b2a8658e 1241 qemu_mutex_unlock_ramlist();
04b16653 1242
e9a1ab19
FB
1243}
1244
cd19cfa2
HY
1245#ifndef _WIN32
1246void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1247{
1248 RAMBlock *block;
1249 ram_addr_t offset;
1250 int flags;
1251 void *area, *vaddr;
1252
a3161038 1253 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1254 offset = addr - block->offset;
1255 if (offset < block->length) {
1256 vaddr = block->host + offset;
1257 if (block->flags & RAM_PREALLOC_MASK) {
1258 ;
1259 } else {
1260 flags = MAP_FIXED;
1261 munmap(vaddr, length);
1262 if (mem_path) {
1263#if defined(__linux__) && !defined(TARGET_S390X)
1264 if (block->fd) {
1265#ifdef MAP_POPULATE
1266 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1267 MAP_PRIVATE;
1268#else
1269 flags |= MAP_PRIVATE;
1270#endif
1271 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1272 flags, block->fd, offset);
1273 } else {
1274 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1275 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1276 flags, -1, 0);
1277 }
fd28aa13
JK
1278#else
1279 abort();
cd19cfa2
HY
1280#endif
1281 } else {
1282#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1283 flags |= MAP_SHARED | MAP_ANONYMOUS;
1284 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1285 flags, -1, 0);
1286#else
1287 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1288 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1289 flags, -1, 0);
1290#endif
1291 }
1292 if (area != vaddr) {
f15fbc4b
AP
1293 fprintf(stderr, "Could not remap addr: "
1294 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1295 length, addr);
1296 exit(1);
1297 }
8490fc78 1298 memory_try_enable_merging(vaddr, length);
ddb97f1d 1299 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1300 }
1301 return;
1302 }
1303 }
1304}
1305#endif /* !_WIN32 */
1306
1b5ec234 1307static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
dc828ca1 1308{
94a6b54f
PB
1309 RAMBlock *block;
1310
b2a8658e 1311 /* The list is protected by the iothread lock here. */
0d6d3c87
PB
1312 block = ram_list.mru_block;
1313 if (block && addr - block->offset < block->length) {
1314 goto found;
1315 }
a3161038 1316 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1317 if (addr - block->offset < block->length) {
0d6d3c87 1318 goto found;
f471a17e 1319 }
94a6b54f 1320 }
f471a17e
AW
1321
1322 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1323 abort();
1324
0d6d3c87
PB
1325found:
1326 ram_list.mru_block = block;
1b5ec234
PB
1327 return block;
1328}
1329
1330/* Return a host pointer to ram allocated with qemu_ram_alloc.
1331 With the exception of the softmmu code in this file, this should
1332 only be used for local memory (e.g. video ram) that the device owns,
1333 and knows it isn't going to access beyond the end of the block.
1334
1335 It should not be used for general purpose DMA.
1336 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1337 */
1338void *qemu_get_ram_ptr(ram_addr_t addr)
1339{
1340 RAMBlock *block = qemu_get_ram_block(addr);
1341
0d6d3c87
PB
1342 if (xen_enabled()) {
1343 /* We need to check if the requested address is in the RAM
1344 * because we don't want to map the entire memory in QEMU.
1345 * In that case just map until the end of the page.
1346 */
1347 if (block->offset == 0) {
1348 return xen_map_cache(addr, 0, 0);
1349 } else if (block->host == NULL) {
1350 block->host =
1351 xen_map_cache(block->offset, block->length, 1);
1352 }
1353 }
1354 return block->host + (addr - block->offset);
dc828ca1
PB
1355}
1356
0d6d3c87
PB
1357/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1358 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1359 *
1360 * ??? Is this still necessary?
b2e0a138 1361 */
8b9c99d9 1362static void *qemu_safe_ram_ptr(ram_addr_t addr)
b2e0a138
MT
1363{
1364 RAMBlock *block;
1365
b2a8658e 1366 /* The list is protected by the iothread lock here. */
a3161038 1367 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1368 if (addr - block->offset < block->length) {
868bb33f 1369 if (xen_enabled()) {
432d268c
JN
1370 /* We need to check if the requested address is in the RAM
1371 * because we don't want to map the entire memory in QEMU.
712c2b41 1372 * In that case just map until the end of the page.
432d268c
JN
1373 */
1374 if (block->offset == 0) {
e41d7c69 1375 return xen_map_cache(addr, 0, 0);
432d268c 1376 } else if (block->host == NULL) {
e41d7c69
JK
1377 block->host =
1378 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
1379 }
1380 }
b2e0a138
MT
1381 return block->host + (addr - block->offset);
1382 }
1383 }
1384
1385 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1386 abort();
1387
1388 return NULL;
1389}
1390
38bee5dc
SS
1391/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1392 * but takes a size argument */
8b9c99d9 1393static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 1394{
8ab934f9
SS
1395 if (*size == 0) {
1396 return NULL;
1397 }
868bb33f 1398 if (xen_enabled()) {
e41d7c69 1399 return xen_map_cache(addr, *size, 1);
868bb33f 1400 } else {
38bee5dc
SS
1401 RAMBlock *block;
1402
a3161038 1403 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1404 if (addr - block->offset < block->length) {
1405 if (addr - block->offset + *size > block->length)
1406 *size = block->length - addr + block->offset;
1407 return block->host + (addr - block->offset);
1408 }
1409 }
1410
1411 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1412 abort();
38bee5dc
SS
1413 }
1414}
1415
7443b437
PB
1416/* Some of the softmmu routines need to translate from a host pointer
1417 (typically a TLB entry) back to a ram offset. */
1b5ec234 1418MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1419{
94a6b54f
PB
1420 RAMBlock *block;
1421 uint8_t *host = ptr;
1422
868bb33f 1423 if (xen_enabled()) {
e41d7c69 1424 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1425 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1426 }
1427
23887b79
PB
1428 block = ram_list.mru_block;
1429 if (block && block->host && host - block->host < block->length) {
1430 goto found;
1431 }
1432
a3161038 1433 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 1434 /* This case appears when the block is not mapped. */
1435 if (block->host == NULL) {
1436 continue;
1437 }
f471a17e 1438 if (host - block->host < block->length) {
23887b79 1439 goto found;
f471a17e 1440 }
94a6b54f 1441 }
432d268c 1442
1b5ec234 1443 return NULL;
23887b79
PB
1444
1445found:
1446 *ram_addr = block->offset + (host - block->host);
1b5ec234 1447 return block->mr;
e890261f 1448}
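/* Note (illustration only): this is the inverse of qemu_get_ram_ptr(): given
 * a host pointer into a RAM block (for instance a TLB addend), it recovers
 * the ram_addr_t offset and the owning MemoryRegion, checking the MRU block
 * first as a fast path.
 */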
f471a17e 1449
a8170e5e 1450static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1451 uint64_t val, unsigned size)
9fa3e853 1452{
3a7d929e 1453 int dirty_flags;
f7c11b53 1454 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1455 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
0e0df1e2 1456 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1457 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1458 }
0e0df1e2
AK
1459 switch (size) {
1460 case 1:
1461 stb_p(qemu_get_ram_ptr(ram_addr), val);
1462 break;
1463 case 2:
1464 stw_p(qemu_get_ram_ptr(ram_addr), val);
1465 break;
1466 case 4:
1467 stl_p(qemu_get_ram_ptr(ram_addr), val);
1468 break;
1469 default:
1470 abort();
3a7d929e 1471 }
f23db169 1472 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1473 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1474 /* we remove the notdirty callback only if the code has been
1475 flushed */
1476 if (dirty_flags == 0xff)
2e70f6ef 1477 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
1478}
1479
b018ddf6
PB
1480static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1481 unsigned size, bool is_write)
1482{
1483 return is_write;
1484}
1485
0e0df1e2 1486static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1487 .write = notdirty_mem_write,
b018ddf6 1488 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1489 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1490};
1491
0f459d16 1492/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1493static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1494{
9349b4f9 1495 CPUArchState *env = cpu_single_env;
06d55cc1 1496 target_ulong pc, cs_base;
0f459d16 1497 target_ulong vaddr;
a1d1bb31 1498 CPUWatchpoint *wp;
06d55cc1 1499 int cpu_flags;
0f459d16 1500
06d55cc1
AL
1501 if (env->watchpoint_hit) {
1502 /* We re-entered the check after replacing the TB. Now raise
1503 * the debug interrupt so that is will trigger after the
1504 * current instruction. */
c3affe56 1505 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1506 return;
1507 }
2e70f6ef 1508 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1509 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1510 if ((vaddr == (wp->vaddr & len_mask) ||
1511 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1512 wp->flags |= BP_WATCHPOINT_HIT;
1513 if (!env->watchpoint_hit) {
1514 env->watchpoint_hit = wp;
5a316526 1515 tb_check_watchpoint(env);
6e140f28
AL
1516 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1517 env->exception_index = EXCP_DEBUG;
488d6577 1518 cpu_loop_exit(env);
6e140f28
AL
1519 } else {
1520 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1521 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1522 cpu_resume_from_signal(env, NULL);
6e140f28 1523 }
06d55cc1 1524 }
6e140f28
AL
1525 } else {
1526 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1527 }
1528 }
1529}
1530
6658ffb8
PB
1531/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1532 so these check for a hit then pass through to the normal out-of-line
1533 phys routines. */
a8170e5e 1534static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1535 unsigned size)
6658ffb8 1536{
1ec9b909
AK
1537 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1538 switch (size) {
1539 case 1: return ldub_phys(addr);
1540 case 2: return lduw_phys(addr);
1541 case 4: return ldl_phys(addr);
1542 default: abort();
1543 }
6658ffb8
PB
1544}
1545
a8170e5e 1546static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1547 uint64_t val, unsigned size)
6658ffb8 1548{
1ec9b909
AK
1549 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1550 switch (size) {
67364150
MF
1551 case 1:
1552 stb_phys(addr, val);
1553 break;
1554 case 2:
1555 stw_phys(addr, val);
1556 break;
1557 case 4:
1558 stl_phys(addr, val);
1559 break;
1ec9b909
AK
1560 default: abort();
1561 }
6658ffb8
PB
1562}
1563
1ec9b909
AK
1564static const MemoryRegionOps watch_mem_ops = {
1565 .read = watch_mem_read,
1566 .write = watch_mem_write,
1567 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1568};
6658ffb8 1569
a8170e5e 1570static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1571 unsigned len)
db7b5426 1572{
acc9d80b
JK
1573 subpage_t *subpage = opaque;
1574 uint8_t buf[4];
791af8c8 1575
db7b5426 1576#if defined(DEBUG_SUBPAGE)
acc9d80b
JK
1577 printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
1578 subpage, len, addr);
db7b5426 1579#endif
acc9d80b
JK
1580 address_space_read(subpage->as, addr + subpage->base, buf, len);
1581 switch (len) {
1582 case 1:
1583 return ldub_p(buf);
1584 case 2:
1585 return lduw_p(buf);
1586 case 4:
1587 return ldl_p(buf);
1588 default:
1589 abort();
1590 }
db7b5426
BS
1591}
1592
a8170e5e 1593static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1594 uint64_t value, unsigned len)
db7b5426 1595{
acc9d80b
JK
1596 subpage_t *subpage = opaque;
1597 uint8_t buf[4];
1598
db7b5426 1599#if defined(DEBUG_SUBPAGE)
70c68e44 1600 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
acc9d80b
JK
1601 " value %"PRIx64"\n",
1602 __func__, subpage, len, addr, value);
db7b5426 1603#endif
acc9d80b
JK
1604 switch (len) {
1605 case 1:
1606 stb_p(buf, value);
1607 break;
1608 case 2:
1609 stw_p(buf, value);
1610 break;
1611 case 4:
1612 stl_p(buf, value);
1613 break;
1614 default:
1615 abort();
1616 }
1617 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1618}
1619
c353e4cc
PB
1620static bool subpage_accepts(void *opaque, hwaddr addr,
1621 unsigned size, bool is_write)
1622{
acc9d80b 1623 subpage_t *subpage = opaque;
c353e4cc 1624#if defined(DEBUG_SUBPAGE)
acc9d80b
JK
1625 printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
 1626 __func__, subpage, is_write ? 'w' : 'r', size, addr);
c353e4cc
PB
1627#endif
1628
acc9d80b
JK
1629 return address_space_access_valid(subpage->as, addr + subpage->base,
1630 size, is_write);
c353e4cc
PB
1631}
1632
70c68e44
AK
1633static const MemoryRegionOps subpage_ops = {
1634 .read = subpage_read,
1635 .write = subpage_write,
c353e4cc 1636 .valid.accepts = subpage_accepts,
70c68e44 1637 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1638};
1639
c227f099 1640static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1641 uint16_t section)
db7b5426
BS
1642{
1643 int idx, eidx;
1644
1645 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1646 return -1;
1647 idx = SUBPAGE_IDX(start);
1648 eidx = SUBPAGE_IDX(end);
1649#if defined(DEBUG_SUBPAGE)
0bf9e31a 1650 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
 1651 mmio, start, end, idx, eidx, section);
1652#endif
db7b5426 1653 for (; idx <= eidx; idx++) {
5312bd8b 1654 mmio->sub_section[idx] = section;
db7b5426
BS
1655 }
1656
1657 return 0;
1658}
1659
acc9d80b 1660static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1661{
c227f099 1662 subpage_t *mmio;
db7b5426 1663
7267c094 1664 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1665
acc9d80b 1666 mmio->as = as;
1eec614b 1667 mmio->base = base;
2c9b15ca 1668 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
70c68e44 1669 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1670 mmio->iomem.subpage = true;
db7b5426 1671#if defined(DEBUG_SUBPAGE)
1eec614b
AL
1672 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
1673 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 1674#endif
b41aac4f 1675 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1676
1677 return mmio;
1678}
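/* Note (illustration only): a subpage covers exactly one target page whose
 * contents are split between several MemoryRegionSections.  sub_section[]
 * maps each byte offset within the page (SUBPAGE_IDX) to a section index,
 * and the handlers above simply forward the access to the owning
 * AddressSpace at subpage->base + addr.
 */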
1679
5312bd8b
AK
1680static uint16_t dummy_section(MemoryRegion *mr)
1681{
1682 MemoryRegionSection section = {
1683 .mr = mr,
1684 .offset_within_address_space = 0,
1685 .offset_within_region = 0,
052e87b0 1686 .size = int128_2_64(),
5312bd8b
AK
1687 };
1688
1689 return phys_section_add(&section);
1690}
1691
a8170e5e 1692MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1693{
0475d94f 1694 return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1695}
1696
e9179ce1
AK
1697static void io_mem_init(void)
1698{
2c9b15ca
PB
1699 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1700 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
0e0df1e2 1701 "unassigned", UINT64_MAX);
2c9b15ca 1702 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
0e0df1e2 1703 "notdirty", UINT64_MAX);
2c9b15ca 1704 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1ec9b909 1705 "watch", UINT64_MAX);
e9179ce1
AK
1706}
1707
ac1970fb 1708static void mem_begin(MemoryListener *listener)
00752703
PB
1709{
1710 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1711 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1712
1713 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1714 d->as = as;
1715 as->next_dispatch = d;
1716}
1717
1718static void mem_commit(MemoryListener *listener)
ac1970fb 1719{
89ae337a 1720 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1721 AddressSpaceDispatch *cur = as->dispatch;
1722 AddressSpaceDispatch *next = as->next_dispatch;
1723
1724 next->nodes = next_map.nodes;
1725 next->sections = next_map.sections;
ac1970fb 1726
0475d94f
PB
1727 as->dispatch = next;
1728 g_free(cur);
ac1970fb
AK
1729}
1730
50c1e149
AK
1731static void core_begin(MemoryListener *listener)
1732{
b41aac4f
LPF
1733 uint16_t n;
1734
6092666e
PB
1735 prev_map = g_new(PhysPageMap, 1);
1736 *prev_map = next_map;
1737
9affd6fc 1738 memset(&next_map, 0, sizeof(next_map));
b41aac4f
LPF
1739 n = dummy_section(&io_mem_unassigned);
1740 assert(n == PHYS_SECTION_UNASSIGNED);
1741 n = dummy_section(&io_mem_notdirty);
1742 assert(n == PHYS_SECTION_NOTDIRTY);
1743 n = dummy_section(&io_mem_rom);
1744 assert(n == PHYS_SECTION_ROM);
1745 n = dummy_section(&io_mem_watch);
1746 assert(n == PHYS_SECTION_WATCH);
50c1e149
AK
1747}
1748
9affd6fc
PB
1749/* This listener's commit run after the other AddressSpaceDispatch listeners'.
1750 * All AddressSpaceDispatch instances have switched to the next map.
1751 */
1752static void core_commit(MemoryListener *listener)
1753{
6092666e 1754 phys_sections_free(prev_map);
9affd6fc
PB
1755}
1756
1d71148e 1757static void tcg_commit(MemoryListener *listener)
50c1e149 1758{
9349b4f9 1759 CPUArchState *env;
117712c3
AK
1760
1761 /* since each CPU stores ram addresses in its TLB cache, we must
1762 reset the modified entries */
1763 /* XXX: slow ! */
 1764 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1765 tlb_flush(env, 1);
1766 }
50c1e149
AK
1767}
1768
93632747
AK
1769static void core_log_global_start(MemoryListener *listener)
1770{
1771 cpu_physical_memory_set_dirty_tracking(1);
1772}
1773
1774static void core_log_global_stop(MemoryListener *listener)
1775{
1776 cpu_physical_memory_set_dirty_tracking(0);
1777}
1778
93632747 1779static MemoryListener core_memory_listener = {
50c1e149 1780 .begin = core_begin,
9affd6fc 1781 .commit = core_commit,
93632747
AK
1782 .log_global_start = core_log_global_start,
1783 .log_global_stop = core_log_global_stop,
ac1970fb 1784 .priority = 1,
93632747
AK
1785};
1786
1d71148e
AK
1787static MemoryListener tcg_memory_listener = {
1788 .commit = tcg_commit,
1789};
1790
ac1970fb
AK
1791void address_space_init_dispatch(AddressSpace *as)
1792{
00752703 1793 as->dispatch = NULL;
89ae337a 1794 as->dispatch_listener = (MemoryListener) {
ac1970fb 1795 .begin = mem_begin,
00752703 1796 .commit = mem_commit,
ac1970fb
AK
1797 .region_add = mem_add,
1798 .region_nop = mem_add,
1799 .priority = 0,
1800 };
89ae337a 1801 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1802}
1803
83f3c251
AK
1804void address_space_destroy_dispatch(AddressSpace *as)
1805{
1806 AddressSpaceDispatch *d = as->dispatch;
1807
89ae337a 1808 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1809 g_free(d);
1810 as->dispatch = NULL;
1811}
1812
62152b8a
AK
1813static void memory_map_init(void)
1814{
7267c094 1815 system_memory = g_malloc(sizeof(*system_memory));
2c9b15ca 1816 memory_region_init(system_memory, NULL, "system", INT64_MAX);
7dca8043 1817 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1818
7267c094 1819 system_io = g_malloc(sizeof(*system_io));
2c9b15ca 1820 memory_region_init(system_io, NULL, "io", 65536);
7dca8043 1821 address_space_init(&address_space_io, system_io, "I/O");
93632747 1822
f6790af6 1823 memory_listener_register(&core_memory_listener, &address_space_memory);
f6790af6 1824 memory_listener_register(&tcg_memory_listener, &address_space_memory);
62152b8a
AK
1825}
1826
1827MemoryRegion *get_system_memory(void)
1828{
1829 return system_memory;
1830}
1831
309cb471
AK
1832MemoryRegion *get_system_io(void)
1833{
1834 return system_io;
1835}
1836
e2eef170
PB
1837#endif /* !defined(CONFIG_USER_ONLY) */
1838
13eb76e0
FB
1839/* physical memory access (slow version, mainly for debug) */
1840#if defined(CONFIG_USER_ONLY)
9349b4f9 1841int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1842 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1843{
1844 int l, flags;
1845 target_ulong page;
53a5960a 1846 void * p;
13eb76e0
FB
1847
1848 while (len > 0) {
1849 page = addr & TARGET_PAGE_MASK;
1850 l = (page + TARGET_PAGE_SIZE) - addr;
1851 if (l > len)
1852 l = len;
1853 flags = page_get_flags(page);
1854 if (!(flags & PAGE_VALID))
a68fe89c 1855 return -1;
13eb76e0
FB
1856 if (is_write) {
1857 if (!(flags & PAGE_WRITE))
a68fe89c 1858 return -1;
579a97f7 1859 /* XXX: this code should not depend on lock_user */
72fb7daa 1860 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1861 return -1;
72fb7daa
AJ
1862 memcpy(p, buf, l);
1863 unlock_user(p, addr, l);
13eb76e0
FB
1864 } else {
1865 if (!(flags & PAGE_READ))
a68fe89c 1866 return -1;
579a97f7 1867 /* XXX: this code should not depend on lock_user */
72fb7daa 1868 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1869 return -1;
72fb7daa 1870 memcpy(buf, p, l);
5b257578 1871 unlock_user(p, addr, 0);
13eb76e0
FB
1872 }
1873 len -= l;
1874 buf += l;
1875 addr += l;
1876 }
a68fe89c 1877 return 0;
13eb76e0 1878}
8df1cd07 1879
13eb76e0 1880#else
51d7a9eb 1881
a8170e5e
AK
1882static void invalidate_and_set_dirty(hwaddr addr,
1883 hwaddr length)
51d7a9eb
AP
1884{
1885 if (!cpu_physical_memory_is_dirty(addr)) {
1886 /* invalidate code */
1887 tb_invalidate_phys_page_range(addr, addr + length, 0);
1888 /* set dirty bit */
1889 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1890 }
e226939d 1891 xen_modified_memory(addr, length);
51d7a9eb
AP
1892}
1893
2bbfa05d
PB
1894static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1895{
1896 if (memory_region_is_ram(mr)) {
1897 return !(is_write && mr->readonly);
1898 }
1899 if (memory_region_is_romd(mr)) {
1900 return !is_write;
1901 }
1902
1903 return false;
1904}
1905
f52cc467 1906static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
82f2563f 1907{
f52cc467 1908 if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) {
82f2563f
PB
1909 return 4;
1910 }
f52cc467 1911 if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
82f2563f
PB
1912 return 2;
1913 }
1914 return 1;
1915}
1916
fd8aaa76 1917bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1918 int len, bool is_write)
13eb76e0 1919{
149f54b5 1920 hwaddr l;
13eb76e0 1921 uint8_t *ptr;
791af8c8 1922 uint64_t val;
149f54b5 1923 hwaddr addr1;
5c8a00ce 1924 MemoryRegion *mr;
fd8aaa76 1925 bool error = false;
3b46e624 1926
13eb76e0 1927 while (len > 0) {
149f54b5 1928 l = len;
5c8a00ce 1929 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 1930
13eb76e0 1931 if (is_write) {
5c8a00ce
PB
1932 if (!memory_access_is_direct(mr, is_write)) {
1933 l = memory_access_size(mr, l, addr1);
6a00d601
FB
1934 /* XXX: could force cpu_single_env to NULL to avoid
1935 potential bugs */
82f2563f 1936 if (l == 4) {
1c213d19 1937 /* 32 bit write access */
c27004ec 1938 val = ldl_p(buf);
5c8a00ce 1939 error |= io_mem_write(mr, addr1, val, 4);
82f2563f 1940 } else if (l == 2) {
1c213d19 1941 /* 16 bit write access */
c27004ec 1942 val = lduw_p(buf);
5c8a00ce 1943 error |= io_mem_write(mr, addr1, val, 2);
13eb76e0 1944 } else {
1c213d19 1945 /* 8 bit write access */
c27004ec 1946 val = ldub_p(buf);
5c8a00ce 1947 error |= io_mem_write(mr, addr1, val, 1);
13eb76e0 1948 }
2bbfa05d 1949 } else {
5c8a00ce 1950 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 1951 /* RAM case */
5579c7f3 1952 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1953 memcpy(ptr, buf, l);
51d7a9eb 1954 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1955 }
1956 } else {
5c8a00ce 1957 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 1958 /* I/O case */
5c8a00ce 1959 l = memory_access_size(mr, l, addr1);
82f2563f 1960 if (l == 4) {
13eb76e0 1961 /* 32 bit read access */
5c8a00ce 1962 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 1963 stl_p(buf, val);
82f2563f 1964 } else if (l == 2) {
13eb76e0 1965 /* 16 bit read access */
5c8a00ce 1966 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 1967 stw_p(buf, val);
13eb76e0 1968 } else {
1c213d19 1969 /* 8 bit read access */
5c8a00ce 1970 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 1971 stb_p(buf, val);
13eb76e0
FB
1972 }
1973 } else {
1974 /* RAM case */
5c8a00ce 1975 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 1976 memcpy(buf, ptr, l);
13eb76e0
FB
1977 }
1978 }
1979 len -= l;
1980 buf += l;
1981 addr += l;
1982 }
fd8aaa76
PB
1983
1984 return error;
13eb76e0 1985}
8df1cd07 1986
fd8aaa76 1987bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1988 const uint8_t *buf, int len)
1989{
fd8aaa76 1990 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
1991}
1992
fd8aaa76 1993bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 1994{
fd8aaa76 1995 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
1996}
1997
1998
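/* Illustrative sketch only (not part of the original source): a minimal
 * round trip through the address_space_write()/address_space_read()
 * helpers above.  The function name and the guest address 0x1000 are made
 * up for the example; the attribute just silences unused-function warnings.
 */
static void __attribute__((unused))
example_address_space_roundtrip(void)
{
    uint8_t out[16] = "hello, guest";
    uint8_t in[16];
    bool err = false;

    /* Writes land directly in guest RAM when the translated region is
     * RAM/ROMD; otherwise they are split into 1/2/4 byte io_mem_write()
     * accesses by address_space_rw(). */
    err |= address_space_write(&address_space_memory, 0x1000, out, sizeof(out));
    err |= address_space_read(&address_space_memory, 0x1000, in, sizeof(in));

    if (err) {
        /* some part of the range was not backed by accessible memory */
    }
}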
a8170e5e 1999void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2000 int len, int is_write)
2001{
fd8aaa76 2002 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2003}
2004
d0ecd2aa 2005/* used for ROM loading : can write in RAM and ROM */
a8170e5e 2006void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
2007 const uint8_t *buf, int len)
2008{
149f54b5 2009 hwaddr l;
d0ecd2aa 2010 uint8_t *ptr;
149f54b5 2011 hwaddr addr1;
5c8a00ce 2012 MemoryRegion *mr;
3b46e624 2013
d0ecd2aa 2014 while (len > 0) {
149f54b5 2015 l = len;
5c8a00ce
PB
2016 mr = address_space_translate(&address_space_memory,
2017 addr, &addr1, &l, true);
3b46e624 2018
5c8a00ce
PB
2019 if (!(memory_region_is_ram(mr) ||
2020 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2021 /* do nothing */
2022 } else {
5c8a00ce 2023 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2024 /* ROM/RAM case */
5579c7f3 2025 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2026 memcpy(ptr, buf, l);
51d7a9eb 2027 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2028 }
2029 len -= l;
2030 buf += l;
2031 addr += l;
2032 }
2033}
2034
6d16c2f8 2035typedef struct {
d3e71559 2036 MemoryRegion *mr;
6d16c2f8 2037 void *buffer;
a8170e5e
AK
2038 hwaddr addr;
2039 hwaddr len;
6d16c2f8
AL
2040} BounceBuffer;
2041
2042static BounceBuffer bounce;
2043
ba223c29
AL
2044typedef struct MapClient {
2045 void *opaque;
2046 void (*callback)(void *opaque);
72cf2d4f 2047 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2048} MapClient;
2049
72cf2d4f
BS
2050static QLIST_HEAD(map_client_list, MapClient) map_client_list
2051 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2052
2053void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2054{
7267c094 2055 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2056
2057 client->opaque = opaque;
2058 client->callback = callback;
72cf2d4f 2059 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2060 return client;
2061}
2062
8b9c99d9 2063static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2064{
2065 MapClient *client = (MapClient *)_client;
2066
72cf2d4f 2067 QLIST_REMOVE(client, link);
7267c094 2068 g_free(client);
ba223c29
AL
2069}
2070
2071static void cpu_notify_map_clients(void)
2072{
2073 MapClient *client;
2074
72cf2d4f
BS
2075 while (!QLIST_EMPTY(&map_client_list)) {
2076 client = QLIST_FIRST(&map_client_list);
ba223c29 2077 client->callback(client->opaque);
34d5e948 2078 cpu_unregister_map_client(client);
ba223c29
AL
2079 }
2080}
2081
51644ab7
PB
2082bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2083{
5c8a00ce 2084 MemoryRegion *mr;
51644ab7
PB
2085 hwaddr l, xlat;
2086
2087 while (len > 0) {
2088 l = len;
5c8a00ce
PB
2089 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2090 if (!memory_access_is_direct(mr, is_write)) {
2091 l = memory_access_size(mr, l, addr);
2092 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2093 return false;
2094 }
2095 }
2096
2097 len -= l;
2098 addr += l;
2099 }
2100 return true;
2101}
2102
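/* Illustrative sketch only (not in the original file): probing a range with
 * address_space_access_valid() before committing to an access, as a device
 * model might do when it cannot tolerate a partially failing DMA write.
 * "example_checked_write" is a hypothetical helper name.
 */
static bool __attribute__((unused))
example_checked_write(AddressSpace *as, hwaddr addr,
                      const uint8_t *buf, int len)
{
    if (!address_space_access_valid(as, addr, len, true)) {
        return false;            /* refuse rather than write partially */
    }
    /* address_space_write() still returns an error flag; a concurrent
     * topology change could in principle invalidate the probe above. */
    return !address_space_write(as, addr, buf, len);
}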
6d16c2f8
AL
2103/* Map a physical memory region into a host virtual address.
2104 * May map a subset of the requested range, given by and returned in *plen.
2105 * May return NULL if resources needed to perform the mapping are exhausted.
2106 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2107 * Use cpu_register_map_client() to know when retrying the map operation is
2108 * likely to succeed.
6d16c2f8 2109 */
ac1970fb 2110void *address_space_map(AddressSpace *as,
a8170e5e
AK
2111 hwaddr addr,
2112 hwaddr *plen,
ac1970fb 2113 bool is_write)
6d16c2f8 2114{
a8170e5e 2115 hwaddr len = *plen;
e3127ae0
PB
2116 hwaddr done = 0;
2117 hwaddr l, xlat, base;
2118 MemoryRegion *mr, *this_mr;
2119 ram_addr_t raddr;
6d16c2f8 2120
e3127ae0
PB
2121 if (len == 0) {
2122 return NULL;
2123 }
38bee5dc 2124
e3127ae0
PB
2125 l = len;
2126 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2127 if (!memory_access_is_direct(mr, is_write)) {
2128 if (bounce.buffer) {
2129 return NULL;
6d16c2f8 2130 }
e3127ae0
PB
2131 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2132 bounce.addr = addr;
2133 bounce.len = l;
d3e71559
PB
2134
2135 memory_region_ref(mr);
2136 bounce.mr = mr;
e3127ae0
PB
2137 if (!is_write) {
2138 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2139 }
6d16c2f8 2140
e3127ae0
PB
2141 *plen = l;
2142 return bounce.buffer;
2143 }
2144
2145 base = xlat;
2146 raddr = memory_region_get_ram_addr(mr);
2147
2148 for (;;) {
6d16c2f8
AL
2149 len -= l;
2150 addr += l;
e3127ae0
PB
2151 done += l;
2152 if (len == 0) {
2153 break;
2154 }
2155
2156 l = len;
2157 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2158 if (this_mr != mr || xlat != base + done) {
2159 break;
2160 }
6d16c2f8 2161 }
e3127ae0 2162
d3e71559 2163 memory_region_ref(mr);
e3127ae0
PB
2164 *plen = done;
2165 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2166}
2167
ac1970fb 2168/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2169 * Will also mark the memory as dirty if is_write == 1. access_len gives
2170 * the amount of memory that was actually read or written by the caller.
2171 */
a8170e5e
AK
2172void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2173 int is_write, hwaddr access_len)
6d16c2f8
AL
2174{
2175 if (buffer != bounce.buffer) {
d3e71559
PB
2176 MemoryRegion *mr;
2177 ram_addr_t addr1;
2178
2179 mr = qemu_ram_addr_from_host(buffer, &addr1);
2180 assert(mr != NULL);
6d16c2f8 2181 if (is_write) {
6d16c2f8
AL
2182 while (access_len) {
2183 unsigned l;
2184 l = TARGET_PAGE_SIZE;
2185 if (l > access_len)
2186 l = access_len;
51d7a9eb 2187 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2188 addr1 += l;
2189 access_len -= l;
2190 }
2191 }
868bb33f 2192 if (xen_enabled()) {
e41d7c69 2193 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2194 }
d3e71559 2195 memory_region_unref(mr);
6d16c2f8
AL
2196 return;
2197 }
2198 if (is_write) {
ac1970fb 2199 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2200 }
f8a83245 2201 qemu_vfree(bounce.buffer);
6d16c2f8 2202 bounce.buffer = NULL;
d3e71559 2203 memory_region_unref(bounce.mr);
ba223c29 2204 cpu_notify_map_clients();
6d16c2f8 2205}
d0ecd2aa 2206
a8170e5e
AK
2207void *cpu_physical_memory_map(hwaddr addr,
2208 hwaddr *plen,
ac1970fb
AK
2209 int is_write)
2210{
2211 return address_space_map(&address_space_memory, addr, plen, is_write);
2212}
2213
a8170e5e
AK
2214void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2215 int is_write, hwaddr access_len)
ac1970fb
AK
2216{
2217 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2218}
2219
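/* Illustrative sketch only (not in the original file): the intended
 * map/use/unmap pattern for zero-copy access.  The mapping may be shortened
 * (*plen on return can be smaller than requested) or fail entirely while the
 * single bounce buffer is busy, in which case cpu_register_map_client()
 * tells the caller when a retry is worthwhile.  Names are illustrative.
 */
static void __attribute__((unused))
example_map_and_fill(hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        return;                  /* resources exhausted; retry later */
    }
    memset(host, 0, plen);       /* touch only the length actually mapped */
    /* access_len == plen marks the touched pages dirty and invalidates
     * any translated code that lived there. */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}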
8df1cd07 2220/* warning: addr must be aligned */
a8170e5e 2221static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2222 enum device_endian endian)
8df1cd07 2223{
8df1cd07 2224 uint8_t *ptr;
791af8c8 2225 uint64_t val;
5c8a00ce 2226 MemoryRegion *mr;
149f54b5
PB
2227 hwaddr l = 4;
2228 hwaddr addr1;
8df1cd07 2229
5c8a00ce
PB
2230 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2231 false);
2232 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2233 /* I/O case */
5c8a00ce 2234 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2235#if defined(TARGET_WORDS_BIGENDIAN)
2236 if (endian == DEVICE_LITTLE_ENDIAN) {
2237 val = bswap32(val);
2238 }
2239#else
2240 if (endian == DEVICE_BIG_ENDIAN) {
2241 val = bswap32(val);
2242 }
2243#endif
8df1cd07
FB
2244 } else {
2245 /* RAM case */
5c8a00ce 2246 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2247 & TARGET_PAGE_MASK)
149f54b5 2248 + addr1);
1e78bcc1
AG
2249 switch (endian) {
2250 case DEVICE_LITTLE_ENDIAN:
2251 val = ldl_le_p(ptr);
2252 break;
2253 case DEVICE_BIG_ENDIAN:
2254 val = ldl_be_p(ptr);
2255 break;
2256 default:
2257 val = ldl_p(ptr);
2258 break;
2259 }
8df1cd07
FB
2260 }
2261 return val;
2262}
2263
a8170e5e 2264uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2265{
2266 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2267}
2268
a8170e5e 2269uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2270{
2271 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2272}
2273
a8170e5e 2274uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2275{
2276 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2277}
2278
84b7b8e7 2279/* warning: addr must be aligned */
a8170e5e 2280static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2281 enum device_endian endian)
84b7b8e7 2282{
84b7b8e7
FB
2283 uint8_t *ptr;
2284 uint64_t val;
5c8a00ce 2285 MemoryRegion *mr;
149f54b5
PB
2286 hwaddr l = 8;
2287 hwaddr addr1;
84b7b8e7 2288
5c8a00ce
PB
2289 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2290 false);
2291 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2292 /* I/O case */
5c8a00ce 2293 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2294#if defined(TARGET_WORDS_BIGENDIAN)
2295 if (endian == DEVICE_LITTLE_ENDIAN) {
2296 val = bswap64(val);
2297 }
2298#else
2299 if (endian == DEVICE_BIG_ENDIAN) {
2300 val = bswap64(val);
2301 }
84b7b8e7
FB
2302#endif
2303 } else {
2304 /* RAM case */
5c8a00ce 2305 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2306 & TARGET_PAGE_MASK)
149f54b5 2307 + addr1);
1e78bcc1
AG
2308 switch (endian) {
2309 case DEVICE_LITTLE_ENDIAN:
2310 val = ldq_le_p(ptr);
2311 break;
2312 case DEVICE_BIG_ENDIAN:
2313 val = ldq_be_p(ptr);
2314 break;
2315 default:
2316 val = ldq_p(ptr);
2317 break;
2318 }
84b7b8e7
FB
2319 }
2320 return val;
2321}
2322
a8170e5e 2323uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2324{
2325 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2326}
2327
a8170e5e 2328uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2329{
2330 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2331}
2332
a8170e5e 2333uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2334{
2335 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2336}
2337
aab33094 2338/* XXX: optimize */
a8170e5e 2339uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2340{
2341 uint8_t val;
2342 cpu_physical_memory_read(addr, &val, 1);
2343 return val;
2344}
2345
733f0b02 2346/* warning: addr must be aligned */
a8170e5e 2347static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2348 enum device_endian endian)
aab33094 2349{
733f0b02
MT
2350 uint8_t *ptr;
2351 uint64_t val;
5c8a00ce 2352 MemoryRegion *mr;
149f54b5
PB
2353 hwaddr l = 2;
2354 hwaddr addr1;
733f0b02 2355
5c8a00ce
PB
2356 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2357 false);
2358 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2359 /* I/O case */
5c8a00ce 2360 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2361#if defined(TARGET_WORDS_BIGENDIAN)
2362 if (endian == DEVICE_LITTLE_ENDIAN) {
2363 val = bswap16(val);
2364 }
2365#else
2366 if (endian == DEVICE_BIG_ENDIAN) {
2367 val = bswap16(val);
2368 }
2369#endif
733f0b02
MT
2370 } else {
2371 /* RAM case */
5c8a00ce 2372 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2373 & TARGET_PAGE_MASK)
149f54b5 2374 + addr1);
1e78bcc1
AG
2375 switch (endian) {
2376 case DEVICE_LITTLE_ENDIAN:
2377 val = lduw_le_p(ptr);
2378 break;
2379 case DEVICE_BIG_ENDIAN:
2380 val = lduw_be_p(ptr);
2381 break;
2382 default:
2383 val = lduw_p(ptr);
2384 break;
2385 }
733f0b02
MT
2386 }
2387 return val;
aab33094
FB
2388}
2389
a8170e5e 2390uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2391{
2392 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2393}
2394
a8170e5e 2395uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2396{
2397 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2398}
2399
a8170e5e 2400uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2401{
2402 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2403}
2404
8df1cd07
FB
 2405/* warning: addr must be aligned. The ram page is not marked as dirty
2406 and the code inside is not invalidated. It is useful if the dirty
2407 bits are used to track modified PTEs */
a8170e5e 2408void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2409{
8df1cd07 2410 uint8_t *ptr;
5c8a00ce 2411 MemoryRegion *mr;
149f54b5
PB
2412 hwaddr l = 4;
2413 hwaddr addr1;
8df1cd07 2414
5c8a00ce
PB
2415 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2416 true);
2417 if (l < 4 || !memory_access_is_direct(mr, true)) {
2418 io_mem_write(mr, addr1, val, 4);
8df1cd07 2419 } else {
5c8a00ce 2420 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2421 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2422 stl_p(ptr, val);
74576198
AL
2423
2424 if (unlikely(in_migration)) {
2425 if (!cpu_physical_memory_is_dirty(addr1)) {
2426 /* invalidate code */
2427 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2428 /* set dirty bit */
f7c11b53
YT
2429 cpu_physical_memory_set_dirty_flags(
2430 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2431 }
2432 }
8df1cd07
FB
2433 }
2434}
2435
2436/* warning: addr must be aligned */
a8170e5e 2437static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2438 enum device_endian endian)
8df1cd07 2439{
8df1cd07 2440 uint8_t *ptr;
5c8a00ce 2441 MemoryRegion *mr;
149f54b5
PB
2442 hwaddr l = 4;
2443 hwaddr addr1;
8df1cd07 2444
5c8a00ce
PB
2445 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2446 true);
2447 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2448#if defined(TARGET_WORDS_BIGENDIAN)
2449 if (endian == DEVICE_LITTLE_ENDIAN) {
2450 val = bswap32(val);
2451 }
2452#else
2453 if (endian == DEVICE_BIG_ENDIAN) {
2454 val = bswap32(val);
2455 }
2456#endif
5c8a00ce 2457 io_mem_write(mr, addr1, val, 4);
8df1cd07 2458 } else {
8df1cd07 2459 /* RAM case */
5c8a00ce 2460 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2461 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2462 switch (endian) {
2463 case DEVICE_LITTLE_ENDIAN:
2464 stl_le_p(ptr, val);
2465 break;
2466 case DEVICE_BIG_ENDIAN:
2467 stl_be_p(ptr, val);
2468 break;
2469 default:
2470 stl_p(ptr, val);
2471 break;
2472 }
51d7a9eb 2473 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2474 }
2475}
2476
a8170e5e 2477void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2478{
2479 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2480}
2481
a8170e5e 2482void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2483{
2484 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2485}
2486
a8170e5e 2487void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2488{
2489 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2490}
2491
aab33094 2492/* XXX: optimize */
a8170e5e 2493void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2494{
2495 uint8_t v = val;
2496 cpu_physical_memory_write(addr, &v, 1);
2497}
2498
733f0b02 2499/* warning: addr must be aligned */
a8170e5e 2500static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2501 enum device_endian endian)
aab33094 2502{
733f0b02 2503 uint8_t *ptr;
5c8a00ce 2504 MemoryRegion *mr;
149f54b5
PB
2505 hwaddr l = 2;
2506 hwaddr addr1;
733f0b02 2507
5c8a00ce
PB
2508 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2509 true);
2510 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2511#if defined(TARGET_WORDS_BIGENDIAN)
2512 if (endian == DEVICE_LITTLE_ENDIAN) {
2513 val = bswap16(val);
2514 }
2515#else
2516 if (endian == DEVICE_BIG_ENDIAN) {
2517 val = bswap16(val);
2518 }
2519#endif
5c8a00ce 2520 io_mem_write(mr, addr1, val, 2);
733f0b02 2521 } else {
733f0b02 2522 /* RAM case */
5c8a00ce 2523 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2524 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2525 switch (endian) {
2526 case DEVICE_LITTLE_ENDIAN:
2527 stw_le_p(ptr, val);
2528 break;
2529 case DEVICE_BIG_ENDIAN:
2530 stw_be_p(ptr, val);
2531 break;
2532 default:
2533 stw_p(ptr, val);
2534 break;
2535 }
51d7a9eb 2536 invalidate_and_set_dirty(addr1, 2);
733f0b02 2537 }
aab33094
FB
2538}
2539
a8170e5e 2540void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2541{
2542 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2543}
2544
a8170e5e 2545void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2546{
2547 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2548}
2549
a8170e5e 2550void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2551{
2552 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2553}
2554
aab33094 2555/* XXX: optimize */
a8170e5e 2556void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2557{
2558 val = tswap64(val);
71d2b725 2559 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2560}
2561
a8170e5e 2562void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2563{
2564 val = cpu_to_le64(val);
2565 cpu_physical_memory_write(addr, &val, 8);
2566}
2567
a8170e5e 2568void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2569{
2570 val = cpu_to_be64(val);
2571 cpu_physical_memory_write(addr, &val, 8);
2572}
2573
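/* Illustrative sketch only (not in the original file): the *_le_/*_be_
 * accessors above always store and load in the named byte order, while the
 * unsuffixed variants use the target's native order.  The address and the
 * constant are arbitrary example values.
 */
static void __attribute__((unused))
example_endian_accessors(hwaddr addr)
{
    uint32_t le, be;

    stl_le_phys(addr, 0x11223344);   /* bytes 44 33 22 11 land in memory */
    le = ldl_le_phys(addr);          /* 0x11223344 again */
    be = ldl_be_phys(addr);          /* 0x44332211: same bytes, BE view */
    (void)le;
    (void)be;
}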
5e2972fd 2574/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2575int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2576 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2577{
2578 int l;
a8170e5e 2579 hwaddr phys_addr;
9b3c35e0 2580 target_ulong page;
13eb76e0
FB
2581
2582 while (len > 0) {
2583 page = addr & TARGET_PAGE_MASK;
2584 phys_addr = cpu_get_phys_page_debug(env, page);
2585 /* if no physical page mapped, return an error */
2586 if (phys_addr == -1)
2587 return -1;
2588 l = (page + TARGET_PAGE_SIZE) - addr;
2589 if (l > len)
2590 l = len;
5e2972fd 2591 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2592 if (is_write)
2593 cpu_physical_memory_write_rom(phys_addr, buf, l);
2594 else
5e2972fd 2595 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2596 len -= l;
2597 buf += l;
2598 addr += l;
2599 }
2600 return 0;
2601}
a68fe89c 2602#endif
13eb76e0 2603
8e4a424b
BS
2604#if !defined(CONFIG_USER_ONLY)
2605
2606/*
2607 * A helper function for the _utterly broken_ virtio device model to find out if
2608 * it's running on a big endian machine. Don't do this at home kids!
2609 */
2610bool virtio_is_big_endian(void);
2611bool virtio_is_big_endian(void)
2612{
2613#if defined(TARGET_WORDS_BIGENDIAN)
2614 return true;
2615#else
2616 return false;
2617#endif
2618}
2619
2620#endif
2621
76f35538 2622#ifndef CONFIG_USER_ONLY
a8170e5e 2623bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2624{
5c8a00ce 2625 MemoryRegion *mr;
149f54b5 2626 hwaddr l = 1;
76f35538 2627
5c8a00ce
PB
2628 mr = address_space_translate(&address_space_memory,
2629 phys_addr, &phys_addr, &l, false);
76f35538 2630
5c8a00ce
PB
2631 return !(memory_region_is_ram(mr) ||
2632 memory_region_is_romd(mr));
76f35538 2633}
bd2fa51f
MH
2634
2635void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2636{
2637 RAMBlock *block;
2638
2639 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2640 func(block->host, block->offset, block->length, opaque);
2641 }
2642}
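/* Illustrative sketch only (not in the original file): a callback for
 * qemu_ram_foreach_block() that just sums the size of all RAM blocks.  The
 * parameter types are assumed from the invocation above (host pointer,
 * offset, length, opaque); the names are made up for the example.
 */
static void __attribute__((unused))
example_count_ram(void *host_addr, ram_addr_t offset, ram_addr_t length,
                  void *opaque)
{
    ram_addr_t *total = opaque;

    (void)host_addr;
    (void)offset;
    *total += length;
}
/* Usage sketch: ram_addr_t total = 0;
 *               qemu_ram_foreach_block(example_count_ram, &total); */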
ec3f8c99 2643#endif