/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}
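
/* Illustrative sketch (not part of the original file): the dispatch map is a
 * radix tree with P_L2_LEVELS levels, each level consuming L2_BITS bits of
 * the page index.  Registering a section and finding it again pairs up
 * roughly as follows, with nodes/sections taken from next_map while the map
 * is being built:
 *
 *     uint16_t idx = phys_section_add(&section);
 *     phys_page_set(d, paddr >> TARGET_PAGE_BITS,
 *                   size >> TARGET_PAGE_BITS, idx);
 *     MemoryRegionSection *s = phys_page_find(d->phys_map,
 *                                             paddr >> TARGET_PAGE_BITS,
 *                                             next_map.nodes,
 *                                             next_map.sections);
 *
 * Pages that were never set resolve to sections[PHYS_SECTION_UNASSIGNED].
 */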

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
149f54b5 231
c7086b4a 232static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
233 hwaddr addr,
234 bool resolve_subpage)
9f029603 235{
90260c6c
JK
236 MemoryRegionSection *section;
237 subpage_t *subpage;
238
0475d94f
PB
239 section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
240 d->nodes, d->sections);
90260c6c
JK
241 if (resolve_subpage && section->mr->subpage) {
242 subpage = container_of(section->mr, subpage_t, iomem);
0475d94f 243 section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
244 }
245 return section;
9f029603
JK
246}
247
90260c6c 248static MemoryRegionSection *
c7086b4a 249address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 250 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
251{
252 MemoryRegionSection *section;
253 Int128 diff;
254
c7086b4a 255 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
256 /* Compute offset within MemoryRegionSection */
257 addr -= section->offset_within_address_space;
258
259 /* Compute offset within MemoryRegion */
260 *xlat = addr + section->offset_within_region;
261
262 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 263 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
264 return section;
265}
90260c6c 266
5c8a00ce
PB
267MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
268 hwaddr *xlat, hwaddr *plen,
269 bool is_write)
90260c6c 270{
30951157
AK
271 IOMMUTLBEntry iotlb;
272 MemoryRegionSection *section;
273 MemoryRegion *mr;
274 hwaddr len = *plen;
275
276 for (;;) {
c7086b4a 277 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157
AK
278 mr = section->mr;
279
280 if (!mr->iommu_ops) {
281 break;
282 }
283
284 iotlb = mr->iommu_ops->translate(mr, addr);
285 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
286 | (addr & iotlb.addr_mask));
287 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
288 if (!(iotlb.perm & (1 << is_write))) {
289 mr = &io_mem_unassigned;
290 break;
291 }
292
293 as = iotlb.target_as;
294 }
295
296 *plen = len;
297 *xlat = addr;
298 return mr;
90260c6c
JK
299}
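
/* Usage sketch (illustrative): callers such as address_space_rw() translate
 * and then clamp the length the returned region can serve in one step:
 *
 *     hwaddr xlat, l = len;
 *     MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l,
 *                                                is_write);
 *     // now access at most 'l' bytes of 'mr' starting at offset 'xlat'
 *
 * The loop above keeps following iommu_ops->translate() until it reaches a
 * region without an IOMMU, or yields io_mem_unassigned when the IOMMU entry
 * denies the requested permission.
 */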
300
301MemoryRegionSection *
302address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
303 hwaddr *plen)
304{
30951157 305 MemoryRegionSection *section;
c7086b4a 306 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
30951157
AK
307
308 assert(!section->mr->iommu_ops);
309 return section;
90260c6c 310}
5b6dd868 311#endif
fd6ce8f6 312
5b6dd868 313void cpu_exec_init_all(void)
fdbb84d1 314{
5b6dd868 315#if !defined(CONFIG_USER_ONLY)
b2a8658e 316 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
317 memory_map_init();
318 io_mem_init();
fdbb84d1 319#endif
5b6dd868 320}
fdbb84d1 321
b170fce3 322#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
323
324static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 325{
259186a7 326 CPUState *cpu = opaque;
a513fe19 327
5b6dd868
BS
328 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
329 version_id is increased. */
259186a7
AF
330 cpu->interrupt_request &= ~0x01;
331 tlb_flush(cpu->env_ptr, 1);
5b6dd868
BS
332
333 return 0;
a513fe19 334}
7501267e 335
1a1562f5 336const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
337 .name = "cpu_common",
338 .version_id = 1,
339 .minimum_version_id = 1,
340 .minimum_version_id_old = 1,
341 .post_load = cpu_common_post_load,
342 .fields = (VMStateField []) {
259186a7
AF
343 VMSTATE_UINT32(halted, CPUState),
344 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868
BS
345 VMSTATE_END_OF_LIST()
346 }
347};
1a1562f5 348
5b6dd868 349#endif
ea041c0e 350
38d8f5c8 351CPUState *qemu_get_cpu(int index)
ea041c0e 352{
5b6dd868 353 CPUArchState *env = first_cpu;
38d8f5c8 354 CPUState *cpu = NULL;
ea041c0e 355
5b6dd868 356 while (env) {
55e5c285
AF
357 cpu = ENV_GET_CPU(env);
358 if (cpu->cpu_index == index) {
5b6dd868 359 break;
55e5c285 360 }
5b6dd868 361 env = env->next_cpu;
ea041c0e 362 }
5b6dd868 363
d76fddae 364 return env ? cpu : NULL;
ea041c0e
FB
365}
366
d6b9e0d6
MT
367void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
368{
369 CPUArchState *env = first_cpu;
370
371 while (env) {
372 func(ENV_GET_CPU(env), data);
373 env = env->next_cpu;
374 }
375}
376
5b6dd868 377void cpu_exec_init(CPUArchState *env)
ea041c0e 378{
5b6dd868 379 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 380 CPUClass *cc = CPU_GET_CLASS(cpu);
5b6dd868
BS
381 CPUArchState **penv;
382 int cpu_index;
383
384#if defined(CONFIG_USER_ONLY)
385 cpu_list_lock();
386#endif
387 env->next_cpu = NULL;
388 penv = &first_cpu;
389 cpu_index = 0;
390 while (*penv != NULL) {
391 penv = &(*penv)->next_cpu;
392 cpu_index++;
393 }
55e5c285 394 cpu->cpu_index = cpu_index;
1b1ed8dc 395 cpu->numa_node = 0;
5b6dd868
BS
396 QTAILQ_INIT(&env->breakpoints);
397 QTAILQ_INIT(&env->watchpoints);
398#ifndef CONFIG_USER_ONLY
399 cpu->thread_id = qemu_get_thread_id();
400#endif
401 *penv = env;
402#if defined(CONFIG_USER_ONLY)
403 cpu_list_unlock();
404#endif
259186a7 405 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
5b6dd868 406#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
407 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
408 cpu_save, cpu_load, env);
b170fce3 409 assert(cc->vmsd == NULL);
5b6dd868 410#endif
b170fce3
AF
411 if (cc->vmsd != NULL) {
412 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
413 }
ea041c0e
FB
414}
415
1fddef4b 416#if defined(TARGET_HAS_ICE)
94df27fd 417#if defined(CONFIG_USER_ONLY)
9349b4f9 418static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
94df27fd
PB
419{
420 tb_invalidate_phys_page_range(pc, pc + 1, 0);
421}
422#else
1e7855a5
MF
423static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
424{
9d70c4b7
MF
425 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
426 (pc & ~TARGET_PAGE_MASK));
1e7855a5 427}
c27004ec 428#endif
94df27fd 429#endif /* TARGET_HAS_ICE */
d720b93d 430
c527ee8f 431#if defined(CONFIG_USER_ONLY)
9349b4f9 432void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f
PB
433
434{
435}
436
9349b4f9 437int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f
PB
438 int flags, CPUWatchpoint **watchpoint)
439{
440 return -ENOSYS;
441}
442#else
6658ffb8 443/* Add a watchpoint. */
9349b4f9 444int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 445 int flags, CPUWatchpoint **watchpoint)
6658ffb8 446{
b4051334 447 target_ulong len_mask = ~(len - 1);
c0ce998e 448 CPUWatchpoint *wp;
6658ffb8 449
b4051334 450 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
451 if ((len & (len - 1)) || (addr & ~len_mask) ||
452 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
453 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
454 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
455 return -EINVAL;
456 }
7267c094 457 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
458
459 wp->vaddr = addr;
b4051334 460 wp->len_mask = len_mask;
a1d1bb31
AL
461 wp->flags = flags;
462
2dc9f411 463 /* keep all GDB-injected watchpoints in front */
c0ce998e 464 if (flags & BP_GDB)
72cf2d4f 465 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 466 else
72cf2d4f 467 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 468
6658ffb8 469 tlb_flush_page(env, addr);
a1d1bb31
AL
470
471 if (watchpoint)
472 *watchpoint = wp;
473 return 0;
6658ffb8
PB
474}
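
/* Illustrative example (hypothetical caller, not part of this file): a gdb
 * stub would request a 4-byte write watchpoint like this:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp)) {
 *         // -EINVAL: len was not a power of two, or addr not len-aligned
 *     }
 *
 * The length must be a power of two no larger than TARGET_PAGE_SIZE and the
 * address aligned to it, because the hit test in check_watchpoint() only
 * uses len_mask.
 */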
475
a1d1bb31 476/* Remove a specific watchpoint. */
9349b4f9 477int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 478 int flags)
6658ffb8 479{
b4051334 480 target_ulong len_mask = ~(len - 1);
a1d1bb31 481 CPUWatchpoint *wp;
6658ffb8 482
72cf2d4f 483 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 484 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 485 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 486 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
487 return 0;
488 }
489 }
a1d1bb31 490 return -ENOENT;
6658ffb8
PB
491}
492
a1d1bb31 493/* Remove a specific watchpoint by reference. */
9349b4f9 494void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 495{
72cf2d4f 496 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 497
a1d1bb31
AL
498 tlb_flush_page(env, watchpoint->vaddr);
499
7267c094 500 g_free(watchpoint);
a1d1bb31
AL
501}
502
503/* Remove all matching watchpoints. */
9349b4f9 504void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 505{
c0ce998e 506 CPUWatchpoint *wp, *next;
a1d1bb31 507
72cf2d4f 508 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
509 if (wp->flags & mask)
510 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 511 }
7d03f82f 512}
c527ee8f 513#endif
7d03f82f 514
a1d1bb31 515/* Add a breakpoint. */
9349b4f9 516int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 517 CPUBreakpoint **breakpoint)
4c3a88a2 518{
1fddef4b 519#if defined(TARGET_HAS_ICE)
c0ce998e 520 CPUBreakpoint *bp;
3b46e624 521
7267c094 522 bp = g_malloc(sizeof(*bp));
4c3a88a2 523
a1d1bb31
AL
524 bp->pc = pc;
525 bp->flags = flags;
526
2dc9f411 527 /* keep all GDB-injected breakpoints in front */
c0ce998e 528 if (flags & BP_GDB)
72cf2d4f 529 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 530 else
72cf2d4f 531 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 532
d720b93d 533 breakpoint_invalidate(env, pc);
a1d1bb31
AL
534
535 if (breakpoint)
536 *breakpoint = bp;
4c3a88a2
FB
537 return 0;
538#else
a1d1bb31 539 return -ENOSYS;
4c3a88a2
FB
540#endif
541}
542
a1d1bb31 543/* Remove a specific breakpoint. */
9349b4f9 544int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 545{
7d03f82f 546#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
547 CPUBreakpoint *bp;
548
72cf2d4f 549 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
550 if (bp->pc == pc && bp->flags == flags) {
551 cpu_breakpoint_remove_by_ref(env, bp);
552 return 0;
553 }
7d03f82f 554 }
a1d1bb31
AL
555 return -ENOENT;
556#else
557 return -ENOSYS;
7d03f82f
EI
558#endif
559}
560
a1d1bb31 561/* Remove a specific breakpoint by reference. */
9349b4f9 562void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 563{
1fddef4b 564#if defined(TARGET_HAS_ICE)
72cf2d4f 565 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 566
a1d1bb31
AL
567 breakpoint_invalidate(env, breakpoint->pc);
568
7267c094 569 g_free(breakpoint);
a1d1bb31
AL
570#endif
571}
572
573/* Remove all matching breakpoints. */
9349b4f9 574void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
575{
576#if defined(TARGET_HAS_ICE)
c0ce998e 577 CPUBreakpoint *bp, *next;
a1d1bb31 578
72cf2d4f 579 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
580 if (bp->flags & mask)
581 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 582 }
4c3a88a2
FB
583#endif
584}
585
c33a346e
FB
586/* enable or disable single step mode. EXCP_DEBUG is returned by the
587 CPU loop after each instruction */
9349b4f9 588void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 589{
1fddef4b 590#if defined(TARGET_HAS_ICE)
c33a346e
FB
591 if (env->singlestep_enabled != enabled) {
592 env->singlestep_enabled = enabled;
e22a25c9
AL
593 if (kvm_enabled())
594 kvm_update_guest_debug(env, 0);
595 else {
ccbb4d44 596 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
597 /* XXX: only flush what is necessary */
598 tb_flush(env);
599 }
c33a346e
FB
600 }
601#endif
602}
603
9349b4f9 604void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e 605{
878096ee 606 CPUState *cpu = ENV_GET_CPU(env);
7501267e 607 va_list ap;
493ae1f0 608 va_list ap2;
7501267e
FB
609
610 va_start(ap, fmt);
493ae1f0 611 va_copy(ap2, ap);
7501267e
FB
612 fprintf(stderr, "qemu: fatal: ");
613 vfprintf(stderr, fmt, ap);
614 fprintf(stderr, "\n");
878096ee 615 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
616 if (qemu_log_enabled()) {
617 qemu_log("qemu: fatal: ");
618 qemu_log_vprintf(fmt, ap2);
619 qemu_log("\n");
6fd2a026 620 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 621 qemu_log_flush();
93fcfe39 622 qemu_log_close();
924edcae 623 }
493ae1f0 624 va_end(ap2);
f9373291 625 va_end(ap);
fd052bf6
RV
626#if defined(CONFIG_USER_ONLY)
627 {
628 struct sigaction act;
629 sigfillset(&act.sa_mask);
630 act.sa_handler = SIG_DFL;
631 sigaction(SIGABRT, &act, NULL);
632 }
633#endif
7501267e
FB
634 abort();
635}
636
9349b4f9 637CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 638{
9349b4f9
AF
639 CPUArchState *new_env = cpu_init(env->cpu_model_str);
640 CPUArchState *next_cpu = new_env->next_cpu;
5a38f081
AL
641#if defined(TARGET_HAS_ICE)
642 CPUBreakpoint *bp;
643 CPUWatchpoint *wp;
644#endif
645
9349b4f9 646 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081 647
55e5c285 648 /* Preserve chaining. */
c5be9f08 649 new_env->next_cpu = next_cpu;
5a38f081
AL
650
651 /* Clone all break/watchpoints.
652 Note: Once we support ptrace with hw-debug register access, make sure
653 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
654 QTAILQ_INIT(&env->breakpoints);
655 QTAILQ_INIT(&env->watchpoints);
5a38f081 656#if defined(TARGET_HAS_ICE)
72cf2d4f 657 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
658 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
659 }
72cf2d4f 660 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
661 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
662 wp->flags, NULL);
663 }
664#endif
665
c5be9f08
TS
666 return new_env;
667}
668
0124311e 669#if !defined(CONFIG_USER_ONLY)
d24981d3
JQ
670static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
671 uintptr_t length)
672{
673 uintptr_t start1;
674
675 /* we modify the TLB cache so that the dirty bit will be set again
676 when accessing the range */
677 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
678 /* Check that we don't span multiple blocks - this breaks the
679 address comparisons below. */
680 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
681 != (end - 1) - start) {
682 abort();
683 }
684 cpu_tlb_reset_dirty_all(start1, length);
685
686}
687
5579c7f3 688/* Note: start and end must be within the same ram block. */
c227f099 689void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 690 int dirty_flags)
1ccde1cb 691{
d24981d3 692 uintptr_t length;
1ccde1cb
FB
693
694 start &= TARGET_PAGE_MASK;
695 end = TARGET_PAGE_ALIGN(end);
696
697 length = end - start;
698 if (length == 0)
699 return;
f7c11b53 700 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 701
d24981d3
JQ
702 if (tcg_enabled()) {
703 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 704 }
1ccde1cb
FB
705}
706
8b9c99d9 707static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 708{
f6f3fbca 709 int ret = 0;
74576198 710 in_migration = enable;
f6f3fbca 711 return ret;
74576198
AL
712}
713
a8170e5e 714hwaddr memory_region_section_get_iotlb(CPUArchState *env,
149f54b5
PB
715 MemoryRegionSection *section,
716 target_ulong vaddr,
717 hwaddr paddr, hwaddr xlat,
718 int prot,
719 target_ulong *address)
e5548617 720{
a8170e5e 721 hwaddr iotlb;
e5548617
BS
722 CPUWatchpoint *wp;
723
cc5bea60 724 if (memory_region_is_ram(section->mr)) {
e5548617
BS
725 /* Normal RAM. */
726 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 727 + xlat;
e5548617 728 if (!section->readonly) {
b41aac4f 729 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 730 } else {
b41aac4f 731 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
732 }
733 } else {
0475d94f 734 iotlb = section - address_space_memory.dispatch->sections;
149f54b5 735 iotlb += xlat;
e5548617
BS
736 }
737
738 /* Make accesses to pages with watchpoints go via the
739 watchpoint trap routines. */
740 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
741 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
742 /* Avoid trapping reads of pages with a write breakpoint. */
743 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 744 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
745 *address |= TLB_MMIO;
746 break;
747 }
748 }
749 }
750
751 return iotlb;
752}
9fa3e853
FB
753#endif /* defined(CONFIG_USER_ONLY) */
754
e2eef170 755#if !defined(CONFIG_USER_ONLY)
8da3ff18 756
c227f099 757static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 758 uint16_t section);
acc9d80b 759static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 760
5312bd8b
AK
761static uint16_t phys_section_add(MemoryRegionSection *section)
762{
68f3f65b
PB
763 /* The physical section number is ORed with a page-aligned
764 * pointer to produce the iotlb entries. Thus it should
765 * never overflow into the page-aligned value.
766 */
9affd6fc 767 assert(next_map.sections_nb < TARGET_PAGE_SIZE);
68f3f65b 768
9affd6fc
PB
769 if (next_map.sections_nb == next_map.sections_nb_alloc) {
770 next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
771 16);
772 next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
773 next_map.sections_nb_alloc);
5312bd8b 774 }
9affd6fc 775 next_map.sections[next_map.sections_nb] = *section;
dfde4e6e 776 memory_region_ref(section->mr);
9affd6fc 777 return next_map.sections_nb++;
5312bd8b
AK
778}
779
058bc4b5
PB
780static void phys_section_destroy(MemoryRegion *mr)
781{
dfde4e6e
PB
782 memory_region_unref(mr);
783
058bc4b5
PB
784 if (mr->subpage) {
785 subpage_t *subpage = container_of(mr, subpage_t, iomem);
786 memory_region_destroy(&subpage->iomem);
787 g_free(subpage);
788 }
789}
790
6092666e 791static void phys_sections_free(PhysPageMap *map)
5312bd8b 792{
9affd6fc
PB
793 while (map->sections_nb > 0) {
794 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
795 phys_section_destroy(section->mr);
796 }
9affd6fc
PB
797 g_free(map->sections);
798 g_free(map->nodes);
6092666e 799 g_free(map);
5312bd8b
AK
800}
801
ac1970fb 802static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
803{
804 subpage_t *subpage;
a8170e5e 805 hwaddr base = section->offset_within_address_space
0f0cb164 806 & TARGET_PAGE_MASK;
9affd6fc
PB
807 MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
808 next_map.nodes, next_map.sections);
0f0cb164
AK
809 MemoryRegionSection subsection = {
810 .offset_within_address_space = base,
052e87b0 811 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 812 };
a8170e5e 813 hwaddr start, end;
0f0cb164 814
f3705d53 815 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 816
f3705d53 817 if (!(existing->mr->subpage)) {
acc9d80b 818 subpage = subpage_init(d->as, base);
0f0cb164 819 subsection.mr = &subpage->iomem;
ac1970fb 820 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 821 phys_section_add(&subsection));
0f0cb164 822 } else {
f3705d53 823 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
824 }
825 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 826 end = start + int128_get64(section->size) - 1;
0f0cb164
AK
827 subpage_register(subpage, start, end, phys_section_add(section));
828}
829
830
052e87b0
PB
831static void register_multipage(AddressSpaceDispatch *d,
832 MemoryRegionSection *section)
33417e70 833{
a8170e5e 834 hwaddr start_addr = section->offset_within_address_space;
5312bd8b 835 uint16_t section_index = phys_section_add(section);
052e87b0
PB
836 uint64_t num_pages = int128_get64(int128_rshift(section->size,
837 TARGET_PAGE_BITS));
dd81124b 838
733d5ef5
PB
839 assert(num_pages);
840 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
841}
842
ac1970fb 843static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 844{
89ae337a 845 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 846 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 847 MemoryRegionSection now = *section, remain = *section;
052e87b0 848 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 849
733d5ef5
PB
850 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
851 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
852 - now.offset_within_address_space;
853
052e87b0 854 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 855 register_subpage(d, &now);
733d5ef5 856 } else {
052e87b0 857 now.size = int128_zero();
733d5ef5 858 }
052e87b0
PB
859 while (int128_ne(remain.size, now.size)) {
860 remain.size = int128_sub(remain.size, now.size);
861 remain.offset_within_address_space += int128_get64(now.size);
862 remain.offset_within_region += int128_get64(now.size);
69b67646 863 now = remain;
052e87b0 864 if (int128_lt(remain.size, page_size)) {
733d5ef5
PB
865 register_subpage(d, &now);
866 } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
052e87b0 867 now.size = page_size;
ac1970fb 868 register_subpage(d, &now);
69b67646 869 } else {
052e87b0 870 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 871 register_multipage(d, &now);
69b67646 872 }
0f0cb164
AK
873 }
874}
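
/* Worked example (illustrative, assuming offset_within_region has the same
 * sub-page alignment as the guest address): with 4 KiB target pages, a
 * section at address 0x1800 with size 0x3000 is registered in three pieces:
 *
 *     [0x1800, 0x2000)  unaligned head -> register_subpage()
 *     [0x2000, 0x4000)  whole pages    -> register_multipage()
 *     [0x4000, 0x4800)  unaligned tail -> register_subpage()
 *
 * so only the partially covered pages pay for the subpage indirection.
 */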
875
62a2744c
SY
876void qemu_flush_coalesced_mmio_buffer(void)
877{
878 if (kvm_enabled())
879 kvm_flush_coalesced_mmio_buffer();
880}
881
b2a8658e
UD
882void qemu_mutex_lock_ramlist(void)
883{
884 qemu_mutex_lock(&ram_list.mutex);
885}
886
887void qemu_mutex_unlock_ramlist(void)
888{
889 qemu_mutex_unlock(&ram_list.mutex);
890}
891
c902760f
MT
892#if defined(__linux__) && !defined(TARGET_S390X)
893
894#include <sys/vfs.h>
895
896#define HUGETLBFS_MAGIC 0x958458f6
897
898static long gethugepagesize(const char *path)
899{
900 struct statfs fs;
901 int ret;
902
903 do {
9742bf26 904 ret = statfs(path, &fs);
c902760f
MT
905 } while (ret != 0 && errno == EINTR);
906
907 if (ret != 0) {
9742bf26
YT
908 perror(path);
909 return 0;
c902760f
MT
910 }
911
912 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 913 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
914
915 return fs.f_bsize;
916}
917
04b16653
AW
918static void *file_ram_alloc(RAMBlock *block,
919 ram_addr_t memory,
920 const char *path)
c902760f
MT
921{
922 char *filename;
8ca761f6
PF
923 char *sanitized_name;
924 char *c;
c902760f
MT
925 void *area;
926 int fd;
927#ifdef MAP_POPULATE
928 int flags;
929#endif
930 unsigned long hpagesize;
931
932 hpagesize = gethugepagesize(path);
933 if (!hpagesize) {
9742bf26 934 return NULL;
c902760f
MT
935 }
936
937 if (memory < hpagesize) {
938 return NULL;
939 }
940
941 if (kvm_enabled() && !kvm_has_sync_mmu()) {
942 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
943 return NULL;
944 }
945
8ca761f6
PF
946 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
947 sanitized_name = g_strdup(block->mr->name);
948 for (c = sanitized_name; *c != '\0'; c++) {
949 if (*c == '/')
950 *c = '_';
951 }
952
953 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
954 sanitized_name);
955 g_free(sanitized_name);
c902760f
MT
956
957 fd = mkstemp(filename);
958 if (fd < 0) {
9742bf26 959 perror("unable to create backing store for hugepages");
e4ada482 960 g_free(filename);
9742bf26 961 return NULL;
c902760f
MT
962 }
963 unlink(filename);
e4ada482 964 g_free(filename);
c902760f
MT
965
966 memory = (memory+hpagesize-1) & ~(hpagesize-1);
967
968 /*
969 * ftruncate is not supported by hugetlbfs in older
970 * hosts, so don't bother bailing out on errors.
971 * If anything goes wrong with it under other filesystems,
972 * mmap will fail.
973 */
974 if (ftruncate(fd, memory))
9742bf26 975 perror("ftruncate");
c902760f
MT
976
977#ifdef MAP_POPULATE
978 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
979 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
980 * to sidestep this quirk.
981 */
982 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
983 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
984#else
985 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
986#endif
987 if (area == MAP_FAILED) {
9742bf26
YT
988 perror("file_ram_alloc: can't mmap RAM pages");
989 close(fd);
990 return (NULL);
c902760f 991 }
04b16653 992 block->fd = fd;
c902760f
MT
993 return area;
994}
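
/* Sizing note (illustrative): the rounding above pads the mapping to a
 * multiple of the hugepage size reported by the filesystem.  With 2 MiB huge
 * pages, a request for 129 MiB of guest RAM therefore mmap()s 130 MiB:
 *
 *     (129 MiB + 2 MiB - 1) & ~(2 MiB - 1) == 130 MiB
 */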
995#endif
996
d17b5288 997static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
998{
999 RAMBlock *block, *next_block;
3e837b2c 1000 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1001
49cd9ac6
SH
1002 assert(size != 0); /* it would hand out same offset multiple times */
1003
a3161038 1004 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1005 return 0;
1006
a3161038 1007 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1008 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1009
1010 end = block->offset + block->length;
1011
a3161038 1012 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1013 if (next_block->offset >= end) {
1014 next = MIN(next, next_block->offset);
1015 }
1016 }
1017 if (next - end >= size && next - end < mingap) {
3e837b2c 1018 offset = end;
04b16653
AW
1019 mingap = next - end;
1020 }
1021 }
3e837b2c
AW
1022
1023 if (offset == RAM_ADDR_MAX) {
1024 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1025 (uint64_t)size);
1026 abort();
1027 }
1028
04b16653
AW
1029 return offset;
1030}
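
/* Worked example (illustrative): with existing blocks at [0x0, 0x4000) and
 * [0x10000, 0x20000), a request for 0x8000 bytes sees a 0xc000-byte gap after
 * the first block and an unbounded gap after the second; the smallest gap
 * that still fits wins, so the new block is placed at offset 0x4000.
 */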
1031
652d7ec2 1032ram_addr_t last_ram_offset(void)
d17b5288
AW
1033{
1034 RAMBlock *block;
1035 ram_addr_t last = 0;
1036
a3161038 1037 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1038 last = MAX(last, block->offset + block->length);
1039
1040 return last;
1041}
1042
ddb97f1d
JB
1043static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1044{
1045 int ret;
1046 QemuOpts *machine_opts;
1047
1048 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1049 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1050 if (machine_opts &&
1051 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
1052 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1053 if (ret) {
1054 perror("qemu_madvise");
1055 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1056 "but dump_guest_core=off specified\n");
1057 }
1058 }
1059}
1060
c5705a77 1061void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
1062{
1063 RAMBlock *new_block, *block;
1064
c5705a77 1065 new_block = NULL;
a3161038 1066 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77
AK
1067 if (block->offset == addr) {
1068 new_block = block;
1069 break;
1070 }
1071 }
1072 assert(new_block);
1073 assert(!new_block->idstr[0]);
84b89d78 1074
09e5ab63
AL
1075 if (dev) {
1076 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1077 if (id) {
1078 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1079 g_free(id);
84b89d78
CM
1080 }
1081 }
1082 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1083
b2a8658e
UD
1084 /* This assumes the iothread lock is taken here too. */
1085 qemu_mutex_lock_ramlist();
a3161038 1086 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1087 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1088 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1089 new_block->idstr);
1090 abort();
1091 }
1092 }
b2a8658e 1093 qemu_mutex_unlock_ramlist();
c5705a77
AK
1094}
1095
8490fc78
LC
1096static int memory_try_enable_merging(void *addr, size_t len)
1097{
1098 QemuOpts *opts;
1099
1100 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1101 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1102 /* disabled by the user */
1103 return 0;
1104 }
1105
1106 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1107}
1108
c5705a77
AK
1109ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1110 MemoryRegion *mr)
1111{
abb26d63 1112 RAMBlock *block, *new_block;
c5705a77
AK
1113
1114 size = TARGET_PAGE_ALIGN(size);
1115 new_block = g_malloc0(sizeof(*new_block));
84b89d78 1116
b2a8658e
UD
1117 /* This assumes the iothread lock is taken here too. */
1118 qemu_mutex_lock_ramlist();
7c637366 1119 new_block->mr = mr;
432d268c 1120 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1121 if (host) {
1122 new_block->host = host;
cd19cfa2 1123 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
1124 } else {
1125 if (mem_path) {
c902760f 1126#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
1127 new_block->host = file_ram_alloc(new_block, size, mem_path);
1128 if (!new_block->host) {
6eebf958 1129 new_block->host = qemu_anon_ram_alloc(size);
8490fc78 1130 memory_try_enable_merging(new_block->host, size);
6977dfe6 1131 }
c902760f 1132#else
6977dfe6
YT
1133 fprintf(stderr, "-mem-path option unsupported\n");
1134 exit(1);
c902760f 1135#endif
6977dfe6 1136 } else {
868bb33f 1137 if (xen_enabled()) {
fce537d4 1138 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
1139 } else if (kvm_enabled()) {
1140 /* some s390/kvm configurations have special constraints */
6eebf958 1141 new_block->host = kvm_ram_alloc(size);
432d268c 1142 } else {
6eebf958 1143 new_block->host = qemu_anon_ram_alloc(size);
432d268c 1144 }
8490fc78 1145 memory_try_enable_merging(new_block->host, size);
6977dfe6 1146 }
c902760f 1147 }
94a6b54f
PB
1148 new_block->length = size;
1149
abb26d63
PB
1150 /* Keep the list sorted from biggest to smallest block. */
1151 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1152 if (block->length < new_block->length) {
1153 break;
1154 }
1155 }
1156 if (block) {
1157 QTAILQ_INSERT_BEFORE(block, new_block, next);
1158 } else {
1159 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1160 }
0d6d3c87 1161 ram_list.mru_block = NULL;
94a6b54f 1162
f798b07f 1163 ram_list.version++;
b2a8658e 1164 qemu_mutex_unlock_ramlist();
f798b07f 1165
7267c094 1166 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 1167 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
1168 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1169 0, size >> TARGET_PAGE_BITS);
1720aeee 1170 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 1171
ddb97f1d 1172 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1173 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 1174
6f0437e8
JK
1175 if (kvm_enabled())
1176 kvm_setup_guest_memory(new_block->host, size);
1177
94a6b54f
PB
1178 return new_block->offset;
1179}
e9a1ab19 1180
c5705a77 1181ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1182{
c5705a77 1183 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1184}
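
/* Usage sketch (illustrative; the call chain below is an assumption, not
 * shown in this file): board and device code normally reaches this through
 * the memory API, e.g.
 *
 *     memory_region_init_ram(mr, NULL, "example.ram", size);
 *
 * which ends up in qemu_ram_alloc(): a ram_addr_t range is reserved with
 * find_ram_offset(), host memory is allocated (or adopted from 'host'), and
 * the block is inserted into ram_list, kept sorted from biggest to smallest.
 */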
1185
1f2e98b6
AW
1186void qemu_ram_free_from_ptr(ram_addr_t addr)
1187{
1188 RAMBlock *block;
1189
b2a8658e
UD
1190 /* This assumes the iothread lock is taken here too. */
1191 qemu_mutex_lock_ramlist();
a3161038 1192 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1193 if (addr == block->offset) {
a3161038 1194 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1195 ram_list.mru_block = NULL;
f798b07f 1196 ram_list.version++;
7267c094 1197 g_free(block);
b2a8658e 1198 break;
1f2e98b6
AW
1199 }
1200 }
b2a8658e 1201 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1202}
1203
c227f099 1204void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1205{
04b16653
AW
1206 RAMBlock *block;
1207
b2a8658e
UD
1208 /* This assumes the iothread lock is taken here too. */
1209 qemu_mutex_lock_ramlist();
a3161038 1210 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1211 if (addr == block->offset) {
a3161038 1212 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1213 ram_list.mru_block = NULL;
f798b07f 1214 ram_list.version++;
cd19cfa2
HY
1215 if (block->flags & RAM_PREALLOC_MASK) {
1216 ;
1217 } else if (mem_path) {
04b16653
AW
1218#if defined (__linux__) && !defined(TARGET_S390X)
1219 if (block->fd) {
1220 munmap(block->host, block->length);
1221 close(block->fd);
1222 } else {
e7a09b92 1223 qemu_anon_ram_free(block->host, block->length);
04b16653 1224 }
fd28aa13
JK
1225#else
1226 abort();
04b16653
AW
1227#endif
1228 } else {
868bb33f 1229 if (xen_enabled()) {
e41d7c69 1230 xen_invalidate_map_cache_entry(block->host);
432d268c 1231 } else {
e7a09b92 1232 qemu_anon_ram_free(block->host, block->length);
432d268c 1233 }
04b16653 1234 }
7267c094 1235 g_free(block);
b2a8658e 1236 break;
04b16653
AW
1237 }
1238 }
b2a8658e 1239 qemu_mutex_unlock_ramlist();
04b16653 1240
e9a1ab19
FB
1241}
1242
cd19cfa2
HY
1243#ifndef _WIN32
1244void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1245{
1246 RAMBlock *block;
1247 ram_addr_t offset;
1248 int flags;
1249 void *area, *vaddr;
1250
a3161038 1251 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1252 offset = addr - block->offset;
1253 if (offset < block->length) {
1254 vaddr = block->host + offset;
1255 if (block->flags & RAM_PREALLOC_MASK) {
1256 ;
1257 } else {
1258 flags = MAP_FIXED;
1259 munmap(vaddr, length);
1260 if (mem_path) {
1261#if defined(__linux__) && !defined(TARGET_S390X)
1262 if (block->fd) {
1263#ifdef MAP_POPULATE
1264 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1265 MAP_PRIVATE;
1266#else
1267 flags |= MAP_PRIVATE;
1268#endif
1269 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1270 flags, block->fd, offset);
1271 } else {
1272 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1273 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1274 flags, -1, 0);
1275 }
fd28aa13
JK
1276#else
1277 abort();
cd19cfa2
HY
1278#endif
1279 } else {
1280#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1281 flags |= MAP_SHARED | MAP_ANONYMOUS;
1282 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1283 flags, -1, 0);
1284#else
1285 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1286 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1287 flags, -1, 0);
1288#endif
1289 }
1290 if (area != vaddr) {
f15fbc4b
AP
1291 fprintf(stderr, "Could not remap addr: "
1292 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1293 length, addr);
1294 exit(1);
1295 }
8490fc78 1296 memory_try_enable_merging(vaddr, length);
ddb97f1d 1297 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1298 }
1299 return;
1300 }
1301 }
1302}
1303#endif /* !_WIN32 */
1304
1b5ec234 1305static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
dc828ca1 1306{
94a6b54f
PB
1307 RAMBlock *block;
1308
b2a8658e 1309 /* The list is protected by the iothread lock here. */
0d6d3c87
PB
1310 block = ram_list.mru_block;
1311 if (block && addr - block->offset < block->length) {
1312 goto found;
1313 }
a3161038 1314 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f471a17e 1315 if (addr - block->offset < block->length) {
0d6d3c87 1316 goto found;
f471a17e 1317 }
94a6b54f 1318 }
f471a17e
AW
1319
1320 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1321 abort();
1322
0d6d3c87
PB
1323found:
1324 ram_list.mru_block = block;
1b5ec234
PB
1325 return block;
1326}
1327
1328/* Return a host pointer to ram allocated with qemu_ram_alloc.
1329 With the exception of the softmmu code in this file, this should
1330 only be used for local memory (e.g. video ram) that the device owns,
1331 and knows it isn't going to access beyond the end of the block.
1332
1333 It should not be used for general purpose DMA.
1334 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1335 */
1336void *qemu_get_ram_ptr(ram_addr_t addr)
1337{
1338 RAMBlock *block = qemu_get_ram_block(addr);
1339
0d6d3c87
PB
1340 if (xen_enabled()) {
1341 /* We need to check if the requested address is in the RAM
1342 * because we don't want to map the entire memory in QEMU.
1343 * In that case just map until the end of the page.
1344 */
1345 if (block->offset == 0) {
1346 return xen_map_cache(addr, 0, 0);
1347 } else if (block->host == NULL) {
1348 block->host =
1349 xen_map_cache(block->offset, block->length, 1);
1350 }
1351 }
1352 return block->host + (addr - block->offset);
dc828ca1
PB
1353}
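
/* Illustrative example (block_offset/block_len are hypothetical values
 * obtained from qemu_ram_alloc()): a device that owns a RAM block may cache
 * the host pointer for its own bounded accesses,
 *
 *     void *p = qemu_get_ram_ptr(block_offset);
 *     memset(p, 0, block_len);
 *
 * but guest-driven DMA should still use cpu_physical_memory_rw() or
 * cpu_physical_memory_map() so that dispatch and dirty tracking are honoured.
 */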
1354
0d6d3c87
PB
1355/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1356 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1357 *
1358 * ??? Is this still necessary?
b2e0a138 1359 */
8b9c99d9 1360static void *qemu_safe_ram_ptr(ram_addr_t addr)
b2e0a138
MT
1361{
1362 RAMBlock *block;
1363
b2a8658e 1364 /* The list is protected by the iothread lock here. */
a3161038 1365 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
b2e0a138 1366 if (addr - block->offset < block->length) {
868bb33f 1367 if (xen_enabled()) {
432d268c
JN
1368 /* We need to check if the requested address is in the RAM
1369 * because we don't want to map the entire memory in QEMU.
712c2b41 1370 * In that case just map until the end of the page.
432d268c
JN
1371 */
1372 if (block->offset == 0) {
e41d7c69 1373 return xen_map_cache(addr, 0, 0);
432d268c 1374 } else if (block->host == NULL) {
e41d7c69
JK
1375 block->host =
1376 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
1377 }
1378 }
b2e0a138
MT
1379 return block->host + (addr - block->offset);
1380 }
1381 }
1382
1383 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1384 abort();
1385
1386 return NULL;
1387}
1388
38bee5dc
SS
1389/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1390 * but takes a size argument */
8b9c99d9 1391static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 1392{
8ab934f9
SS
1393 if (*size == 0) {
1394 return NULL;
1395 }
868bb33f 1396 if (xen_enabled()) {
e41d7c69 1397 return xen_map_cache(addr, *size, 1);
868bb33f 1398 } else {
38bee5dc
SS
1399 RAMBlock *block;
1400
a3161038 1401 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1402 if (addr - block->offset < block->length) {
1403 if (addr - block->offset + *size > block->length)
1404 *size = block->length - addr + block->offset;
1405 return block->host + (addr - block->offset);
1406 }
1407 }
1408
1409 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1410 abort();
38bee5dc
SS
1411 }
1412}
1413
7443b437
PB
1414/* Some of the softmmu routines need to translate from a host pointer
1415 (typically a TLB entry) back to a ram offset. */
1b5ec234 1416MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1417{
94a6b54f
PB
1418 RAMBlock *block;
1419 uint8_t *host = ptr;
1420
868bb33f 1421 if (xen_enabled()) {
e41d7c69 1422 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1423 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1424 }
1425
23887b79
PB
1426 block = ram_list.mru_block;
1427 if (block && block->host && host - block->host < block->length) {
1428 goto found;
1429 }
1430
a3161038 1431 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
        /* This case happens when the block is not mapped. */
1433 if (block->host == NULL) {
1434 continue;
1435 }
f471a17e 1436 if (host - block->host < block->length) {
23887b79 1437 goto found;
f471a17e 1438 }
94a6b54f 1439 }
432d268c 1440
1b5ec234 1441 return NULL;
23887b79
PB
1442
1443found:
1444 *ram_addr = block->offset + (host - block->host);
1b5ec234 1445 return block->mr;
e890261f 1446}
f471a17e 1447
a8170e5e 1448static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1449 uint64_t val, unsigned size)
9fa3e853 1450{
3a7d929e 1451 int dirty_flags;
f7c11b53 1452 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1453 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
0e0df1e2 1454 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 1455 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 1456 }
0e0df1e2
AK
1457 switch (size) {
1458 case 1:
1459 stb_p(qemu_get_ram_ptr(ram_addr), val);
1460 break;
1461 case 2:
1462 stw_p(qemu_get_ram_ptr(ram_addr), val);
1463 break;
1464 case 4:
1465 stl_p(qemu_get_ram_ptr(ram_addr), val);
1466 break;
1467 default:
1468 abort();
3a7d929e 1469 }
f23db169 1470 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 1471 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
1472 /* we remove the notdirty callback only if the code has been
1473 flushed */
1474 if (dirty_flags == 0xff)
2e70f6ef 1475 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
1476}
1477
b018ddf6
PB
1478static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1479 unsigned size, bool is_write)
1480{
1481 return is_write;
1482}
1483
0e0df1e2 1484static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1485 .write = notdirty_mem_write,
b018ddf6 1486 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1487 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1488};
1489
0f459d16 1490/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1491static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1492{
9349b4f9 1493 CPUArchState *env = cpu_single_env;
06d55cc1 1494 target_ulong pc, cs_base;
0f459d16 1495 target_ulong vaddr;
a1d1bb31 1496 CPUWatchpoint *wp;
06d55cc1 1497 int cpu_flags;
0f459d16 1498
06d55cc1
AL
1499 if (env->watchpoint_hit) {
1500 /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
1502 * current instruction. */
c3affe56 1503 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1504 return;
1505 }
2e70f6ef 1506 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1507 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1508 if ((vaddr == (wp->vaddr & len_mask) ||
1509 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1510 wp->flags |= BP_WATCHPOINT_HIT;
1511 if (!env->watchpoint_hit) {
1512 env->watchpoint_hit = wp;
5a316526 1513 tb_check_watchpoint(env);
6e140f28
AL
1514 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1515 env->exception_index = EXCP_DEBUG;
488d6577 1516 cpu_loop_exit(env);
6e140f28
AL
1517 } else {
1518 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1519 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1520 cpu_resume_from_signal(env, NULL);
6e140f28 1521 }
06d55cc1 1522 }
6e140f28
AL
1523 } else {
1524 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1525 }
1526 }
1527}
1528
6658ffb8
PB
1529/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1530 so these check for a hit then pass through to the normal out-of-line
1531 phys routines. */
a8170e5e 1532static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1533 unsigned size)
6658ffb8 1534{
1ec9b909
AK
1535 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1536 switch (size) {
1537 case 1: return ldub_phys(addr);
1538 case 2: return lduw_phys(addr);
1539 case 4: return ldl_phys(addr);
1540 default: abort();
1541 }
6658ffb8
PB
1542}
1543
a8170e5e 1544static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1545 uint64_t val, unsigned size)
6658ffb8 1546{
1ec9b909
AK
1547 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1548 switch (size) {
67364150
MF
1549 case 1:
1550 stb_phys(addr, val);
1551 break;
1552 case 2:
1553 stw_phys(addr, val);
1554 break;
1555 case 4:
1556 stl_phys(addr, val);
1557 break;
1ec9b909
AK
1558 default: abort();
1559 }
6658ffb8
PB
1560}
1561
1ec9b909
AK
1562static const MemoryRegionOps watch_mem_ops = {
1563 .read = watch_mem_read,
1564 .write = watch_mem_write,
1565 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1566};
6658ffb8 1567
a8170e5e 1568static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1569 unsigned len)
db7b5426 1570{
acc9d80b
JK
1571 subpage_t *subpage = opaque;
1572 uint8_t buf[4];
791af8c8 1573
db7b5426 1574#if defined(DEBUG_SUBPAGE)
acc9d80b
JK
1575 printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
1576 subpage, len, addr);
db7b5426 1577#endif
acc9d80b
JK
1578 address_space_read(subpage->as, addr + subpage->base, buf, len);
1579 switch (len) {
1580 case 1:
1581 return ldub_p(buf);
1582 case 2:
1583 return lduw_p(buf);
1584 case 4:
1585 return ldl_p(buf);
1586 default:
1587 abort();
1588 }
db7b5426
BS
1589}
1590
a8170e5e 1591static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1592 uint64_t value, unsigned len)
db7b5426 1593{
acc9d80b
JK
1594 subpage_t *subpage = opaque;
1595 uint8_t buf[4];
1596
db7b5426 1597#if defined(DEBUG_SUBPAGE)
70c68e44 1598 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
acc9d80b
JK
1599 " value %"PRIx64"\n",
1600 __func__, subpage, len, addr, value);
db7b5426 1601#endif
acc9d80b
JK
1602 switch (len) {
1603 case 1:
1604 stb_p(buf, value);
1605 break;
1606 case 2:
1607 stw_p(buf, value);
1608 break;
1609 case 4:
1610 stl_p(buf, value);
1611 break;
1612 default:
1613 abort();
1614 }
1615 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1616}
1617
c353e4cc
PB
1618static bool subpage_accepts(void *opaque, hwaddr addr,
1619 unsigned size, bool is_write)
1620{
acc9d80b 1621 subpage_t *subpage = opaque;
c353e4cc 1622#if defined(DEBUG_SUBPAGE)
acc9d80b
JK
1623 printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', size, addr);
c353e4cc
PB
1625#endif
1626
acc9d80b
JK
1627 return address_space_access_valid(subpage->as, addr + subpage->base,
1628 size, is_write);
c353e4cc
PB
1629}
1630
70c68e44
AK
1631static const MemoryRegionOps subpage_ops = {
1632 .read = subpage_read,
1633 .write = subpage_write,
c353e4cc 1634 .valid.accepts = subpage_accepts,
70c68e44 1635 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1636};
1637
c227f099 1638static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1639 uint16_t section)
db7b5426
BS
1640{
1641 int idx, eidx;
1642
1643 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1644 return -1;
1645 idx = SUBPAGE_IDX(start);
1646 eidx = SUBPAGE_IDX(end);
1647#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
1650#endif
db7b5426 1651 for (; idx <= eidx; idx++) {
5312bd8b 1652 mmio->sub_section[idx] = section;
db7b5426
BS
1653 }
1654
1655 return 0;
1656}
1657
acc9d80b 1658static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1659{
c227f099 1660 subpage_t *mmio;
db7b5426 1661
7267c094 1662 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1663
acc9d80b 1664 mmio->as = as;
1eec614b 1665 mmio->base = base;
2c9b15ca 1666 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
70c68e44 1667 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1668 mmio->iomem.subpage = true;
db7b5426 1669#if defined(DEBUG_SUBPAGE)
1eec614b
AL
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
db7b5426 1672#endif
b41aac4f 1673 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1674
1675 return mmio;
1676}
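
/* Illustrative sketch: a sub-page keeps one uint16_t section index per byte
 * of the page, so after something like
 *
 *     subpage_t *sp = subpage_init(as, base);
 *     subpage_register(sp, 0x000, 0x0ff, section_a);
 *     subpage_register(sp, 0x100, 0xfff, section_b);
 *
 * (section_a/section_b being hypothetical values from phys_section_add()),
 * accesses are bounced through subpage_read()/subpage_write(), which re-enter
 * the address space at base + addr and land in whichever section is
 * registered for that offset.
 */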
1677
5312bd8b
AK
1678static uint16_t dummy_section(MemoryRegion *mr)
1679{
1680 MemoryRegionSection section = {
1681 .mr = mr,
1682 .offset_within_address_space = 0,
1683 .offset_within_region = 0,
052e87b0 1684 .size = int128_2_64(),
5312bd8b
AK
1685 };
1686
1687 return phys_section_add(&section);
1688}
1689
a8170e5e 1690MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 1691{
0475d94f 1692 return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1693}
1694
e9179ce1
AK
1695static void io_mem_init(void)
1696{
2c9b15ca
PB
1697 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1698 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
0e0df1e2 1699 "unassigned", UINT64_MAX);
2c9b15ca 1700 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
0e0df1e2 1701 "notdirty", UINT64_MAX);
2c9b15ca 1702 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1ec9b909 1703 "watch", UINT64_MAX);
e9179ce1
AK
1704}
1705
ac1970fb 1706static void mem_begin(MemoryListener *listener)
00752703
PB
1707{
1708 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1709 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1710
1711 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1712 d->as = as;
1713 as->next_dispatch = d;
1714}
1715
1716static void mem_commit(MemoryListener *listener)
ac1970fb 1717{
89ae337a 1718 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1719 AddressSpaceDispatch *cur = as->dispatch;
1720 AddressSpaceDispatch *next = as->next_dispatch;
1721
1722 next->nodes = next_map.nodes;
1723 next->sections = next_map.sections;
ac1970fb 1724
0475d94f
PB
1725 as->dispatch = next;
1726 g_free(cur);
ac1970fb
AK
1727}
1728
50c1e149
AK
1729static void core_begin(MemoryListener *listener)
1730{
b41aac4f
LPF
1731 uint16_t n;
1732
6092666e
PB
1733 prev_map = g_new(PhysPageMap, 1);
1734 *prev_map = next_map;
1735
9affd6fc 1736 memset(&next_map, 0, sizeof(next_map));
b41aac4f
LPF
1737 n = dummy_section(&io_mem_unassigned);
1738 assert(n == PHYS_SECTION_UNASSIGNED);
1739 n = dummy_section(&io_mem_notdirty);
1740 assert(n == PHYS_SECTION_NOTDIRTY);
1741 n = dummy_section(&io_mem_rom);
1742 assert(n == PHYS_SECTION_ROM);
1743 n = dummy_section(&io_mem_watch);
1744 assert(n == PHYS_SECTION_WATCH);
50c1e149
AK
1745}
1746
9affd6fc
PB
 1747/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
1748 * All AddressSpaceDispatch instances have switched to the next map.
1749 */
1750static void core_commit(MemoryListener *listener)
1751{
6092666e 1752 phys_sections_free(prev_map);
9affd6fc
PB
1753}
1754
1d71148e 1755static void tcg_commit(MemoryListener *listener)
50c1e149 1756{
9349b4f9 1757 CPUArchState *env;
117712c3
AK
1758
1759 /* since each CPU stores ram addresses in its TLB cache, we must
1760 reset the modified entries */
1761 /* XXX: slow ! */
 1762 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1763 tlb_flush(env, 1);
1764 }
50c1e149
AK
1765}
1766
93632747
AK
1767static void core_log_global_start(MemoryListener *listener)
1768{
1769 cpu_physical_memory_set_dirty_tracking(1);
1770}
1771
1772static void core_log_global_stop(MemoryListener *listener)
1773{
1774 cpu_physical_memory_set_dirty_tracking(0);
1775}
1776
93632747 1777static MemoryListener core_memory_listener = {
50c1e149 1778 .begin = core_begin,
9affd6fc 1779 .commit = core_commit,
93632747
AK
1780 .log_global_start = core_log_global_start,
1781 .log_global_stop = core_log_global_stop,
ac1970fb 1782 .priority = 1,
93632747
AK
1783};
1784
1d71148e
AK
1785static MemoryListener tcg_memory_listener = {
1786 .commit = tcg_commit,
1787};
1788
ac1970fb
AK
1789void address_space_init_dispatch(AddressSpace *as)
1790{
00752703 1791 as->dispatch = NULL;
89ae337a 1792 as->dispatch_listener = (MemoryListener) {
ac1970fb 1793 .begin = mem_begin,
00752703 1794 .commit = mem_commit,
ac1970fb
AK
1795 .region_add = mem_add,
1796 .region_nop = mem_add,
1797 .priority = 0,
1798 };
89ae337a 1799 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1800}
1801
83f3c251
AK
1802void address_space_destroy_dispatch(AddressSpace *as)
1803{
1804 AddressSpaceDispatch *d = as->dispatch;
1805
89ae337a 1806 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1807 g_free(d);
1808 as->dispatch = NULL;
1809}
1810
62152b8a
AK
1811static void memory_map_init(void)
1812{
7267c094 1813 system_memory = g_malloc(sizeof(*system_memory));
2c9b15ca 1814 memory_region_init(system_memory, NULL, "system", INT64_MAX);
7dca8043 1815 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1816
7267c094 1817 system_io = g_malloc(sizeof(*system_io));
2c9b15ca 1818 memory_region_init(system_io, NULL, "io", 65536);
7dca8043 1819 address_space_init(&address_space_io, system_io, "I/O");
93632747 1820
f6790af6 1821 memory_listener_register(&core_memory_listener, &address_space_memory);
f6790af6 1822 memory_listener_register(&tcg_memory_listener, &address_space_memory);
62152b8a
AK
1823}
1824
1825MemoryRegion *get_system_memory(void)
1826{
1827 return system_memory;
1828}
1829
309cb471
AK
1830MemoryRegion *get_system_io(void)
1831{
1832 return system_io;
1833}
1834
e2eef170
PB
1835#endif /* !defined(CONFIG_USER_ONLY) */
1836
13eb76e0
FB
1837/* physical memory access (slow version, mainly for debug) */
1838#if defined(CONFIG_USER_ONLY)
9349b4f9 1839int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 1840 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1841{
1842 int l, flags;
1843 target_ulong page;
53a5960a 1844 void * p;
13eb76e0
FB
1845
1846 while (len > 0) {
1847 page = addr & TARGET_PAGE_MASK;
1848 l = (page + TARGET_PAGE_SIZE) - addr;
1849 if (l > len)
1850 l = len;
1851 flags = page_get_flags(page);
1852 if (!(flags & PAGE_VALID))
a68fe89c 1853 return -1;
13eb76e0
FB
1854 if (is_write) {
1855 if (!(flags & PAGE_WRITE))
a68fe89c 1856 return -1;
579a97f7 1857 /* XXX: this code should not depend on lock_user */
72fb7daa 1858 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1859 return -1;
72fb7daa
AJ
1860 memcpy(p, buf, l);
1861 unlock_user(p, addr, l);
13eb76e0
FB
1862 } else {
1863 if (!(flags & PAGE_READ))
a68fe89c 1864 return -1;
579a97f7 1865 /* XXX: this code should not depend on lock_user */
72fb7daa 1866 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1867 return -1;
72fb7daa 1868 memcpy(buf, p, l);
5b257578 1869 unlock_user(p, addr, 0);
13eb76e0
FB
1870 }
1871 len -= l;
1872 buf += l;
1873 addr += l;
1874 }
a68fe89c 1875 return 0;
13eb76e0 1876}
8df1cd07 1877
13eb76e0 1878#else
51d7a9eb 1879
a8170e5e
AK
1880static void invalidate_and_set_dirty(hwaddr addr,
1881 hwaddr length)
51d7a9eb
AP
1882{
1883 if (!cpu_physical_memory_is_dirty(addr)) {
1884 /* invalidate code */
1885 tb_invalidate_phys_page_range(addr, addr + length, 0);
1886 /* set dirty bit */
1887 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1888 }
e226939d 1889 xen_modified_memory(addr, length);
51d7a9eb
AP
1890}
1891
2bbfa05d
PB
1892static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1893{
1894 if (memory_region_is_ram(mr)) {
1895 return !(is_write && mr->readonly);
1896 }
1897 if (memory_region_is_romd(mr)) {
1898 return !is_write;
1899 }
1900
1901 return false;
1902}
1903
f52cc467 1904static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
82f2563f 1905{
f52cc467 1906 if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) {
82f2563f
PB
1907 return 4;
1908 }
f52cc467 1909 if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
82f2563f
PB
1910 return 2;
1911 }
1912 return 1;
1913}
1914
fd8aaa76 1915bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1916 int len, bool is_write)
13eb76e0 1917{
149f54b5 1918 hwaddr l;
13eb76e0 1919 uint8_t *ptr;
791af8c8 1920 uint64_t val;
149f54b5 1921 hwaddr addr1;
5c8a00ce 1922 MemoryRegion *mr;
fd8aaa76 1923 bool error = false;
3b46e624 1924
13eb76e0 1925 while (len > 0) {
149f54b5 1926 l = len;
5c8a00ce 1927 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 1928
13eb76e0 1929 if (is_write) {
5c8a00ce
PB
1930 if (!memory_access_is_direct(mr, is_write)) {
1931 l = memory_access_size(mr, l, addr1);
6a00d601
FB
1932 /* XXX: could force cpu_single_env to NULL to avoid
1933 potential bugs */
82f2563f 1934 if (l == 4) {
1c213d19 1935 /* 32 bit write access */
c27004ec 1936 val = ldl_p(buf);
5c8a00ce 1937 error |= io_mem_write(mr, addr1, val, 4);
82f2563f 1938 } else if (l == 2) {
1c213d19 1939 /* 16 bit write access */
c27004ec 1940 val = lduw_p(buf);
5c8a00ce 1941 error |= io_mem_write(mr, addr1, val, 2);
13eb76e0 1942 } else {
1c213d19 1943 /* 8 bit write access */
c27004ec 1944 val = ldub_p(buf);
5c8a00ce 1945 error |= io_mem_write(mr, addr1, val, 1);
13eb76e0 1946 }
2bbfa05d 1947 } else {
5c8a00ce 1948 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 1949 /* RAM case */
5579c7f3 1950 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1951 memcpy(ptr, buf, l);
51d7a9eb 1952 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1953 }
1954 } else {
5c8a00ce 1955 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 1956 /* I/O case */
5c8a00ce 1957 l = memory_access_size(mr, l, addr1);
82f2563f 1958 if (l == 4) {
13eb76e0 1959 /* 32 bit read access */
5c8a00ce 1960 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 1961 stl_p(buf, val);
82f2563f 1962 } else if (l == 2) {
13eb76e0 1963 /* 16 bit read access */
5c8a00ce 1964 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 1965 stw_p(buf, val);
13eb76e0 1966 } else {
1c213d19 1967 /* 8 bit read access */
5c8a00ce 1968 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 1969 stb_p(buf, val);
13eb76e0
FB
1970 }
1971 } else {
1972 /* RAM case */
5c8a00ce 1973 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 1974 memcpy(buf, ptr, l);
13eb76e0
FB
1975 }
1976 }
1977 len -= l;
1978 buf += l;
1979 addr += l;
1980 }
fd8aaa76
PB
1981
1982 return error;
13eb76e0 1983}
8df1cd07 1984
fd8aaa76 1985bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1986 const uint8_t *buf, int len)
1987{
fd8aaa76 1988 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
1989}
1990
fd8aaa76 1991bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 1992{
fd8aaa76 1993 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
1994}
1995
1996
a8170e5e 1997void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1998 int len, int is_write)
1999{
fd8aaa76 2000 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2001}
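/* Editor's note: a minimal usage sketch, not part of the original file.
 * address_space_rw() returns true if any part of the access could not be
 * performed (e.g. it hit an unassigned or invalid region); the guest
 * physical address below is purely illustrative:
 *
 *     uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
 *     bool err = address_space_rw(&address_space_memory, 0x1000,
 *                                 buf, sizeof(buf), true);
 *     if (err) {
 *         // part of the write landed on unassigned or failing I/O
 *     }
 *
 * cpu_physical_memory_rw() is the same operation fixed to
 * &address_space_memory, with the error indication dropped.
 */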
2002
d0ecd2aa 2003/* used for ROM loading : can write in RAM and ROM */
a8170e5e 2004void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
2005 const uint8_t *buf, int len)
2006{
149f54b5 2007 hwaddr l;
d0ecd2aa 2008 uint8_t *ptr;
149f54b5 2009 hwaddr addr1;
5c8a00ce 2010 MemoryRegion *mr;
3b46e624 2011
d0ecd2aa 2012 while (len > 0) {
149f54b5 2013 l = len;
5c8a00ce
PB
2014 mr = address_space_translate(&address_space_memory,
2015 addr, &addr1, &l, true);
3b46e624 2016
5c8a00ce
PB
2017 if (!(memory_region_is_ram(mr) ||
2018 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2019 /* do nothing */
2020 } else {
5c8a00ce 2021 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2022 /* ROM/RAM case */
5579c7f3 2023 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2024 memcpy(ptr, buf, l);
51d7a9eb 2025 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2026 }
2027 len -= l;
2028 buf += l;
2029 addr += l;
2030 }
2031}
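/* Editor's note: an illustrative sketch, not part of the original file.
 * Unlike cpu_physical_memory_rw(), this helper writes through read-only
 * RAM/ROM regions, which is what firmware and ROM loaders need; addresses
 * that map to neither RAM nor ROMD are silently skipped.  The address and
 * buffer below are hypothetical:
 *
 *     cpu_physical_memory_write_rom(0xfffc0000, rom_blob, rom_blob_len);
 */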
2032
6d16c2f8 2033typedef struct {
d3e71559 2034 MemoryRegion *mr;
6d16c2f8 2035 void *buffer;
a8170e5e
AK
2036 hwaddr addr;
2037 hwaddr len;
6d16c2f8
AL
2038} BounceBuffer;
2039
2040static BounceBuffer bounce;
2041
ba223c29
AL
2042typedef struct MapClient {
2043 void *opaque;
2044 void (*callback)(void *opaque);
72cf2d4f 2045 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2046} MapClient;
2047
72cf2d4f
BS
2048static QLIST_HEAD(map_client_list, MapClient) map_client_list
2049 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2050
2051void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2052{
7267c094 2053 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2054
2055 client->opaque = opaque;
2056 client->callback = callback;
72cf2d4f 2057 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2058 return client;
2059}
2060
8b9c99d9 2061static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2062{
2063 MapClient *client = (MapClient *)_client;
2064
72cf2d4f 2065 QLIST_REMOVE(client, link);
7267c094 2066 g_free(client);
ba223c29
AL
2067}
2068
2069static void cpu_notify_map_clients(void)
2070{
2071 MapClient *client;
2072
72cf2d4f
BS
2073 while (!QLIST_EMPTY(&map_client_list)) {
2074 client = QLIST_FIRST(&map_client_list);
ba223c29 2075 client->callback(client->opaque);
34d5e948 2076 cpu_unregister_map_client(client);
ba223c29
AL
2077 }
2078}
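/* Editor's note: a hedged sketch of the map-client protocol, not part of the
 * original file.  When address_space_map() fails because the single bounce
 * buffer is busy, a caller can register a callback that fires once some
 * mapping is torn down and a retry is likely to succeed.  The callback and
 * state names below are hypothetical:
 *
 *     static void my_retry_cb(void *opaque)
 *     {
 *         MyDMAState *s = opaque;
 *         my_dma_restart(s);            // try address_space_map() again
 *     }
 *
 *     void *p = address_space_map(as, addr, &len, is_write);
 *     if (!p) {
 *         cpu_register_map_client(s, my_retry_cb);
 *         return;
 *     }
 *
 * cpu_notify_map_clients() runs and unregisters every pending client, so a
 * client that still cannot map must re-register itself.
 */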
2079
51644ab7
PB
2080bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2081{
5c8a00ce 2082 MemoryRegion *mr;
51644ab7
PB
2083 hwaddr l, xlat;
2084
2085 while (len > 0) {
2086 l = len;
5c8a00ce
PB
2087 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2088 if (!memory_access_is_direct(mr, is_write)) {
2089 l = memory_access_size(mr, l, addr);
2090 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2091 return false;
2092 }
2093 }
2094
2095 len -= l;
2096 addr += l;
2097 }
2098 return true;
2099}
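/* Editor's note: a minimal sketch, not part of the original file.  A device
 * model can probe whether a guest-supplied DMA window is fully accessible
 * before committing to the transfer; the names below are illustrative:
 *
 *     if (!address_space_access_valid(&address_space_memory,
 *                                     desc_addr, desc_len, false)) {
 *         // reject or fault the request instead of touching the memory
 *     }
 */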
2100
6d16c2f8
AL
2101/* Map a physical memory region into a host virtual address.
2102 * May map a subset of the requested range, given by and returned in *plen.
2103 * May return NULL if resources needed to perform the mapping are exhausted.
2104 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2105 * Use cpu_register_map_client() to know when retrying the map operation is
2106 * likely to succeed.
6d16c2f8 2107 */
ac1970fb 2108void *address_space_map(AddressSpace *as,
a8170e5e
AK
2109 hwaddr addr,
2110 hwaddr *plen,
ac1970fb 2111 bool is_write)
6d16c2f8 2112{
a8170e5e 2113 hwaddr len = *plen;
e3127ae0
PB
2114 hwaddr done = 0;
2115 hwaddr l, xlat, base;
2116 MemoryRegion *mr, *this_mr;
2117 ram_addr_t raddr;
6d16c2f8 2118
e3127ae0
PB
2119 if (len == 0) {
2120 return NULL;
2121 }
38bee5dc 2122
e3127ae0
PB
2123 l = len;
2124 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2125 if (!memory_access_is_direct(mr, is_write)) {
2126 if (bounce.buffer) {
2127 return NULL;
6d16c2f8 2128 }
e3127ae0
PB
2129 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2130 bounce.addr = addr;
2131 bounce.len = l;
d3e71559
PB
2132
2133 memory_region_ref(mr);
2134 bounce.mr = mr;
e3127ae0
PB
2135 if (!is_write) {
2136 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2137 }
6d16c2f8 2138
e3127ae0
PB
2139 *plen = l;
2140 return bounce.buffer;
2141 }
2142
2143 base = xlat;
2144 raddr = memory_region_get_ram_addr(mr);
2145
2146 for (;;) {
6d16c2f8
AL
2147 len -= l;
2148 addr += l;
e3127ae0
PB
2149 done += l;
2150 if (len == 0) {
2151 break;
2152 }
2153
2154 l = len;
2155 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2156 if (this_mr != mr || xlat != base + done) {
2157 break;
2158 }
6d16c2f8 2159 }
e3127ae0 2160
d3e71559 2161 memory_region_ref(mr);
e3127ae0
PB
2162 *plen = done;
2163 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2164}
2165
ac1970fb 2166/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2167 * Will also mark the memory as dirty if is_write == 1. access_len gives
2168 * the amount of memory that was actually read or written by the caller.
2169 */
a8170e5e
AK
2170void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2171 int is_write, hwaddr access_len)
6d16c2f8
AL
2172{
2173 if (buffer != bounce.buffer) {
d3e71559
PB
2174 MemoryRegion *mr;
2175 ram_addr_t addr1;
2176
2177 mr = qemu_ram_addr_from_host(buffer, &addr1);
2178 assert(mr != NULL);
6d16c2f8 2179 if (is_write) {
6d16c2f8
AL
2180 while (access_len) {
2181 unsigned l;
2182 l = TARGET_PAGE_SIZE;
2183 if (l > access_len)
2184 l = access_len;
51d7a9eb 2185 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2186 addr1 += l;
2187 access_len -= l;
2188 }
2189 }
868bb33f 2190 if (xen_enabled()) {
e41d7c69 2191 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2192 }
d3e71559 2193 memory_region_unref(mr);
6d16c2f8
AL
2194 return;
2195 }
2196 if (is_write) {
ac1970fb 2197 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2198 }
f8a83245 2199 qemu_vfree(bounce.buffer);
6d16c2f8 2200 bounce.buffer = NULL;
d3e71559 2201 memory_region_unref(bounce.mr);
ba223c29 2202 cpu_notify_map_clients();
6d16c2f8 2203}
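/* Editor's note: a hedged usage sketch, not part of the original file.
 * A typical zero-copy transfer into guest memory maps the region, operates
 * on the returned host pointer, and unmaps it with the length actually used;
 * fill_from_device() and the other names below are hypothetical:
 *
 *     hwaddr len = size;
 *     void *host = address_space_map(&address_space_memory, gpa, &len, true);
 *     if (host) {
 *         size_t done = fill_from_device(host, len);   // hypothetical helper
 *         address_space_unmap(&address_space_memory, host, len, true, done);
 *     }
 *
 * Note that *plen may come back smaller than requested, and that when the
 * target is MMIO the single bounce buffer allows only one such mapping to be
 * outstanding at a time.
 */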
d0ecd2aa 2204
a8170e5e
AK
2205void *cpu_physical_memory_map(hwaddr addr,
2206 hwaddr *plen,
ac1970fb
AK
2207 int is_write)
2208{
2209 return address_space_map(&address_space_memory, addr, plen, is_write);
2210}
2211
a8170e5e
AK
2212void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2213 int is_write, hwaddr access_len)
ac1970fb
AK
2214{
2215 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2216}
2217
8df1cd07 2218/* warning: addr must be aligned */
a8170e5e 2219static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2220 enum device_endian endian)
8df1cd07 2221{
8df1cd07 2222 uint8_t *ptr;
791af8c8 2223 uint64_t val;
5c8a00ce 2224 MemoryRegion *mr;
149f54b5
PB
2225 hwaddr l = 4;
2226 hwaddr addr1;
8df1cd07 2227
5c8a00ce
PB
2228 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2229 false);
2230 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2231 /* I/O case */
5c8a00ce 2232 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2233#if defined(TARGET_WORDS_BIGENDIAN)
2234 if (endian == DEVICE_LITTLE_ENDIAN) {
2235 val = bswap32(val);
2236 }
2237#else
2238 if (endian == DEVICE_BIG_ENDIAN) {
2239 val = bswap32(val);
2240 }
2241#endif
8df1cd07
FB
2242 } else {
2243 /* RAM case */
5c8a00ce 2244 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2245 & TARGET_PAGE_MASK)
149f54b5 2246 + addr1);
1e78bcc1
AG
2247 switch (endian) {
2248 case DEVICE_LITTLE_ENDIAN:
2249 val = ldl_le_p(ptr);
2250 break;
2251 case DEVICE_BIG_ENDIAN:
2252 val = ldl_be_p(ptr);
2253 break;
2254 default:
2255 val = ldl_p(ptr);
2256 break;
2257 }
8df1cd07
FB
2258 }
2259 return val;
2260}
2261
a8170e5e 2262uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2263{
2264 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2265}
2266
a8170e5e 2267uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2268{
2269 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2270}
2271
a8170e5e 2272uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2273{
2274 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2275}
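/* Editor's note: an illustrative sketch, not part of the original file.
 * The ld*_phys() family reads a naturally aligned value straight from guest
 * physical memory (or through the MMIO path), with the suffix selecting the
 * endianness applied to the result; the address below is hypothetical:
 *
 *     uint32_t native = ldl_phys(0x1000);      // target byte order
 *     uint32_t le     = ldl_le_phys(0x1000);   // little-endian view
 *     uint32_t be     = ldl_be_phys(0x1000);   // big-endian view
 */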
2276
84b7b8e7 2277/* warning: addr must be aligned */
a8170e5e 2278static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2279 enum device_endian endian)
84b7b8e7 2280{
84b7b8e7
FB
2281 uint8_t *ptr;
2282 uint64_t val;
5c8a00ce 2283 MemoryRegion *mr;
149f54b5
PB
2284 hwaddr l = 8;
2285 hwaddr addr1;
84b7b8e7 2286
5c8a00ce
PB
2287 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2288 false);
2289 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2290 /* I/O case */
5c8a00ce 2291 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2292#if defined(TARGET_WORDS_BIGENDIAN)
2293 if (endian == DEVICE_LITTLE_ENDIAN) {
2294 val = bswap64(val);
2295 }
2296#else
2297 if (endian == DEVICE_BIG_ENDIAN) {
2298 val = bswap64(val);
2299 }
84b7b8e7
FB
2300#endif
2301 } else {
2302 /* RAM case */
5c8a00ce 2303 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2304 & TARGET_PAGE_MASK)
149f54b5 2305 + addr1);
1e78bcc1
AG
2306 switch (endian) {
2307 case DEVICE_LITTLE_ENDIAN:
2308 val = ldq_le_p(ptr);
2309 break;
2310 case DEVICE_BIG_ENDIAN:
2311 val = ldq_be_p(ptr);
2312 break;
2313 default:
2314 val = ldq_p(ptr);
2315 break;
2316 }
84b7b8e7
FB
2317 }
2318 return val;
2319}
2320
a8170e5e 2321uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2322{
2323 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2324}
2325
a8170e5e 2326uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2327{
2328 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2329}
2330
a8170e5e 2331uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2332{
2333 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2334}
2335
aab33094 2336/* XXX: optimize */
a8170e5e 2337uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2338{
2339 uint8_t val;
2340 cpu_physical_memory_read(addr, &val, 1);
2341 return val;
2342}
2343
733f0b02 2344/* warning: addr must be aligned */
a8170e5e 2345static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2346 enum device_endian endian)
aab33094 2347{
733f0b02
MT
2348 uint8_t *ptr;
2349 uint64_t val;
5c8a00ce 2350 MemoryRegion *mr;
149f54b5
PB
2351 hwaddr l = 2;
2352 hwaddr addr1;
733f0b02 2353
5c8a00ce
PB
2354 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2355 false);
2356 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2357 /* I/O case */
5c8a00ce 2358 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2359#if defined(TARGET_WORDS_BIGENDIAN)
2360 if (endian == DEVICE_LITTLE_ENDIAN) {
2361 val = bswap16(val);
2362 }
2363#else
2364 if (endian == DEVICE_BIG_ENDIAN) {
2365 val = bswap16(val);
2366 }
2367#endif
733f0b02
MT
2368 } else {
2369 /* RAM case */
5c8a00ce 2370 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2371 & TARGET_PAGE_MASK)
149f54b5 2372 + addr1);
1e78bcc1
AG
2373 switch (endian) {
2374 case DEVICE_LITTLE_ENDIAN:
2375 val = lduw_le_p(ptr);
2376 break;
2377 case DEVICE_BIG_ENDIAN:
2378 val = lduw_be_p(ptr);
2379 break;
2380 default:
2381 val = lduw_p(ptr);
2382 break;
2383 }
733f0b02
MT
2384 }
2385 return val;
aab33094
FB
2386}
2387
a8170e5e 2388uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2389{
2390 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2391}
2392
a8170e5e 2393uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2394{
2395 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2396}
2397
a8170e5e 2398uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2399{
2400 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2401}
2402
8df1cd07
FB
 2403/* warning: addr must be aligned. The RAM page is not marked as dirty
2404 and the code inside is not invalidated. It is useful if the dirty
2405 bits are used to track modified PTEs */
a8170e5e 2406void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2407{
8df1cd07 2408 uint8_t *ptr;
5c8a00ce 2409 MemoryRegion *mr;
149f54b5
PB
2410 hwaddr l = 4;
2411 hwaddr addr1;
8df1cd07 2412
5c8a00ce
PB
2413 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2414 true);
2415 if (l < 4 || !memory_access_is_direct(mr, true)) {
2416 io_mem_write(mr, addr1, val, 4);
8df1cd07 2417 } else {
5c8a00ce 2418 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2419 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2420 stl_p(ptr, val);
74576198
AL
2421
2422 if (unlikely(in_migration)) {
2423 if (!cpu_physical_memory_is_dirty(addr1)) {
2424 /* invalidate code */
2425 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2426 /* set dirty bit */
f7c11b53
YT
2427 cpu_physical_memory_set_dirty_flags(
2428 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2429 }
2430 }
8df1cd07
FB
2431 }
2432}
2433
2434/* warning: addr must be aligned */
a8170e5e 2435static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2436 enum device_endian endian)
8df1cd07 2437{
8df1cd07 2438 uint8_t *ptr;
5c8a00ce 2439 MemoryRegion *mr;
149f54b5
PB
2440 hwaddr l = 4;
2441 hwaddr addr1;
8df1cd07 2442
5c8a00ce
PB
2443 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2444 true);
2445 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2446#if defined(TARGET_WORDS_BIGENDIAN)
2447 if (endian == DEVICE_LITTLE_ENDIAN) {
2448 val = bswap32(val);
2449 }
2450#else
2451 if (endian == DEVICE_BIG_ENDIAN) {
2452 val = bswap32(val);
2453 }
2454#endif
5c8a00ce 2455 io_mem_write(mr, addr1, val, 4);
8df1cd07 2456 } else {
8df1cd07 2457 /* RAM case */
5c8a00ce 2458 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2459 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2460 switch (endian) {
2461 case DEVICE_LITTLE_ENDIAN:
2462 stl_le_p(ptr, val);
2463 break;
2464 case DEVICE_BIG_ENDIAN:
2465 stl_be_p(ptr, val);
2466 break;
2467 default:
2468 stl_p(ptr, val);
2469 break;
2470 }
51d7a9eb 2471 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2472 }
2473}
2474
a8170e5e 2475void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2476{
2477 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2478}
2479
a8170e5e 2480void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2481{
2482 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2483}
2484
a8170e5e 2485void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2486{
2487 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2488}
2489
aab33094 2490/* XXX: optimize */
a8170e5e 2491void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2492{
2493 uint8_t v = val;
2494 cpu_physical_memory_write(addr, &v, 1);
2495}
2496
733f0b02 2497/* warning: addr must be aligned */
a8170e5e 2498static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2499 enum device_endian endian)
aab33094 2500{
733f0b02 2501 uint8_t *ptr;
5c8a00ce 2502 MemoryRegion *mr;
149f54b5
PB
2503 hwaddr l = 2;
2504 hwaddr addr1;
733f0b02 2505
5c8a00ce
PB
2506 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2507 true);
2508 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2509#if defined(TARGET_WORDS_BIGENDIAN)
2510 if (endian == DEVICE_LITTLE_ENDIAN) {
2511 val = bswap16(val);
2512 }
2513#else
2514 if (endian == DEVICE_BIG_ENDIAN) {
2515 val = bswap16(val);
2516 }
2517#endif
5c8a00ce 2518 io_mem_write(mr, addr1, val, 2);
733f0b02 2519 } else {
733f0b02 2520 /* RAM case */
5c8a00ce 2521 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2522 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2523 switch (endian) {
2524 case DEVICE_LITTLE_ENDIAN:
2525 stw_le_p(ptr, val);
2526 break;
2527 case DEVICE_BIG_ENDIAN:
2528 stw_be_p(ptr, val);
2529 break;
2530 default:
2531 stw_p(ptr, val);
2532 break;
2533 }
51d7a9eb 2534 invalidate_and_set_dirty(addr1, 2);
733f0b02 2535 }
aab33094
FB
2536}
2537
a8170e5e 2538void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2539{
2540 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2541}
2542
a8170e5e 2543void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2544{
2545 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2546}
2547
a8170e5e 2548void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2549{
2550 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2551}
2552
aab33094 2553/* XXX: optimize */
a8170e5e 2554void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2555{
2556 val = tswap64(val);
71d2b725 2557 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2558}
2559
a8170e5e 2560void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2561{
2562 val = cpu_to_le64(val);
2563 cpu_physical_memory_write(addr, &val, 8);
2564}
2565
a8170e5e 2566void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2567{
2568 val = cpu_to_be64(val);
2569 cpu_physical_memory_write(addr, &val, 8);
2570}
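/* Editor's note: a minimal sketch, not part of the original file.  The
 * st*_phys() helpers mirror the loads above; a device model writing a
 * little-endian 64-bit field into a guest-resident descriptor might do
 * (the descriptor address is hypothetical):
 *
 *     stq_le_phys(desc_addr + 8, buffer_gpa);
 *
 * stl_phys_notdirty() is the exception: it skips the dirty-bit and
 * TB-invalidation bookkeeping, which only makes sense for PTE-style updates
 * whose dirty state is tracked elsewhere.
 */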
2571
5e2972fd 2572/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 2573int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 2574 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2575{
2576 int l;
a8170e5e 2577 hwaddr phys_addr;
9b3c35e0 2578 target_ulong page;
13eb76e0
FB
2579
2580 while (len > 0) {
2581 page = addr & TARGET_PAGE_MASK;
2582 phys_addr = cpu_get_phys_page_debug(env, page);
2583 /* if no physical page mapped, return an error */
2584 if (phys_addr == -1)
2585 return -1;
2586 l = (page + TARGET_PAGE_SIZE) - addr;
2587 if (l > len)
2588 l = len;
5e2972fd 2589 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2590 if (is_write)
2591 cpu_physical_memory_write_rom(phys_addr, buf, l);
2592 else
5e2972fd 2593 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2594 len -= l;
2595 buf += l;
2596 addr += l;
2597 }
2598 return 0;
2599}
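/* Editor's note: a hedged usage sketch, not part of the original file.
 * This debug path walks the guest page tables via cpu_get_phys_page_debug(),
 * so it operates on virtual addresses and can write through ROM; the gdbstub
 * is its main user.  The names below are hypothetical:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, guest_pc, insn, sizeof(insn), 0) < 0) {
 *         // no physical page mapped at guest_pc
 *     }
 */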
a68fe89c 2600#endif
13eb76e0 2601
8e4a424b
BS
2602#if !defined(CONFIG_USER_ONLY)
2603
2604/*
2605 * A helper function for the _utterly broken_ virtio device model to find out if
2606 * it's running on a big endian machine. Don't do this at home kids!
2607 */
2608bool virtio_is_big_endian(void);
2609bool virtio_is_big_endian(void)
2610{
2611#if defined(TARGET_WORDS_BIGENDIAN)
2612 return true;
2613#else
2614 return false;
2615#endif
2616}
2617
2618#endif
2619
76f35538 2620#ifndef CONFIG_USER_ONLY
a8170e5e 2621bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2622{
5c8a00ce 2623 MemoryRegion *mr;
149f54b5 2624 hwaddr l = 1;
76f35538 2625
5c8a00ce
PB
2626 mr = address_space_translate(&address_space_memory,
2627 phys_addr, &phys_addr, &l, false);
76f35538 2628
5c8a00ce
PB
2629 return !(memory_region_is_ram(mr) ||
2630 memory_region_is_romd(mr));
76f35538 2631}
bd2fa51f
MH
2632
2633void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2634{
2635 RAMBlock *block;
2636
2637 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2638 func(block->host, block->offset, block->length, opaque);
2639 }
2640}
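/* Editor's note: a minimal sketch, not part of the original file.  The
 * callback receives each block's host pointer, ram_addr_t offset and length,
 * matching the call above; useful e.g. for registering all guest RAM with an
 * external mapper.  The callback name is hypothetical:
 *
 *     static void dump_block(void *host, ram_addr_t offset,
 *                            ram_addr_t length, void *opaque)
 *     {
 *         fprintf(stderr, "block host %p offset 0x%llx len 0x%llx\n",
 *                 host, (unsigned long long)offset,
 *                 (unsigned long long)length);
 *     }
 *
 *     qemu_ram_foreach_block(dump_block, NULL);
 */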
ec3f8c99 2641#endif