/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
     /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

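/*
 * The dispatch map declared above is effectively a radix tree over physical
 * page numbers: interior nodes live in the nodes[] array and leaves hold
 * indices into sections[].  For illustration only (the actual constants
 * depend on the target configuration): with L2_BITS == 10, each tree level
 * consumes 10 bits of the page index and each Node holds L2_SIZE == 1024
 * PhysPageEntry slots.
 */
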
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}

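/*
 * phys_page_set() and phys_page_find() are the two halves of the dispatch
 * map: registration code stores a section index for a run of pages, and the
 * memory access path later resolves a page index back to a
 * MemoryRegionSection.  For example, register_multipage() below boils down
 * to phys_page_set(d, addr >> TARGET_PAGE_BITS, num_pages, section_index).
 */
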
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
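
/*
 * Callers such as address_space_rw() use address_space_translate() to turn
 * an (address space, address, length) triple into a terminal MemoryRegion
 * plus an offset and a clamped length; the loop above simply keeps
 * re-translating through IOMMU regions until it reaches a region without
 * iommu_ops.
 */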

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(cpu, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    RAMBlock *block;
    ram_addr_t start1;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

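/*
 * mem_add() splits an incoming MemoryRegionSection into at most three parts:
 * an unaligned head registered through a subpage, a run of whole target
 * pages registered via register_multipage(), and an unaligned tail that
 * again ends up in a subpage.  Sub-page granularity is what allows several
 * small regions (e.g. MMIO) to share a single target page.
 */
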
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->fd = -1;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                fprintf(stderr,
                        "-mem-path not supported with this accelerator\n");
                exit(1);
            }
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        }
        if (!new_block->host) {
            new_block->host = phys_mem_alloc(size);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
                exit(1);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

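/*
 * Note that qemu_ram_alloc_from_ptr() keeps ram_list.blocks sorted from the
 * biggest to the smallest block and invalidates the MRU pointer; the dirty
 * bitmap is then grown to cover the new block and every page in it starts
 * out fully dirty (0xff).
 */
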
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                close(block->fd);
#endif
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
#ifdef MAP_POPULATE
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                        MAP_PRIVATE;
#else
                    flags |= MAP_PRIVATE;
#endif
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

b4051334 1419static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1420{
4917cf44 1421 CPUArchState *env = current_cpu->env_ptr;
06d55cc1 1422 target_ulong pc, cs_base;
0f459d16 1423 target_ulong vaddr;
a1d1bb31 1424 CPUWatchpoint *wp;
06d55cc1 1425 int cpu_flags;
0f459d16 1426
06d55cc1
AL
1427 if (env->watchpoint_hit) {
1428 /* We re-entered the check after replacing the TB. Now raise
1429 * the debug interrupt so that is will trigger after the
1430 * current instruction. */
c3affe56 1431 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1432 return;
1433 }
2e70f6ef 1434 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 1435 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
1436 if ((vaddr == (wp->vaddr & len_mask) ||
1437 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
1438 wp->flags |= BP_WATCHPOINT_HIT;
1439 if (!env->watchpoint_hit) {
1440 env->watchpoint_hit = wp;
5a316526 1441 tb_check_watchpoint(env);
6e140f28
AL
1442 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1443 env->exception_index = EXCP_DEBUG;
488d6577 1444 cpu_loop_exit(env);
6e140f28
AL
1445 } else {
1446 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1447 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 1448 cpu_resume_from_signal(env, NULL);
6e140f28 1449 }
06d55cc1 1450 }
6e140f28
AL
1451 } else {
1452 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1453 }
1454 }
1455}
1456
6658ffb8
PB
1457/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1458 so these check for a hit then pass through to the normal out-of-line
1459 phys routines. */
a8170e5e 1460static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1461 unsigned size)
6658ffb8 1462{
1ec9b909
AK
1463 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1464 switch (size) {
1465 case 1: return ldub_phys(addr);
1466 case 2: return lduw_phys(addr);
1467 case 4: return ldl_phys(addr);
1468 default: abort();
1469 }
6658ffb8
PB
1470}
1471
a8170e5e 1472static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1473 uint64_t val, unsigned size)
6658ffb8 1474{
1ec9b909
AK
1475 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1476 switch (size) {
67364150
MF
1477 case 1:
1478 stb_phys(addr, val);
1479 break;
1480 case 2:
1481 stw_phys(addr, val);
1482 break;
1483 case 4:
1484 stl_phys(addr, val);
1485 break;
1ec9b909
AK
1486 default: abort();
1487 }
6658ffb8
PB
1488}
1489
1ec9b909
AK
1490static const MemoryRegionOps watch_mem_ops = {
1491 .read = watch_mem_read,
1492 .write = watch_mem_write,
1493 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1494};
6658ffb8 1495
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->as = as;
    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    next->nodes = next_map.nodes;
    next->sections = next_map.sections;

    as->dispatch = next;
    g_free(cur);
}

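/*
 * Dispatch maps are rebuilt in two phases: mem_begin() installs a fresh
 * AddressSpaceDispatch as as->next_dispatch, the region_add callbacks
 * populate it against next_map, and mem_commit() then swaps it in place of
 * the old one.  core_begin()/core_commit() below bracket this by
 * snapshotting the previous PhysPageMap and freeing it once every address
 * space has switched over.
 */
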
static void core_begin(MemoryListener *listener)
{
    uint16_t n;

    prev_map = g_new(PhysPageMap, 1);
    *prev_map = next_map;

    memset(&next_map, 0, sizeof(next_map));
    n = dummy_section(&io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);
}

/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
 * All AddressSpaceDispatch instances have switched to the next map.
 */
static void core_commit(MemoryListener *listener)
{
    phys_sections_free(prev_map);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

ac1970fb
AK
1719void address_space_init_dispatch(AddressSpace *as)
1720{
00752703 1721 as->dispatch = NULL;
89ae337a 1722 as->dispatch_listener = (MemoryListener) {
ac1970fb 1723 .begin = mem_begin,
00752703 1724 .commit = mem_commit,
ac1970fb
AK
1725 .region_add = mem_add,
1726 .region_nop = mem_add,
1727 .priority = 0,
1728 };
89ae337a 1729 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1730}
1731
83f3c251
AK
1732void address_space_destroy_dispatch(AddressSpace *as)
1733{
1734 AddressSpaceDispatch *d = as->dispatch;
1735
89ae337a 1736 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1737 g_free(d);
1738 as->dispatch = NULL;
1739}
1740
62152b8a
AK
1741static void memory_map_init(void)
1742{
7267c094 1743 system_memory = g_malloc(sizeof(*system_memory));
2c9b15ca 1744 memory_region_init(system_memory, NULL, "system", INT64_MAX);
7dca8043 1745 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1746
7267c094 1747 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
1748 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1749 65536);
7dca8043 1750 address_space_init(&address_space_io, system_io, "I/O");
93632747 1751
f6790af6 1752 memory_listener_register(&core_memory_listener, &address_space_memory);
2641689a 1753 if (tcg_enabled()) {
1754 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1755 }
62152b8a
AK
1756}
1757
1758MemoryRegion *get_system_memory(void)
1759{
1760 return system_memory;
1761}
1762
309cb471
AK
1763MemoryRegion *get_system_io(void)
1764{
1765 return system_io;
1766}
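
/* Example sketch, not part of the original file: how board code typically
 * plugs a RAM region into the flat view returned by get_system_memory().
 * The exact memory_region_init_ram() signature differs between QEMU
 * versions; this assumes the owner-taking variant used elsewhere in this
 * tree, and "example.ram" plus the base address are arbitrary. */
static void example_map_ram(hwaddr base, uint64_t size)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, NULL, "example.ram", size);
    memory_region_add_subregion(get_system_memory(), base, ram);
}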
1767
e2eef170
PB
1768#endif /* !defined(CONFIG_USER_ONLY) */
1769
13eb76e0
FB
1770/* physical memory access (slow version, mainly for debug) */
1771#if defined(CONFIG_USER_ONLY)
f17ec444 1772int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 1773 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1774{
1775 int l, flags;
1776 target_ulong page;
53a5960a 1777 void * p;
13eb76e0
FB
1778
1779 while (len > 0) {
1780 page = addr & TARGET_PAGE_MASK;
1781 l = (page + TARGET_PAGE_SIZE) - addr;
1782 if (l > len)
1783 l = len;
1784 flags = page_get_flags(page);
1785 if (!(flags & PAGE_VALID))
a68fe89c 1786 return -1;
13eb76e0
FB
1787 if (is_write) {
1788 if (!(flags & PAGE_WRITE))
a68fe89c 1789 return -1;
579a97f7 1790 /* XXX: this code should not depend on lock_user */
72fb7daa 1791 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1792 return -1;
72fb7daa
AJ
1793 memcpy(p, buf, l);
1794 unlock_user(p, addr, l);
13eb76e0
FB
1795 } else {
1796 if (!(flags & PAGE_READ))
a68fe89c 1797 return -1;
579a97f7 1798 /* XXX: this code should not depend on lock_user */
72fb7daa 1799 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1800 return -1;
72fb7daa 1801 memcpy(buf, p, l);
5b257578 1802 unlock_user(p, addr, 0);
13eb76e0
FB
1803 }
1804 len -= l;
1805 buf += l;
1806 addr += l;
1807 }
a68fe89c 1808 return 0;
13eb76e0 1809}
8df1cd07 1810
13eb76e0 1811#else
51d7a9eb 1812
a8170e5e
AK
1813static void invalidate_and_set_dirty(hwaddr addr,
1814 hwaddr length)
51d7a9eb
AP
1815{
1816 if (!cpu_physical_memory_is_dirty(addr)) {
1817 /* invalidate code */
1818 tb_invalidate_phys_page_range(addr, addr + length, 0);
1819 /* set dirty bit */
1820 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1821 }
e226939d 1822 xen_modified_memory(addr, length);
51d7a9eb
AP
1823}
1824
2bbfa05d
PB
1825static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1826{
1827 if (memory_region_is_ram(mr)) {
1828 return !(is_write && mr->readonly);
1829 }
1830 if (memory_region_is_romd(mr)) {
1831 return !is_write;
1832 }
1833
1834 return false;
1835}
1836
23326164 1837static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 1838{
e1622f4b 1839 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
1840
1841 /* Regions are assumed to support 1-4 byte accesses unless
1842 otherwise specified. */
23326164
RH
1843 if (access_size_max == 0) {
1844 access_size_max = 4;
1845 }
1846
1847 /* Bound the maximum access by the alignment of the address. */
1848 if (!mr->ops->impl.unaligned) {
1849 unsigned align_size_max = addr & -addr;
1850 if (align_size_max != 0 && align_size_max < access_size_max) {
1851 access_size_max = align_size_max;
1852 }
82f2563f 1853 }
23326164
RH
1854
1855 /* Don't attempt accesses larger than the maximum. */
1856 if (l > access_size_max) {
1857 l = access_size_max;
82f2563f 1858 }
098178f2
PB
1859 if (l & (l - 1)) {
1860 l = 1 << (qemu_fls(l) - 1);
1861 }
23326164
RH
1862
1863 return l;
82f2563f
PB
1864}
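
/* Example sketch, not part of the original file: the clamping performed by
 * memory_access_size() redone on plain integers so the arithmetic is easy
 * to follow.  For addr = 0x1006 (lowest set bit 2), a requested length of 8
 * and max_access_size 4 with no unaligned support, the result is 2. */
static unsigned example_clamp_access(unsigned max_access_size, bool unaligned,
                                     unsigned l, uint64_t addr)
{
    if (max_access_size == 0) {
        max_access_size = 4;                    /* same default as above */
    }
    if (!unaligned) {
        uint64_t align_size_max = addr & -addr; /* lowest set bit of addr */
        if (align_size_max != 0 && align_size_max < max_access_size) {
            max_access_size = align_size_max;
        }
    }
    if (l > max_access_size) {
        l = max_access_size;
    }
    while (l & (l - 1)) {                       /* round down to a power of two */
        l &= l - 1;
    }
    return l;
}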
1865
fd8aaa76 1866bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1867 int len, bool is_write)
13eb76e0 1868{
149f54b5 1869 hwaddr l;
13eb76e0 1870 uint8_t *ptr;
791af8c8 1871 uint64_t val;
149f54b5 1872 hwaddr addr1;
5c8a00ce 1873 MemoryRegion *mr;
fd8aaa76 1874 bool error = false;
3b46e624 1875
13eb76e0 1876 while (len > 0) {
149f54b5 1877 l = len;
5c8a00ce 1878 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 1879
13eb76e0 1880 if (is_write) {
5c8a00ce
PB
1881 if (!memory_access_is_direct(mr, is_write)) {
1882 l = memory_access_size(mr, l, addr1);
4917cf44 1883 /* XXX: could force current_cpu to NULL to avoid
6a00d601 1884 potential bugs */
23326164
RH
1885 switch (l) {
1886 case 8:
1887 /* 64 bit write access */
1888 val = ldq_p(buf);
1889 error |= io_mem_write(mr, addr1, val, 8);
1890 break;
1891 case 4:
1c213d19 1892 /* 32 bit write access */
c27004ec 1893 val = ldl_p(buf);
5c8a00ce 1894 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
1895 break;
1896 case 2:
1c213d19 1897 /* 16 bit write access */
c27004ec 1898 val = lduw_p(buf);
5c8a00ce 1899 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
1900 break;
1901 case 1:
1c213d19 1902 /* 8 bit write access */
c27004ec 1903 val = ldub_p(buf);
5c8a00ce 1904 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
1905 break;
1906 default:
1907 abort();
13eb76e0 1908 }
2bbfa05d 1909 } else {
5c8a00ce 1910 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 1911 /* RAM case */
5579c7f3 1912 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1913 memcpy(ptr, buf, l);
51d7a9eb 1914 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1915 }
1916 } else {
5c8a00ce 1917 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 1918 /* I/O case */
5c8a00ce 1919 l = memory_access_size(mr, l, addr1);
23326164
RH
1920 switch (l) {
1921 case 8:
1922 /* 64 bit read access */
1923 error |= io_mem_read(mr, addr1, &val, 8);
1924 stq_p(buf, val);
1925 break;
1926 case 4:
13eb76e0 1927 /* 32 bit read access */
5c8a00ce 1928 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 1929 stl_p(buf, val);
23326164
RH
1930 break;
1931 case 2:
13eb76e0 1932 /* 16 bit read access */
5c8a00ce 1933 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 1934 stw_p(buf, val);
23326164
RH
1935 break;
1936 case 1:
1c213d19 1937 /* 8 bit read access */
5c8a00ce 1938 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 1939 stb_p(buf, val);
23326164
RH
1940 break;
1941 default:
1942 abort();
13eb76e0
FB
1943 }
1944 } else {
1945 /* RAM case */
5c8a00ce 1946 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 1947 memcpy(buf, ptr, l);
13eb76e0
FB
1948 }
1949 }
1950 len -= l;
1951 buf += l;
1952 addr += l;
1953 }
fd8aaa76
PB
1954
1955 return error;
13eb76e0 1956}
8df1cd07 1957
fd8aaa76 1958bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1959 const uint8_t *buf, int len)
1960{
fd8aaa76 1961 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
1962}
1963
fd8aaa76 1964bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 1965{
fd8aaa76 1966 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
1967}
1968
1969
a8170e5e 1970void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1971 int len, int is_write)
1972{
fd8aaa76 1973 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
1974}
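
/* Example sketch, not part of the original file: round-tripping a small
 * buffer through guest physical memory with the helpers above.  0x1000 is
 * an arbitrary guest-physical address assumed to be RAM-backed. */
static void example_phys_roundtrip(void)
{
    uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];

    cpu_physical_memory_write(0x1000, out, sizeof(out));
    cpu_physical_memory_read(0x1000, in, sizeof(in));
    /* in[] now holds the same bytes as out[] */
}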
1975
d0ecd2aa 1976/* used for ROM loading: can write in RAM and ROM */
a8170e5e 1977void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1978 const uint8_t *buf, int len)
1979{
149f54b5 1980 hwaddr l;
d0ecd2aa 1981 uint8_t *ptr;
149f54b5 1982 hwaddr addr1;
5c8a00ce 1983 MemoryRegion *mr;
3b46e624 1984
d0ecd2aa 1985 while (len > 0) {
149f54b5 1986 l = len;
5c8a00ce
PB
1987 mr = address_space_translate(&address_space_memory,
1988 addr, &addr1, &l, true);
3b46e624 1989
5c8a00ce
PB
1990 if (!(memory_region_is_ram(mr) ||
1991 memory_region_is_romd(mr))) {
d0ecd2aa
FB
1992 /* do nothing */
1993 } else {
5c8a00ce 1994 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 1995 /* ROM/RAM case */
5579c7f3 1996 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 1997 memcpy(ptr, buf, l);
51d7a9eb 1998 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
1999 }
2000 len -= l;
2001 buf += l;
2002 addr += l;
2003 }
2004}
2005
6d16c2f8 2006typedef struct {
d3e71559 2007 MemoryRegion *mr;
6d16c2f8 2008 void *buffer;
a8170e5e
AK
2009 hwaddr addr;
2010 hwaddr len;
6d16c2f8
AL
2011} BounceBuffer;
2012
2013static BounceBuffer bounce;
2014
ba223c29
AL
2015typedef struct MapClient {
2016 void *opaque;
2017 void (*callback)(void *opaque);
72cf2d4f 2018 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2019} MapClient;
2020
72cf2d4f
BS
2021static QLIST_HEAD(map_client_list, MapClient) map_client_list
2022 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2023
2024void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2025{
7267c094 2026 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2027
2028 client->opaque = opaque;
2029 client->callback = callback;
72cf2d4f 2030 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2031 return client;
2032}
2033
8b9c99d9 2034static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2035{
2036 MapClient *client = (MapClient *)_client;
2037
72cf2d4f 2038 QLIST_REMOVE(client, link);
7267c094 2039 g_free(client);
ba223c29
AL
2040}
2041
2042static void cpu_notify_map_clients(void)
2043{
2044 MapClient *client;
2045
72cf2d4f
BS
2046 while (!QLIST_EMPTY(&map_client_list)) {
2047 client = QLIST_FIRST(&map_client_list);
ba223c29 2048 client->callback(client->opaque);
34d5e948 2049 cpu_unregister_map_client(client);
ba223c29
AL
2050 }
2051}
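
/* Example sketch, not part of the original file: the retry pattern that
 * cpu_register_map_client() supports.  A device model whose call to
 * address_space_map() returned NULL (for instance because the single
 * bounce buffer was busy) registers a callback and retries once a mapping
 * has been released.  ExampleDMAState and example_dma_run are hypothetical. */
typedef struct ExampleDMAState {
    hwaddr addr;
    hwaddr len;
} ExampleDMAState;

static void example_dma_run(void *opaque)
{
    ExampleDMAState *s = opaque;
    hwaddr len = s->len;
    void *p = address_space_map(&address_space_memory, s->addr, &len, true);

    if (!p) {
        /* still out of resources: wait for the next notification */
        cpu_register_map_client(s, example_dma_run);
        return;
    }
    /* ... fill p with up to len bytes, then ... */
    address_space_unmap(&address_space_memory, p, len, true, len);
}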
2052
51644ab7
PB
2053bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2054{
5c8a00ce 2055 MemoryRegion *mr;
51644ab7
PB
2056 hwaddr l, xlat;
2057
2058 while (len > 0) {
2059 l = len;
5c8a00ce
PB
2060 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2061 if (!memory_access_is_direct(mr, is_write)) {
2062 l = memory_access_size(mr, l, addr);
2063 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2064 return false;
2065 }
2066 }
2067
2068 len -= l;
2069 addr += l;
2070 }
2071 return true;
2072}
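
/* Example sketch, not part of the original file: probing whether a DMA
 * window is accessible in both directions before starting a transfer,
 * rather than failing half-way through an address_space_rw() call. */
static bool example_dma_window_ok(AddressSpace *as, hwaddr addr, int len)
{
    return address_space_access_valid(as, addr, len, false) &&
           address_space_access_valid(as, addr, len, true);
}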
2073
6d16c2f8
AL
2074/* Map a physical memory region into a host virtual address.
2075 * May map a subset of the requested range, given by and returned in *plen.
2076 * May return NULL if resources needed to perform the mapping are exhausted.
2077 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2078 * Use cpu_register_map_client() to know when retrying the map operation is
2079 * likely to succeed.
6d16c2f8 2080 */
ac1970fb 2081void *address_space_map(AddressSpace *as,
a8170e5e
AK
2082 hwaddr addr,
2083 hwaddr *plen,
ac1970fb 2084 bool is_write)
6d16c2f8 2085{
a8170e5e 2086 hwaddr len = *plen;
e3127ae0
PB
2087 hwaddr done = 0;
2088 hwaddr l, xlat, base;
2089 MemoryRegion *mr, *this_mr;
2090 ram_addr_t raddr;
6d16c2f8 2091
e3127ae0
PB
2092 if (len == 0) {
2093 return NULL;
2094 }
38bee5dc 2095
e3127ae0
PB
2096 l = len;
2097 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2098 if (!memory_access_is_direct(mr, is_write)) {
2099 if (bounce.buffer) {
2100 return NULL;
6d16c2f8 2101 }
e3127ae0
PB
2102 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2103 bounce.addr = addr;
2104 bounce.len = l;
d3e71559
PB
2105
2106 memory_region_ref(mr);
2107 bounce.mr = mr;
e3127ae0
PB
2108 if (!is_write) {
2109 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2110 }
6d16c2f8 2111
e3127ae0
PB
2112 *plen = l;
2113 return bounce.buffer;
2114 }
2115
2116 base = xlat;
2117 raddr = memory_region_get_ram_addr(mr);
2118
2119 for (;;) {
6d16c2f8
AL
2120 len -= l;
2121 addr += l;
e3127ae0
PB
2122 done += l;
2123 if (len == 0) {
2124 break;
2125 }
2126
2127 l = len;
2128 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2129 if (this_mr != mr || xlat != base + done) {
2130 break;
2131 }
6d16c2f8 2132 }
e3127ae0 2133
d3e71559 2134 memory_region_ref(mr);
e3127ae0
PB
2135 *plen = done;
2136 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2137}
2138
ac1970fb 2139/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2140 * Will also mark the memory as dirty if is_write == 1. access_len gives
2141 * the amount of memory that was actually read or written by the caller.
2142 */
a8170e5e
AK
2143void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2144 int is_write, hwaddr access_len)
6d16c2f8
AL
2145{
2146 if (buffer != bounce.buffer) {
d3e71559
PB
2147 MemoryRegion *mr;
2148 ram_addr_t addr1;
2149
2150 mr = qemu_ram_addr_from_host(buffer, &addr1);
2151 assert(mr != NULL);
6d16c2f8 2152 if (is_write) {
6d16c2f8
AL
2153 while (access_len) {
2154 unsigned l;
2155 l = TARGET_PAGE_SIZE;
2156 if (l > access_len)
2157 l = access_len;
51d7a9eb 2158 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2159 addr1 += l;
2160 access_len -= l;
2161 }
2162 }
868bb33f 2163 if (xen_enabled()) {
e41d7c69 2164 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2165 }
d3e71559 2166 memory_region_unref(mr);
6d16c2f8
AL
2167 return;
2168 }
2169 if (is_write) {
ac1970fb 2170 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2171 }
f8a83245 2172 qemu_vfree(bounce.buffer);
6d16c2f8 2173 bounce.buffer = NULL;
d3e71559 2174 memory_region_unref(bounce.mr);
ba223c29 2175 cpu_notify_map_clients();
6d16c2f8 2176}
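
/* Example sketch, not part of the original file: the usual pairing of
 * address_space_map() and address_space_unmap().  The returned length may
 * be shorter than requested, and when the bounce buffer is handed out the
 * data only reaches guest memory in address_space_unmap(), so every mapped
 * pointer must be unmapped. */
static void example_zero_region(AddressSpace *as, hwaddr addr, hwaddr len)
{
    while (len > 0) {
        hwaddr l = len;
        void *p = address_space_map(as, addr, &l, true);

        if (!p) {
            break;      /* resources exhausted; see cpu_register_map_client() */
        }
        memset(p, 0, l);
        address_space_unmap(as, p, l, true, l);
        addr += l;
        len -= l;
    }
}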
d0ecd2aa 2177
a8170e5e
AK
2178void *cpu_physical_memory_map(hwaddr addr,
2179 hwaddr *plen,
ac1970fb
AK
2180 int is_write)
2181{
2182 return address_space_map(&address_space_memory, addr, plen, is_write);
2183}
2184
a8170e5e
AK
2185void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2186 int is_write, hwaddr access_len)
ac1970fb
AK
2187{
2188 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2189}
2190
8df1cd07 2191/* warning: addr must be aligned */
a8170e5e 2192static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2193 enum device_endian endian)
8df1cd07 2194{
8df1cd07 2195 uint8_t *ptr;
791af8c8 2196 uint64_t val;
5c8a00ce 2197 MemoryRegion *mr;
149f54b5
PB
2198 hwaddr l = 4;
2199 hwaddr addr1;
8df1cd07 2200
5c8a00ce
PB
2201 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2202 false);
2203 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2204 /* I/O case */
5c8a00ce 2205 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2206#if defined(TARGET_WORDS_BIGENDIAN)
2207 if (endian == DEVICE_LITTLE_ENDIAN) {
2208 val = bswap32(val);
2209 }
2210#else
2211 if (endian == DEVICE_BIG_ENDIAN) {
2212 val = bswap32(val);
2213 }
2214#endif
8df1cd07
FB
2215 } else {
2216 /* RAM case */
5c8a00ce 2217 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2218 & TARGET_PAGE_MASK)
149f54b5 2219 + addr1);
1e78bcc1
AG
2220 switch (endian) {
2221 case DEVICE_LITTLE_ENDIAN:
2222 val = ldl_le_p(ptr);
2223 break;
2224 case DEVICE_BIG_ENDIAN:
2225 val = ldl_be_p(ptr);
2226 break;
2227 default:
2228 val = ldl_p(ptr);
2229 break;
2230 }
8df1cd07
FB
2231 }
2232 return val;
2233}
2234
a8170e5e 2235uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2236{
2237 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2238}
2239
a8170e5e 2240uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2241{
2242 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2243}
2244
a8170e5e 2245uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2246{
2247 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2248}
2249
84b7b8e7 2250/* warning: addr must be aligned */
a8170e5e 2251static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2252 enum device_endian endian)
84b7b8e7 2253{
84b7b8e7
FB
2254 uint8_t *ptr;
2255 uint64_t val;
5c8a00ce 2256 MemoryRegion *mr;
149f54b5
PB
2257 hwaddr l = 8;
2258 hwaddr addr1;
84b7b8e7 2259
5c8a00ce
PB
2260 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2261 false);
2262 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2263 /* I/O case */
5c8a00ce 2264 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2265#if defined(TARGET_WORDS_BIGENDIAN)
2266 if (endian == DEVICE_LITTLE_ENDIAN) {
2267 val = bswap64(val);
2268 }
2269#else
2270 if (endian == DEVICE_BIG_ENDIAN) {
2271 val = bswap64(val);
2272 }
84b7b8e7
FB
2273#endif
2274 } else {
2275 /* RAM case */
5c8a00ce 2276 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2277 & TARGET_PAGE_MASK)
149f54b5 2278 + addr1);
1e78bcc1
AG
2279 switch (endian) {
2280 case DEVICE_LITTLE_ENDIAN:
2281 val = ldq_le_p(ptr);
2282 break;
2283 case DEVICE_BIG_ENDIAN:
2284 val = ldq_be_p(ptr);
2285 break;
2286 default:
2287 val = ldq_p(ptr);
2288 break;
2289 }
84b7b8e7
FB
2290 }
2291 return val;
2292}
2293
a8170e5e 2294uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2295{
2296 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2297}
2298
a8170e5e 2299uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2300{
2301 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2302}
2303
a8170e5e 2304uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2305{
2306 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2307}
2308
aab33094 2309/* XXX: optimize */
a8170e5e 2310uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2311{
2312 uint8_t val;
2313 cpu_physical_memory_read(addr, &val, 1);
2314 return val;
2315}
2316
733f0b02 2317/* warning: addr must be aligned */
a8170e5e 2318static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2319 enum device_endian endian)
aab33094 2320{
733f0b02
MT
2321 uint8_t *ptr;
2322 uint64_t val;
5c8a00ce 2323 MemoryRegion *mr;
149f54b5
PB
2324 hwaddr l = 2;
2325 hwaddr addr1;
733f0b02 2326
5c8a00ce
PB
2327 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2328 false);
2329 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2330 /* I/O case */
5c8a00ce 2331 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2332#if defined(TARGET_WORDS_BIGENDIAN)
2333 if (endian == DEVICE_LITTLE_ENDIAN) {
2334 val = bswap16(val);
2335 }
2336#else
2337 if (endian == DEVICE_BIG_ENDIAN) {
2338 val = bswap16(val);
2339 }
2340#endif
733f0b02
MT
2341 } else {
2342 /* RAM case */
5c8a00ce 2343 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2344 & TARGET_PAGE_MASK)
149f54b5 2345 + addr1);
1e78bcc1
AG
2346 switch (endian) {
2347 case DEVICE_LITTLE_ENDIAN:
2348 val = lduw_le_p(ptr);
2349 break;
2350 case DEVICE_BIG_ENDIAN:
2351 val = lduw_be_p(ptr);
2352 break;
2353 default:
2354 val = lduw_p(ptr);
2355 break;
2356 }
733f0b02
MT
2357 }
2358 return val;
aab33094
FB
2359}
2360
a8170e5e 2361uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2362{
2363 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2364}
2365
a8170e5e 2366uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2367{
2368 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2369}
2370
a8170e5e 2371uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2372{
2373 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2374}
2375
8df1cd07
FB
2376/* warning: addr must be aligned. The RAM page is not marked as dirty
2377 and the code inside is not invalidated. It is useful if the dirty
2378 bits are used to track modified PTEs */
a8170e5e 2379void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2380{
8df1cd07 2381 uint8_t *ptr;
5c8a00ce 2382 MemoryRegion *mr;
149f54b5
PB
2383 hwaddr l = 4;
2384 hwaddr addr1;
8df1cd07 2385
5c8a00ce
PB
2386 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2387 true);
2388 if (l < 4 || !memory_access_is_direct(mr, true)) {
2389 io_mem_write(mr, addr1, val, 4);
8df1cd07 2390 } else {
5c8a00ce 2391 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2392 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2393 stl_p(ptr, val);
74576198
AL
2394
2395 if (unlikely(in_migration)) {
2396 if (!cpu_physical_memory_is_dirty(addr1)) {
2397 /* invalidate code */
2398 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2399 /* set dirty bit */
f7c11b53
YT
2400 cpu_physical_memory_set_dirty_flags(
2401 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2402 }
2403 }
8df1cd07
FB
2404 }
2405}
2406
2407/* warning: addr must be aligned */
a8170e5e 2408static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2409 enum device_endian endian)
8df1cd07 2410{
8df1cd07 2411 uint8_t *ptr;
5c8a00ce 2412 MemoryRegion *mr;
149f54b5
PB
2413 hwaddr l = 4;
2414 hwaddr addr1;
8df1cd07 2415
5c8a00ce
PB
2416 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2417 true);
2418 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2419#if defined(TARGET_WORDS_BIGENDIAN)
2420 if (endian == DEVICE_LITTLE_ENDIAN) {
2421 val = bswap32(val);
2422 }
2423#else
2424 if (endian == DEVICE_BIG_ENDIAN) {
2425 val = bswap32(val);
2426 }
2427#endif
5c8a00ce 2428 io_mem_write(mr, addr1, val, 4);
8df1cd07 2429 } else {
8df1cd07 2430 /* RAM case */
5c8a00ce 2431 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2432 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2433 switch (endian) {
2434 case DEVICE_LITTLE_ENDIAN:
2435 stl_le_p(ptr, val);
2436 break;
2437 case DEVICE_BIG_ENDIAN:
2438 stl_be_p(ptr, val);
2439 break;
2440 default:
2441 stl_p(ptr, val);
2442 break;
2443 }
51d7a9eb 2444 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2445 }
2446}
2447
a8170e5e 2448void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2449{
2450 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2451}
2452
a8170e5e 2453void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2454{
2455 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2456}
2457
a8170e5e 2458void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2459{
2460 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2461}
2462
aab33094 2463/* XXX: optimize */
a8170e5e 2464void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2465{
2466 uint8_t v = val;
2467 cpu_physical_memory_write(addr, &v, 1);
2468}
2469
733f0b02 2470/* warning: addr must be aligned */
a8170e5e 2471static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2472 enum device_endian endian)
aab33094 2473{
733f0b02 2474 uint8_t *ptr;
5c8a00ce 2475 MemoryRegion *mr;
149f54b5
PB
2476 hwaddr l = 2;
2477 hwaddr addr1;
733f0b02 2478
5c8a00ce
PB
2479 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2480 true);
2481 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2482#if defined(TARGET_WORDS_BIGENDIAN)
2483 if (endian == DEVICE_LITTLE_ENDIAN) {
2484 val = bswap16(val);
2485 }
2486#else
2487 if (endian == DEVICE_BIG_ENDIAN) {
2488 val = bswap16(val);
2489 }
2490#endif
5c8a00ce 2491 io_mem_write(mr, addr1, val, 2);
733f0b02 2492 } else {
733f0b02 2493 /* RAM case */
5c8a00ce 2494 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2495 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2496 switch (endian) {
2497 case DEVICE_LITTLE_ENDIAN:
2498 stw_le_p(ptr, val);
2499 break;
2500 case DEVICE_BIG_ENDIAN:
2501 stw_be_p(ptr, val);
2502 break;
2503 default:
2504 stw_p(ptr, val);
2505 break;
2506 }
51d7a9eb 2507 invalidate_and_set_dirty(addr1, 2);
733f0b02 2508 }
aab33094
FB
2509}
2510
a8170e5e 2511void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2512{
2513 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2514}
2515
a8170e5e 2516void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2517{
2518 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2519}
2520
a8170e5e 2521void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2522{
2523 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2524}
2525
aab33094 2526/* XXX: optimize */
a8170e5e 2527void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2528{
2529 val = tswap64(val);
71d2b725 2530 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2531}
2532
a8170e5e 2533void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2534{
2535 val = cpu_to_le64(val);
2536 cpu_physical_memory_write(addr, &val, 8);
2537}
2538
a8170e5e 2539void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2540{
2541 val = cpu_to_be64(val);
2542 cpu_physical_memory_write(addr, &val, 8);
2543}
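
/* Example sketch, not part of the original file: storing a little-endian
 * 32-bit field and reading it back with the fixed-endian helpers above,
 * independent of host and target byte order.  0x2000 is an arbitrary
 * guest-physical address assumed to be RAM-backed. */
static uint32_t example_le_field(void)
{
    stl_le_phys(0x2000, 0x12345678);
    return ldl_le_phys(0x2000);     /* returns 0x12345678 */
}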
2544
5e2972fd 2545/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2546int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2547 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2548{
2549 int l;
a8170e5e 2550 hwaddr phys_addr;
9b3c35e0 2551 target_ulong page;
13eb76e0
FB
2552
2553 while (len > 0) {
2554 page = addr & TARGET_PAGE_MASK;
f17ec444 2555 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2556 /* if no physical page mapped, return an error */
2557 if (phys_addr == -1)
2558 return -1;
2559 l = (page + TARGET_PAGE_SIZE) - addr;
2560 if (l > len)
2561 l = len;
5e2972fd 2562 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2563 if (is_write)
2564 cpu_physical_memory_write_rom(phys_addr, buf, l);
2565 else
5e2972fd 2566 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2567 len -= l;
2568 buf += l;
2569 addr += l;
2570 }
2571 return 0;
2572}
a68fe89c 2573#endif
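
/* Example sketch, not part of the original file: how a debugger front end
 * such as the gdbstub might read a range of guest virtual memory through
 * cpu_memory_rw_debug(), which is provided by both configurations above. */
static int example_debug_read(CPUState *cpu, target_ulong addr,
                              void *buf, int len)
{
    return cpu_memory_rw_debug(cpu, addr, buf, len, 0);
}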
13eb76e0 2574
8e4a424b
BS
2575#if !defined(CONFIG_USER_ONLY)
2576
2577/*
2578 * A helper function for the _utterly broken_ virtio device model to find out if
2579 * it's running on a big-endian machine. Don't do this at home, kids!
2580 */
2581bool virtio_is_big_endian(void);
2582bool virtio_is_big_endian(void)
2583{
2584#if defined(TARGET_WORDS_BIGENDIAN)
2585 return true;
2586#else
2587 return false;
2588#endif
2589}
2590
2591#endif
2592
76f35538 2593#ifndef CONFIG_USER_ONLY
a8170e5e 2594bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2595{
5c8a00ce 2596 MemoryRegion *mr;
149f54b5 2597 hwaddr l = 1;
76f35538 2598
5c8a00ce
PB
2599 mr = address_space_translate(&address_space_memory,
2600 phys_addr, &phys_addr, &l, false);
76f35538 2601
5c8a00ce
PB
2602 return !(memory_region_is_ram(mr) ||
2603 memory_region_is_romd(mr));
76f35538 2604}
bd2fa51f
MH
2605
2606void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2607{
2608 RAMBlock *block;
2609
2610 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2611 func(block->host, block->offset, block->length, opaque);
2612 }
2613}
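
/* Example sketch, not part of the original file: a callback for
 * qemu_ram_foreach_block() that totals the registered RAM.  The parameter
 * order follows the func(block->host, block->offset, block->length, opaque)
 * call above; example_count_ram is hypothetical.
 *
 * Usage: uint64_t total = 0; qemu_ram_foreach_block(example_count_ram, &total);
 */
static void example_count_ram(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}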
ec3f8c99 2614#endif