/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}

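/*
 * Illustration of the lookup above (values are hypothetical: they depend on
 * L2_BITS and P_L2_LEVELS, which are defined outside this file).  With
 * L2_BITS == 10 and P_L2_LEVELS == 4, a page index is consumed 10 bits at a
 * time, most-significant chunk first:
 *
 *   level 3 slot = (index >> 30) & (L2_SIZE - 1)
 *   level 2 slot = (index >> 20) & (L2_SIZE - 1)
 *   level 1 slot = (index >> 10) & (L2_SIZE - 1)
 *   level 0 slot =  index        & (L2_SIZE - 1)
 *
 * Interior entries (!is_leaf) point into next_map.nodes; a leaf entry's ptr
 * is an index into the sections array, so a whole unmapped subtree can be
 * represented by a single leaf pointing at PHYS_SECTION_UNASSIGNED.
 */
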
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(cpu, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    RAMBlock *block;
    ram_addr_t start1;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
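
/*
 * Sketch of the iotlb encoding produced above: for RAM the value is the
 * page-aligned ram_addr_t OR'd with a small section number
 * (PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM), which is why
 * phys_section_add() below asserts that section numbers stay under
 * TARGET_PAGE_SIZE.  For MMIO the value is the index of the
 * MemoryRegionSection in the dispatch table plus the offset within it;
 * iotlb_to_region() later masks with ~TARGET_PAGE_MASK to recover the
 * section.
 */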
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

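/*
 * Worked example for mem_add() (hypothetical numbers, assuming a 4 KiB
 * target page): a section covering [0x1800, 0x5800) is registered in three
 * steps: a subpage for [0x1800, 0x2000), full pages for [0x2000, 0x5000)
 * via register_multipage(), and a trailing subpage for [0x5000, 0x5800).
 * Sub-page granularity is only needed at the unaligned edges.
 */
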
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

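/*
 * Example of the allocation strategy above (addresses are hypothetical):
 * with blocks at [0x0, 0x8000000) and [0x10000000, 0x18000000), a request
 * for 0x4000000 bytes returns 0x8000000, because the 0x8000000-byte gap
 * between the two blocks is the smallest gap that still fits (best fit);
 * last_ram_offset() would report 0x18000000.
 */
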
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->fd = -1;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                fprintf(stderr,
                        "-mem-path not supported with this accelerator\n");
                exit(1);
            }
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        }
        if (!new_block->host) {
            new_block->host = phys_mem_alloc(size);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
                exit(1);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                close(block->fd);
#endif
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
#ifdef MAP_POPULATE
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                        MAP_PRIVATE;
#else
                    flags |= MAP_PRIVATE;
#endif
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

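/*
 * Note on the notdirty path above: RAM pages that still have translated
 * code in them are mapped through io_mem_notdirty rather than as plain RAM,
 * so a guest write lands in notdirty_mem_write(), which invalidates any TBs
 * generated from that page before performing the store and updating the
 * dirty flags.  Once every dirty flag is set (0xff) the TLB entry is
 * switched back to a normal RAM mapping and later writes take the fast
 * path again.
 */
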
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

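/*
 * Usage sketch for the subpage machinery above: when several regions fall
 * within one target page (for example two small MMIO devices mapped a few
 * hundred bytes apart, a hypothetical layout), register_subpage() creates
 * one subpage_t covering that page and records a section number for every
 * byte offset in sub_section[].  subpage_read()/subpage_write() then
 * forward each access to the owning AddressSpace with the page base added
 * back, so dispatch inside the page stays byte-granular.
 */
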
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->as = as;
    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    next->nodes = next_map.nodes;
    next->sections = next_map.sections;

    as->dispatch = next;
    g_free(cur);
}

static void core_begin(MemoryListener *listener)
{
    uint16_t n;

    prev_map = g_new(PhysPageMap, 1);
    *prev_map = next_map;

    memset(&next_map, 0, sizeof(next_map));
    n = dummy_section(&io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);
}

/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
 * All AddressSpaceDispatch instances have switched to the next map.
 */
static void core_commit(MemoryListener *listener)
{
    phys_sections_free(prev_map);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    g_free(d);
    as->dispatch = NULL;
}

62152b8a
AK
1741static void memory_map_init(void)
1742{
7267c094 1743 system_memory = g_malloc(sizeof(*system_memory));
818f86b8
MT
1744
1745 assert(TARGET_PHYS_ADDR_SPACE_BITS <= 64);
1746
1747 memory_region_init(system_memory, NULL, "system",
1748 TARGET_PHYS_ADDR_SPACE_BITS == 64 ?
1749 UINT64_MAX : (0x1ULL << TARGET_PHYS_ADDR_SPACE_BITS));
7dca8043 1750 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1751
7267c094 1752 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
1753 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1754 65536);
7dca8043 1755 address_space_init(&address_space_io, system_io, "I/O");
93632747 1756
f6790af6 1757 memory_listener_register(&core_memory_listener, &address_space_memory);
2641689a 1758 if (tcg_enabled()) {
1759 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1760 }
62152b8a
AK
1761}
1762
1763MemoryRegion *get_system_memory(void)
1764{
1765 return system_memory;
1766}
1767
309cb471
AK
1768MemoryRegion *get_system_io(void)
1769{
1770 return system_io;
1771}
1772
e2eef170
PB
1773#endif /* !defined(CONFIG_USER_ONLY) */
1774
13eb76e0
FB
1775/* physical memory access (slow version, mainly for debug) */
1776#if defined(CONFIG_USER_ONLY)
f17ec444 1777int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 1778 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1779{
1780 int l, flags;
1781 target_ulong page;
53a5960a 1782 void * p;
13eb76e0
FB
1783
1784 while (len > 0) {
1785 page = addr & TARGET_PAGE_MASK;
1786 l = (page + TARGET_PAGE_SIZE) - addr;
1787 if (l > len)
1788 l = len;
1789 flags = page_get_flags(page);
1790 if (!(flags & PAGE_VALID))
a68fe89c 1791 return -1;
13eb76e0
FB
1792 if (is_write) {
1793 if (!(flags & PAGE_WRITE))
a68fe89c 1794 return -1;
579a97f7 1795 /* XXX: this code should not depend on lock_user */
72fb7daa 1796 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1797 return -1;
72fb7daa
AJ
1798 memcpy(p, buf, l);
1799 unlock_user(p, addr, l);
13eb76e0
FB
1800 } else {
1801 if (!(flags & PAGE_READ))
a68fe89c 1802 return -1;
579a97f7 1803 /* XXX: this code should not depend on lock_user */
72fb7daa 1804 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1805 return -1;
72fb7daa 1806 memcpy(buf, p, l);
5b257578 1807 unlock_user(p, addr, 0);
13eb76e0
FB
1808 }
1809 len -= l;
1810 buf += l;
1811 addr += l;
1812 }
a68fe89c 1813 return 0;
13eb76e0 1814}
8df1cd07 1815
13eb76e0 1816#else
51d7a9eb 1817
a8170e5e
AK
1818static void invalidate_and_set_dirty(hwaddr addr,
1819 hwaddr length)
51d7a9eb
AP
1820{
1821 if (!cpu_physical_memory_is_dirty(addr)) {
1822 /* invalidate code */
1823 tb_invalidate_phys_page_range(addr, addr + length, 0);
1824 /* set dirty bit */
1825 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1826 }
e226939d 1827 xen_modified_memory(addr, length);
51d7a9eb
AP
1828}
1829
2bbfa05d
PB
1830static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1831{
1832 if (memory_region_is_ram(mr)) {
1833 return !(is_write && mr->readonly);
1834 }
1835 if (memory_region_is_romd(mr)) {
1836 return !is_write;
1837 }
1838
1839 return false;
1840}
1841
23326164 1842static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 1843{
e1622f4b 1844 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
1845
1846 /* Regions are assumed to support 1-4 byte accesses unless
1847 otherwise specified. */
23326164
RH
1848 if (access_size_max == 0) {
1849 access_size_max = 4;
1850 }
1851
1852 /* Bound the maximum access by the alignment of the address. */
1853 if (!mr->ops->impl.unaligned) {
1854 unsigned align_size_max = addr & -addr;
1855 if (align_size_max != 0 && align_size_max < access_size_max) {
1856 access_size_max = align_size_max;
1857 }
82f2563f 1858 }
23326164
RH
1859
1860 /* Don't attempt accesses larger than the maximum. */
1861 if (l > access_size_max) {
1862 l = access_size_max;
82f2563f 1863 }
098178f2
PB
1864 if (l & (l - 1)) {
1865 l = 1 << (qemu_fls(l) - 1);
1866 }
23326164
RH
1867
1868 return l;
82f2563f
PB
1869}
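/* Editorial worked example (assumption, not part of the original file): for a
 * region whose ops declare valid.max_access_size == 4 and no unaligned
 * support, a request of l == 8 at addr == 0x1006 is first capped to 4 by the
 * region, then to 2 by the alignment (0x1006 & -0x1006 isolates the lowest
 * set bit, i.e. 2); 2 is already a power of two, so memory_access_size()
 * returns 2 and the caller splits the access into 16-bit pieces.
 */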
1870
fd8aaa76 1871bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1872 int len, bool is_write)
13eb76e0 1873{
149f54b5 1874 hwaddr l;
13eb76e0 1875 uint8_t *ptr;
791af8c8 1876 uint64_t val;
149f54b5 1877 hwaddr addr1;
5c8a00ce 1878 MemoryRegion *mr;
fd8aaa76 1879 bool error = false;
3b46e624 1880
13eb76e0 1881 while (len > 0) {
149f54b5 1882 l = len;
5c8a00ce 1883 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 1884
13eb76e0 1885 if (is_write) {
5c8a00ce
PB
1886 if (!memory_access_is_direct(mr, is_write)) {
1887 l = memory_access_size(mr, l, addr1);
4917cf44 1888 /* XXX: could force current_cpu to NULL to avoid
6a00d601 1889 potential bugs */
23326164
RH
1890 switch (l) {
1891 case 8:
1892 /* 64 bit write access */
1893 val = ldq_p(buf);
1894 error |= io_mem_write(mr, addr1, val, 8);
1895 break;
1896 case 4:
1c213d19 1897 /* 32 bit write access */
c27004ec 1898 val = ldl_p(buf);
5c8a00ce 1899 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
1900 break;
1901 case 2:
1c213d19 1902 /* 16 bit write access */
c27004ec 1903 val = lduw_p(buf);
5c8a00ce 1904 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
1905 break;
1906 case 1:
1c213d19 1907 /* 8 bit write access */
c27004ec 1908 val = ldub_p(buf);
5c8a00ce 1909 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
1910 break;
1911 default:
1912 abort();
13eb76e0 1913 }
2bbfa05d 1914 } else {
5c8a00ce 1915 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 1916 /* RAM case */
5579c7f3 1917 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1918 memcpy(ptr, buf, l);
51d7a9eb 1919 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1920 }
1921 } else {
5c8a00ce 1922 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 1923 /* I/O case */
5c8a00ce 1924 l = memory_access_size(mr, l, addr1);
23326164
RH
1925 switch (l) {
1926 case 8:
1927 /* 64 bit read access */
1928 error |= io_mem_read(mr, addr1, &val, 8);
1929 stq_p(buf, val);
1930 break;
1931 case 4:
13eb76e0 1932 /* 32 bit read access */
5c8a00ce 1933 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 1934 stl_p(buf, val);
23326164
RH
1935 break;
1936 case 2:
13eb76e0 1937 /* 16 bit read access */
5c8a00ce 1938 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 1939 stw_p(buf, val);
23326164
RH
1940 break;
1941 case 1:
1c213d19 1942 /* 8 bit read access */
5c8a00ce 1943 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 1944 stb_p(buf, val);
23326164
RH
1945 break;
1946 default:
1947 abort();
13eb76e0
FB
1948 }
1949 } else {
1950 /* RAM case */
5c8a00ce 1951 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 1952 memcpy(buf, ptr, l);
13eb76e0
FB
1953 }
1954 }
1955 len -= l;
1956 buf += l;
1957 addr += l;
1958 }
fd8aaa76
PB
1959
1960 return error;
13eb76e0 1961}
8df1cd07 1962
fd8aaa76 1963bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1964 const uint8_t *buf, int len)
1965{
fd8aaa76 1966 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
1967}
1968
fd8aaa76 1969bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 1970{
fd8aaa76 1971 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
1972}
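/* Editorial illustration (assumption, not part of the original file): how a
 * device model would push a small buffer into guest memory through the
 * wrappers above.  The "example_" name is hypothetical; address_space_write()
 * and address_space_memory are the API defined in this file.
 */
static bool example_push_buffer(hwaddr gpa, const uint8_t *buf, int len)
{
    /* A true return value means at least one underlying I/O access failed. */
    return address_space_write(&address_space_memory, gpa, buf, len);
}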
1973
1974
a8170e5e 1975void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1976 int len, int is_write)
1977{
fd8aaa76 1978 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
1979}
1980
d0ecd2aa 1981/* used for ROM loading: can write in RAM and ROM */
a8170e5e 1982void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1983 const uint8_t *buf, int len)
1984{
149f54b5 1985 hwaddr l;
d0ecd2aa 1986 uint8_t *ptr;
149f54b5 1987 hwaddr addr1;
5c8a00ce 1988 MemoryRegion *mr;
3b46e624 1989
d0ecd2aa 1990 while (len > 0) {
149f54b5 1991 l = len;
5c8a00ce
PB
1992 mr = address_space_translate(&address_space_memory,
1993 addr, &addr1, &l, true);
3b46e624 1994
5c8a00ce
PB
1995 if (!(memory_region_is_ram(mr) ||
1996 memory_region_is_romd(mr))) {
d0ecd2aa
FB
1997 /* do nothing */
1998 } else {
5c8a00ce 1999 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2000 /* ROM/RAM case */
5579c7f3 2001 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 2002 memcpy(ptr, buf, l);
51d7a9eb 2003 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2004 }
2005 len -= l;
2006 buf += l;
2007 addr += l;
2008 }
2009}
2010
6d16c2f8 2011typedef struct {
d3e71559 2012 MemoryRegion *mr;
6d16c2f8 2013 void *buffer;
a8170e5e
AK
2014 hwaddr addr;
2015 hwaddr len;
6d16c2f8
AL
2016} BounceBuffer;
2017
2018static BounceBuffer bounce;
2019
ba223c29
AL
2020typedef struct MapClient {
2021 void *opaque;
2022 void (*callback)(void *opaque);
72cf2d4f 2023 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2024} MapClient;
2025
72cf2d4f
BS
2026static QLIST_HEAD(map_client_list, MapClient) map_client_list
2027 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2028
2029void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2030{
7267c094 2031 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2032
2033 client->opaque = opaque;
2034 client->callback = callback;
72cf2d4f 2035 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2036 return client;
2037}
2038
8b9c99d9 2039static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2040{
2041 MapClient *client = (MapClient *)_client;
2042
72cf2d4f 2043 QLIST_REMOVE(client, link);
7267c094 2044 g_free(client);
ba223c29
AL
2045}
2046
2047static void cpu_notify_map_clients(void)
2048{
2049 MapClient *client;
2050
72cf2d4f
BS
2051 while (!QLIST_EMPTY(&map_client_list)) {
2052 client = QLIST_FIRST(&map_client_list);
ba223c29 2053 client->callback(client->opaque);
34d5e948 2054 cpu_unregister_map_client(client);
ba223c29
AL
2055 }
2056}
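/* Editorial illustration (assumption, not part of the original file): a
 * caller whose address_space_map() attempt returned NULL because the single
 * bounce buffer was busy can ask to be notified when it is released.  The
 * callback runs from cpu_notify_map_clients(), i.e. once address_space_unmap()
 * frees the bounce buffer, and the client is unregistered after it has run.
 * The "example_" names are hypothetical.
 */
static void example_map_retry_cb(void *opaque)
{
    /* retry the address_space_map() call that previously failed */
}

static void example_wait_for_bounce_buffer(void *dev)
{
    cpu_register_map_client(dev, example_map_retry_cb);
}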
2057
51644ab7
PB
2058bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2059{
5c8a00ce 2060 MemoryRegion *mr;
51644ab7
PB
2061 hwaddr l, xlat;
2062
2063 while (len > 0) {
2064 l = len;
5c8a00ce
PB
2065 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2066 if (!memory_access_is_direct(mr, is_write)) {
2067 l = memory_access_size(mr, l, addr);
2068 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2069 return false;
2070 }
2071 }
2072
2073 len -= l;
2074 addr += l;
2075 }
2076 return true;
2077}
2078
6d16c2f8
AL
2079/* Map a physical memory region into a host virtual address.
2080 * May map a subset of the requested range, given by and returned in *plen.
2081 * May return NULL if resources needed to perform the mapping are exhausted.
2082 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2083 * Use cpu_register_map_client() to know when retrying the map operation is
2084 * likely to succeed.
6d16c2f8 2085 */
ac1970fb 2086void *address_space_map(AddressSpace *as,
a8170e5e
AK
2087 hwaddr addr,
2088 hwaddr *plen,
ac1970fb 2089 bool is_write)
6d16c2f8 2090{
a8170e5e 2091 hwaddr len = *plen;
e3127ae0
PB
2092 hwaddr done = 0;
2093 hwaddr l, xlat, base;
2094 MemoryRegion *mr, *this_mr;
2095 ram_addr_t raddr;
6d16c2f8 2096
e3127ae0
PB
2097 if (len == 0) {
2098 return NULL;
2099 }
38bee5dc 2100
e3127ae0
PB
2101 l = len;
2102 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2103 if (!memory_access_is_direct(mr, is_write)) {
2104 if (bounce.buffer) {
2105 return NULL;
6d16c2f8 2106 }
e85d9db5
KW
2107 /* Avoid unbounded allocations */
2108 l = MIN(l, TARGET_PAGE_SIZE);
2109 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2110 bounce.addr = addr;
2111 bounce.len = l;
d3e71559
PB
2112
2113 memory_region_ref(mr);
2114 bounce.mr = mr;
e3127ae0
PB
2115 if (!is_write) {
2116 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2117 }
6d16c2f8 2118
e3127ae0
PB
2119 *plen = l;
2120 return bounce.buffer;
2121 }
2122
2123 base = xlat;
2124 raddr = memory_region_get_ram_addr(mr);
2125
2126 for (;;) {
6d16c2f8
AL
2127 len -= l;
2128 addr += l;
e3127ae0
PB
2129 done += l;
2130 if (len == 0) {
2131 break;
2132 }
2133
2134 l = len;
2135 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2136 if (this_mr != mr || xlat != base + done) {
2137 break;
2138 }
6d16c2f8 2139 }
e3127ae0 2140
d3e71559 2141 memory_region_ref(mr);
e3127ae0
PB
2142 *plen = done;
2143 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2144}
2145
ac1970fb 2146/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2147 * Will also mark the memory as dirty if is_write == 1. access_len gives
2148 * the amount of memory that was actually read or written by the caller.
2149 */
a8170e5e
AK
2150void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2151 int is_write, hwaddr access_len)
6d16c2f8
AL
2152{
2153 if (buffer != bounce.buffer) {
d3e71559
PB
2154 MemoryRegion *mr;
2155 ram_addr_t addr1;
2156
2157 mr = qemu_ram_addr_from_host(buffer, &addr1);
2158 assert(mr != NULL);
6d16c2f8 2159 if (is_write) {
6d16c2f8
AL
2160 while (access_len) {
2161 unsigned l;
2162 l = TARGET_PAGE_SIZE;
2163 if (l > access_len)
2164 l = access_len;
51d7a9eb 2165 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2166 addr1 += l;
2167 access_len -= l;
2168 }
2169 }
868bb33f 2170 if (xen_enabled()) {
e41d7c69 2171 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2172 }
d3e71559 2173 memory_region_unref(mr);
6d16c2f8
AL
2174 return;
2175 }
2176 if (is_write) {
ac1970fb 2177 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2178 }
f8a83245 2179 qemu_vfree(bounce.buffer);
6d16c2f8 2180 bounce.buffer = NULL;
d3e71559 2181 memory_region_unref(bounce.mr);
ba223c29 2182 cpu_notify_map_clients();
6d16c2f8 2183}
d0ecd2aa 2184
a8170e5e
AK
2185void *cpu_physical_memory_map(hwaddr addr,
2186 hwaddr *plen,
ac1970fb
AK
2187 int is_write)
2188{
2189 return address_space_map(&address_space_memory, addr, plen, is_write);
2190}
2191
a8170e5e
AK
2192void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2193 int is_write, hwaddr access_len)
ac1970fb
AK
2194{
2195 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2196}
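/* Editorial illustration (assumption, not part of the original file): the
 * intended map/copy/unmap pattern for the wrappers above, with a fallback to
 * the slow path when no direct mapping is available.  The "example_" name is
 * hypothetical; a real caller would also loop while len < size, since the
 * mapping may cover only part of the requested range.
 */
static void example_dma_write(hwaddr gpa, const uint8_t *data, int size)
{
    hwaddr len = size;
    void *host = cpu_physical_memory_map(gpa, &len, 1);

    if (host) {
        memcpy(host, data, len);
        /* Passing access_len == len marks the written RAM as dirty. */
        cpu_physical_memory_unmap(host, len, 1, len);
    } else {
        /* Bounce buffer busy or region not directly accessible. */
        cpu_physical_memory_write(gpa, data, size);
    }
}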
2197
8df1cd07 2198/* warning: addr must be aligned */
a8170e5e 2199static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2200 enum device_endian endian)
8df1cd07 2201{
8df1cd07 2202 uint8_t *ptr;
791af8c8 2203 uint64_t val;
5c8a00ce 2204 MemoryRegion *mr;
149f54b5
PB
2205 hwaddr l = 4;
2206 hwaddr addr1;
8df1cd07 2207
5c8a00ce
PB
2208 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2209 false);
2210 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2211 /* I/O case */
5c8a00ce 2212 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2213#if defined(TARGET_WORDS_BIGENDIAN)
2214 if (endian == DEVICE_LITTLE_ENDIAN) {
2215 val = bswap32(val);
2216 }
2217#else
2218 if (endian == DEVICE_BIG_ENDIAN) {
2219 val = bswap32(val);
2220 }
2221#endif
8df1cd07
FB
2222 } else {
2223 /* RAM case */
5c8a00ce 2224 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2225 & TARGET_PAGE_MASK)
149f54b5 2226 + addr1);
1e78bcc1
AG
2227 switch (endian) {
2228 case DEVICE_LITTLE_ENDIAN:
2229 val = ldl_le_p(ptr);
2230 break;
2231 case DEVICE_BIG_ENDIAN:
2232 val = ldl_be_p(ptr);
2233 break;
2234 default:
2235 val = ldl_p(ptr);
2236 break;
2237 }
8df1cd07
FB
2238 }
2239 return val;
2240}
2241
a8170e5e 2242uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2243{
2244 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2245}
2246
a8170e5e 2247uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2248{
2249 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2250}
2251
a8170e5e 2252uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2253{
2254 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2255}
2256
84b7b8e7 2257/* warning: addr must be aligned */
a8170e5e 2258static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2259 enum device_endian endian)
84b7b8e7 2260{
84b7b8e7
FB
2261 uint8_t *ptr;
2262 uint64_t val;
5c8a00ce 2263 MemoryRegion *mr;
149f54b5
PB
2264 hwaddr l = 8;
2265 hwaddr addr1;
84b7b8e7 2266
5c8a00ce
PB
2267 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2268 false);
2269 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2270 /* I/O case */
5c8a00ce 2271 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2272#if defined(TARGET_WORDS_BIGENDIAN)
2273 if (endian == DEVICE_LITTLE_ENDIAN) {
2274 val = bswap64(val);
2275 }
2276#else
2277 if (endian == DEVICE_BIG_ENDIAN) {
2278 val = bswap64(val);
2279 }
84b7b8e7
FB
2280#endif
2281 } else {
2282 /* RAM case */
5c8a00ce 2283 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2284 & TARGET_PAGE_MASK)
149f54b5 2285 + addr1);
1e78bcc1
AG
2286 switch (endian) {
2287 case DEVICE_LITTLE_ENDIAN:
2288 val = ldq_le_p(ptr);
2289 break;
2290 case DEVICE_BIG_ENDIAN:
2291 val = ldq_be_p(ptr);
2292 break;
2293 default:
2294 val = ldq_p(ptr);
2295 break;
2296 }
84b7b8e7
FB
2297 }
2298 return val;
2299}
2300
a8170e5e 2301uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2302{
2303 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2304}
2305
a8170e5e 2306uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2307{
2308 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2309}
2310
a8170e5e 2311uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2312{
2313 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2314}
2315
aab33094 2316/* XXX: optimize */
a8170e5e 2317uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2318{
2319 uint8_t val;
2320 cpu_physical_memory_read(addr, &val, 1);
2321 return val;
2322}
2323
733f0b02 2324/* warning: addr must be aligned */
a8170e5e 2325static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2326 enum device_endian endian)
aab33094 2327{
733f0b02
MT
2328 uint8_t *ptr;
2329 uint64_t val;
5c8a00ce 2330 MemoryRegion *mr;
149f54b5
PB
2331 hwaddr l = 2;
2332 hwaddr addr1;
733f0b02 2333
5c8a00ce
PB
2334 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2335 false);
2336 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2337 /* I/O case */
5c8a00ce 2338 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2339#if defined(TARGET_WORDS_BIGENDIAN)
2340 if (endian == DEVICE_LITTLE_ENDIAN) {
2341 val = bswap16(val);
2342 }
2343#else
2344 if (endian == DEVICE_BIG_ENDIAN) {
2345 val = bswap16(val);
2346 }
2347#endif
733f0b02
MT
2348 } else {
2349 /* RAM case */
5c8a00ce 2350 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2351 & TARGET_PAGE_MASK)
149f54b5 2352 + addr1);
1e78bcc1
AG
2353 switch (endian) {
2354 case DEVICE_LITTLE_ENDIAN:
2355 val = lduw_le_p(ptr);
2356 break;
2357 case DEVICE_BIG_ENDIAN:
2358 val = lduw_be_p(ptr);
2359 break;
2360 default:
2361 val = lduw_p(ptr);
2362 break;
2363 }
733f0b02
MT
2364 }
2365 return val;
aab33094
FB
2366}
2367
a8170e5e 2368uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2369{
2370 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2371}
2372
a8170e5e 2373uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2374{
2375 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2376}
2377
a8170e5e 2378uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2379{
2380 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2381}
2382
8df1cd07
FB
2383/* warning: addr must be aligned. The ram page is not marked as dirty
2384 and the code inside is not invalidated. It is useful if the dirty
2385 bits are used to track modified PTEs */
a8170e5e 2386void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2387{
8df1cd07 2388 uint8_t *ptr;
5c8a00ce 2389 MemoryRegion *mr;
149f54b5
PB
2390 hwaddr l = 4;
2391 hwaddr addr1;
8df1cd07 2392
5c8a00ce
PB
2393 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2394 true);
2395 if (l < 4 || !memory_access_is_direct(mr, true)) {
2396 io_mem_write(mr, addr1, val, 4);
8df1cd07 2397 } else {
5c8a00ce 2398 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2399 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2400 stl_p(ptr, val);
74576198
AL
2401
2402 if (unlikely(in_migration)) {
2403 if (!cpu_physical_memory_is_dirty(addr1)) {
2404 /* invalidate code */
2405 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2406 /* set dirty bit */
f7c11b53
YT
2407 cpu_physical_memory_set_dirty_flags(
2408 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2409 }
2410 }
8df1cd07
FB
2411 }
2412}
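/* Editorial note (assumption, not part of the original file): the typical
 * caller is target MMU emulation updating accessed/dirty bits of a guest
 * page-table entry in place, e.g.
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     stl_phys_notdirty(pte_addr, pte | example_accessed_bit);
 *
 * where example_accessed_bit stands for a target-specific flag.  Using a
 * plain store such as stl_phys() here would mark the page dirty and
 * invalidate any translated code on it, which is exactly what the
 * PTE-tracking use case wants to avoid.
 */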
2413
2414/* warning: addr must be aligned */
a8170e5e 2415static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2416 enum device_endian endian)
8df1cd07 2417{
8df1cd07 2418 uint8_t *ptr;
5c8a00ce 2419 MemoryRegion *mr;
149f54b5
PB
2420 hwaddr l = 4;
2421 hwaddr addr1;
8df1cd07 2422
5c8a00ce
PB
2423 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2424 true);
2425 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2426#if defined(TARGET_WORDS_BIGENDIAN)
2427 if (endian == DEVICE_LITTLE_ENDIAN) {
2428 val = bswap32(val);
2429 }
2430#else
2431 if (endian == DEVICE_BIG_ENDIAN) {
2432 val = bswap32(val);
2433 }
2434#endif
5c8a00ce 2435 io_mem_write(mr, addr1, val, 4);
8df1cd07 2436 } else {
8df1cd07 2437 /* RAM case */
5c8a00ce 2438 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2439 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2440 switch (endian) {
2441 case DEVICE_LITTLE_ENDIAN:
2442 stl_le_p(ptr, val);
2443 break;
2444 case DEVICE_BIG_ENDIAN:
2445 stl_be_p(ptr, val);
2446 break;
2447 default:
2448 stl_p(ptr, val);
2449 break;
2450 }
51d7a9eb 2451 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2452 }
2453}
2454
a8170e5e 2455void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2456{
2457 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2458}
2459
a8170e5e 2460void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2461{
2462 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2463}
2464
a8170e5e 2465void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2466{
2467 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2468}
2469
aab33094 2470/* XXX: optimize */
a8170e5e 2471void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2472{
2473 uint8_t v = val;
2474 cpu_physical_memory_write(addr, &v, 1);
2475}
2476
733f0b02 2477/* warning: addr must be aligned */
a8170e5e 2478static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2479 enum device_endian endian)
aab33094 2480{
733f0b02 2481 uint8_t *ptr;
5c8a00ce 2482 MemoryRegion *mr;
149f54b5
PB
2483 hwaddr l = 2;
2484 hwaddr addr1;
733f0b02 2485
5c8a00ce
PB
2486 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2487 true);
2488 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2489#if defined(TARGET_WORDS_BIGENDIAN)
2490 if (endian == DEVICE_LITTLE_ENDIAN) {
2491 val = bswap16(val);
2492 }
2493#else
2494 if (endian == DEVICE_BIG_ENDIAN) {
2495 val = bswap16(val);
2496 }
2497#endif
5c8a00ce 2498 io_mem_write(mr, addr1, val, 2);
733f0b02 2499 } else {
733f0b02 2500 /* RAM case */
5c8a00ce 2501 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2502 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2503 switch (endian) {
2504 case DEVICE_LITTLE_ENDIAN:
2505 stw_le_p(ptr, val);
2506 break;
2507 case DEVICE_BIG_ENDIAN:
2508 stw_be_p(ptr, val);
2509 break;
2510 default:
2511 stw_p(ptr, val);
2512 break;
2513 }
51d7a9eb 2514 invalidate_and_set_dirty(addr1, 2);
733f0b02 2515 }
aab33094
FB
2516}
2517
a8170e5e 2518void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2519{
2520 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2521}
2522
a8170e5e 2523void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2524{
2525 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2526}
2527
a8170e5e 2528void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2529{
2530 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2531}
2532
aab33094 2533/* XXX: optimize */
a8170e5e 2534void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2535{
2536 val = tswap64(val);
71d2b725 2537 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2538}
2539
a8170e5e 2540void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2541{
2542 val = cpu_to_le64(val);
2543 cpu_physical_memory_write(addr, &val, 8);
2544}
2545
a8170e5e 2546void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2547{
2548 val = cpu_to_be64(val);
2549 cpu_physical_memory_write(addr, &val, 8);
2550}
2551
5e2972fd 2552/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2553int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2554 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2555{
2556 int l;
a8170e5e 2557 hwaddr phys_addr;
9b3c35e0 2558 target_ulong page;
13eb76e0
FB
2559
2560 while (len > 0) {
2561 page = addr & TARGET_PAGE_MASK;
f17ec444 2562 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2563 /* if no physical page mapped, return an error */
2564 if (phys_addr == -1)
2565 return -1;
2566 l = (page + TARGET_PAGE_SIZE) - addr;
2567 if (l > len)
2568 l = len;
5e2972fd 2569 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2570 if (is_write)
2571 cpu_physical_memory_write_rom(phys_addr, buf, l);
2572 else
5e2972fd 2573 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2574 len -= l;
2575 buf += l;
2576 addr += l;
2577 }
2578 return 0;
2579}
a68fe89c 2580#endif
13eb76e0 2581
8e4a424b
BS
2582#if !defined(CONFIG_USER_ONLY)
2583
2584/*
2585 * A helper function for the _utterly broken_ virtio device model to find out if
2586 * it's running on a big endian machine. Don't do this at home kids!
2587 */
2588bool virtio_is_big_endian(void);
2589bool virtio_is_big_endian(void)
2590{
2591#if defined(TARGET_WORDS_BIGENDIAN)
2592 return true;
2593#else
2594 return false;
2595#endif
2596}
2597
2598#endif
2599
76f35538 2600#ifndef CONFIG_USER_ONLY
a8170e5e 2601bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2602{
5c8a00ce 2603 MemoryRegion *mr;
149f54b5 2604 hwaddr l = 1;
76f35538 2605
5c8a00ce
PB
2606 mr = address_space_translate(&address_space_memory,
2607 phys_addr, &phys_addr, &l, false);
76f35538 2608
5c8a00ce
PB
2609 return !(memory_region_is_ram(mr) ||
2610 memory_region_is_romd(mr));
76f35538 2611}
bd2fa51f
MH
2612
2613void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2614{
2615 RAMBlock *block;
2616
2617 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2618 func(block->host, block->offset, block->length, opaque);
2619 }
2620}
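/* Editorial illustration (assumption, not part of the original file): summing
 * the size of all guest RAM blocks with the iterator above.  The "example_"
 * names are hypothetical; the callback takes the same arguments that
 * qemu_ram_foreach_block() passes to func() in the loop above.
 */
static void example_add_block_length(void *host_addr, ram_addr_t offset,
                                     ram_addr_t length, void *opaque)
{
    *(uint64_t *)opaque += length;
}

static uint64_t example_total_ram_size(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_add_block_length, &total);
    return total;
}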
ec3f8c99 2621#endif