/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
     /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}
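
/* The dispatch map built by the two helpers above is a small radix tree:
 * each PhysPageEntry either points at another Node of L2_SIZE entries or,
 * once is_leaf is set, at an index into the sections array.  A lookup peels
 * L2_BITS of the page index per level, e.g.
 *
 *     MemoryRegionSection *s =
 *         phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
 *                        d->nodes, d->sections);
 *
 * Ranges that were never set resolve to sections[PHYS_SECTION_UNASSIGNED].
 */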

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
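
/* Typical use, as in the address_space_rw()/address_space_map() paths:
 * callers walk a buffer, translating one chunk at a time and then accessing
 * the returned region, roughly
 *
 *     l = len;
 *     mr = address_space_translate(as, addr, &addr1, &l, is_write);
 *     // access at most 'l' bytes of 'mr' starting at offset 'addr1'
 *
 * *plen is clamped so an access never crosses the end of the region.
 */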

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    RAMBlock *block;
    ram_addr_t start1;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}
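
/* For instance, the s390 KVM setup code installs its own allocator here
 * (roughly: phys_mem_set_alloc(legacy_s390_alloc);) so that guest RAM is
 * mapped the way that accelerator requires; everyone else keeps the default
 * qemu_anon_ram_alloc().
 */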

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

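/* mem_add() below carves a MemoryRegionSection into page-sized pieces:
 * partial pages at the start and end of the section go through
 * register_subpage(), which multiplexes several sections inside one guest
 * page via a subpage_t, while the page-aligned middle is handed to
 * register_multipage() in one go.
 */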
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->fd = -1;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                fprintf(stderr,
                        "-mem-path not supported with this accelerator\n");
                exit(1);
            }
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        }
        if (!new_block->host) {
            new_block->host = phys_mem_alloc(size);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
                exit(1);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

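/* Callers normally reach these allocators through the memory API rather
 * than directly; a board model typically does something like
 *
 *     memory_region_init_ram(&s->ram, owner, "board.ram", ram_size);
 *     memory_region_add_subregion(get_system_memory(), 0, &s->ram);
 *
 * and memory_region_init_ram() ends up in qemu_ram_alloc() /
 * qemu_ram_alloc_from_ptr() here.
 */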
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                close(block->fd);
#endif
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
#ifdef MAP_POPULATE
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                        MAP_PRIVATE;
#else
                    flags |= MAP_PRIVATE;
#endif
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->as = as;
    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    next->nodes = next_map.nodes;
    next->sections = next_map.sections;

    as->dispatch = next;
    g_free(cur);
}

static void core_begin(MemoryListener *listener)
{
    uint16_t n;

    prev_map = g_new(PhysPageMap, 1);
    *prev_map = next_map;

    memset(&next_map, 0, sizeof(next_map));
    n = dummy_section(&io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);
}

/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
 * All AddressSpaceDispatch instances have switched to the next map.
 */
static void core_commit(MemoryListener *listener)
{
    phys_sections_free(prev_map);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

1721void address_space_init_dispatch(AddressSpace *as)
1722{
00752703 1723 as->dispatch = NULL;
89ae337a 1724 as->dispatch_listener = (MemoryListener) {
ac1970fb 1725 .begin = mem_begin,
00752703 1726 .commit = mem_commit,
ac1970fb
AK
1727 .region_add = mem_add,
1728 .region_nop = mem_add,
1729 .priority = 0,
1730 };
89ae337a 1731 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1732}
1733
83f3c251
AK
1734void address_space_destroy_dispatch(AddressSpace *as)
1735{
1736 AddressSpaceDispatch *d = as->dispatch;
1737
89ae337a 1738 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1739 g_free(d);
1740 as->dispatch = NULL;
1741}
1742
62152b8a
AK
1743static void memory_map_init(void)
1744{
7267c094 1745 system_memory = g_malloc(sizeof(*system_memory));
ef9e455d 1746 memory_region_init(system_memory, NULL, "system", INT64_MAX);
7dca8043 1747 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1748
7267c094 1749 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
1750 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1751 65536);
7dca8043 1752 address_space_init(&address_space_io, system_io, "I/O");
93632747 1753
f6790af6 1754 memory_listener_register(&core_memory_listener, &address_space_memory);
2641689a 1755 if (tcg_enabled()) {
1756 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1757 }
62152b8a
AK
1758}
1759
1760MemoryRegion *get_system_memory(void)
1761{
1762 return system_memory;
1763}
1764
309cb471
AK
1765MemoryRegion *get_system_io(void)
1766{
1767 return system_io;
1768}
1769
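
/* A usage sketch (not part of this file): board code typically obtains the
 * root region with get_system_memory() and maps its RAM into it.  The
 * memory_region_init_ram()/vmstate_register_ram_global() calls follow the
 * owner-taking _init_io() form used above and are assumptions here, as are
 * the region name and the 0x10000000 base address. */
static void example_map_board_ram(uint64_t ram_size)
{
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, NULL, "example.ram", ram_size);
    vmstate_register_ram_global(ram);
    memory_region_add_subregion(sysmem, 0x10000000, ram);
}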
e2eef170
PB
1770#endif /* !defined(CONFIG_USER_ONLY) */
1771
13eb76e0
FB
1772/* physical memory access (slow version, mainly for debug) */
1773#if defined(CONFIG_USER_ONLY)
f17ec444 1774int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 1775 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1776{
1777 int l, flags;
1778 target_ulong page;
53a5960a 1779 void *p;
13eb76e0
FB
1780
1781 while (len > 0) {
1782 page = addr & TARGET_PAGE_MASK;
1783 l = (page + TARGET_PAGE_SIZE) - addr;
1784 if (l > len)
1785 l = len;
1786 flags = page_get_flags(page);
1787 if (!(flags & PAGE_VALID))
a68fe89c 1788 return -1;
13eb76e0
FB
1789 if (is_write) {
1790 if (!(flags & PAGE_WRITE))
a68fe89c 1791 return -1;
579a97f7 1792 /* XXX: this code should not depend on lock_user */
72fb7daa 1793 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1794 return -1;
72fb7daa
AJ
1795 memcpy(p, buf, l);
1796 unlock_user(p, addr, l);
13eb76e0
FB
1797 } else {
1798 if (!(flags & PAGE_READ))
a68fe89c 1799 return -1;
579a97f7 1800 /* XXX: this code should not depend on lock_user */
72fb7daa 1801 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1802 return -1;
72fb7daa 1803 memcpy(buf, p, l);
5b257578 1804 unlock_user(p, addr, 0);
13eb76e0
FB
1805 }
1806 len -= l;
1807 buf += l;
1808 addr += l;
1809 }
a68fe89c 1810 return 0;
13eb76e0 1811}
8df1cd07 1812
13eb76e0 1813#else
51d7a9eb 1814
a8170e5e
AK
1815static void invalidate_and_set_dirty(hwaddr addr,
1816 hwaddr length)
51d7a9eb
AP
1817{
1818 if (!cpu_physical_memory_is_dirty(addr)) {
1819 /* invalidate code */
1820 tb_invalidate_phys_page_range(addr, addr + length, 0);
1821 /* set dirty bit */
1822 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1823 }
e226939d 1824 xen_modified_memory(addr, length);
51d7a9eb
AP
1825}
1826
2bbfa05d
PB
1827static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1828{
1829 if (memory_region_is_ram(mr)) {
1830 return !(is_write && mr->readonly);
1831 }
1832 if (memory_region_is_romd(mr)) {
1833 return !is_write;
1834 }
1835
1836 return false;
1837}
1838
23326164 1839static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 1840{
e1622f4b 1841 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
1842
1843 /* Regions are assumed to support 1-4 byte accesses unless
1844 otherwise specified. */
23326164
RH
1845 if (access_size_max == 0) {
1846 access_size_max = 4;
1847 }
1848
1849 /* Bound the maximum access by the alignment of the address. */
1850 if (!mr->ops->impl.unaligned) {
1851 unsigned align_size_max = addr & -addr;
1852 if (align_size_max != 0 && align_size_max < access_size_max) {
1853 access_size_max = align_size_max;
1854 }
82f2563f 1855 }
23326164
RH
1856
1857 /* Don't attempt accesses larger than the maximum. */
1858 if (l > access_size_max) {
1859 l = access_size_max;
82f2563f 1860 }
098178f2
PB
1861 if (l & (l - 1)) {
1862 l = 1 << (qemu_fls(l) - 1);
1863 }
23326164
RH
1864
1865 return l;
82f2563f
PB
1866}
1867
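/* Worked example (illustrative only): for an MMIO region whose ops set
 * .valid.max_access_size = 4 and leave impl.unaligned clear, a request of
 * l = 8 bytes at addr = 0x1002 is narrowed as follows:
 *     access_size_max = 4          (from the region's ops)
 *     addr & -addr    = 2          (alignment of 0x1002)
 *     => access_size_max = 2, l = MIN(8, 2) = 2, already a power of two,
 * so the caller below ends up issuing a 2-byte io_mem_read()/io_mem_write(). */
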
fd8aaa76 1868bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 1869 int len, bool is_write)
13eb76e0 1870{
149f54b5 1871 hwaddr l;
13eb76e0 1872 uint8_t *ptr;
791af8c8 1873 uint64_t val;
149f54b5 1874 hwaddr addr1;
5c8a00ce 1875 MemoryRegion *mr;
fd8aaa76 1876 bool error = false;
3b46e624 1877
13eb76e0 1878 while (len > 0) {
149f54b5 1879 l = len;
5c8a00ce 1880 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 1881
13eb76e0 1882 if (is_write) {
5c8a00ce
PB
1883 if (!memory_access_is_direct(mr, is_write)) {
1884 l = memory_access_size(mr, l, addr1);
4917cf44 1885 /* XXX: could force current_cpu to NULL to avoid
6a00d601 1886 potential bugs */
23326164
RH
1887 switch (l) {
1888 case 8:
1889 /* 64 bit write access */
1890 val = ldq_p(buf);
1891 error |= io_mem_write(mr, addr1, val, 8);
1892 break;
1893 case 4:
1c213d19 1894 /* 32 bit write access */
c27004ec 1895 val = ldl_p(buf);
5c8a00ce 1896 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
1897 break;
1898 case 2:
1c213d19 1899 /* 16 bit write access */
c27004ec 1900 val = lduw_p(buf);
5c8a00ce 1901 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
1902 break;
1903 case 1:
1c213d19 1904 /* 8 bit write access */
c27004ec 1905 val = ldub_p(buf);
5c8a00ce 1906 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
1907 break;
1908 default:
1909 abort();
13eb76e0 1910 }
2bbfa05d 1911 } else {
5c8a00ce 1912 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 1913 /* RAM case */
5579c7f3 1914 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 1915 memcpy(ptr, buf, l);
51d7a9eb 1916 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
1917 }
1918 } else {
5c8a00ce 1919 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 1920 /* I/O case */
5c8a00ce 1921 l = memory_access_size(mr, l, addr1);
23326164
RH
1922 switch (l) {
1923 case 8:
1924 /* 64 bit read access */
1925 error |= io_mem_read(mr, addr1, &val, 8);
1926 stq_p(buf, val);
1927 break;
1928 case 4:
13eb76e0 1929 /* 32 bit read access */
5c8a00ce 1930 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 1931 stl_p(buf, val);
23326164
RH
1932 break;
1933 case 2:
13eb76e0 1934 /* 16 bit read access */
5c8a00ce 1935 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 1936 stw_p(buf, val);
23326164
RH
1937 break;
1938 case 1:
1c213d19 1939 /* 8 bit read access */
5c8a00ce 1940 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 1941 stb_p(buf, val);
23326164
RH
1942 break;
1943 default:
1944 abort();
13eb76e0
FB
1945 }
1946 } else {
1947 /* RAM case */
5c8a00ce 1948 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 1949 memcpy(buf, ptr, l);
13eb76e0
FB
1950 }
1951 }
1952 len -= l;
1953 buf += l;
1954 addr += l;
1955 }
fd8aaa76
PB
1956
1957 return error;
13eb76e0 1958}
8df1cd07 1959
fd8aaa76 1960bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
1961 const uint8_t *buf, int len)
1962{
fd8aaa76 1963 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
1964}
1965
fd8aaa76 1966bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 1967{
fd8aaa76 1968 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
1969}
1970
1971
a8170e5e 1972void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
1973 int len, int is_write)
1974{
fd8aaa76 1975 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
1976}
1977
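/* Minimal usage sketch (assumed device code, not from this file): a device
 * model pulling a small guest-physical buffer through the slow path above.
 * address_space_rw() returns true if any I/O callback reported an error, so
 * this helper returns true only when every byte was transferred. */
static bool example_read_guest_buffer(hwaddr guest_addr, uint8_t *dst, int len)
{
    return !address_space_rw(&address_space_memory, guest_addr, dst, len,
                             false);
}
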
d0ecd2aa 1978/* used for ROM loading: can write in RAM and ROM */
a8170e5e 1979void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
1980 const uint8_t *buf, int len)
1981{
149f54b5 1982 hwaddr l;
d0ecd2aa 1983 uint8_t *ptr;
149f54b5 1984 hwaddr addr1;
5c8a00ce 1985 MemoryRegion *mr;
3b46e624 1986
d0ecd2aa 1987 while (len > 0) {
149f54b5 1988 l = len;
5c8a00ce
PB
1989 mr = address_space_translate(&address_space_memory,
1990 addr, &addr1, &l, true);
3b46e624 1991
5c8a00ce
PB
1992 if (!(memory_region_is_ram(mr) ||
1993 memory_region_is_romd(mr))) {
d0ecd2aa
FB
1994 /* do nothing */
1995 } else {
5c8a00ce 1996 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 1997 /* ROM/RAM case */
5579c7f3 1998 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 1999 memcpy(ptr, buf, l);
51d7a9eb 2000 invalidate_and_set_dirty(addr1, l);
d0ecd2aa
FB
2001 }
2002 len -= l;
2003 buf += l;
2004 addr += l;
2005 }
2006}
2007
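/* Sketch of the intended caller (e.g. a firmware loader); the blob and base
 * address come from the caller and are invented for the example.  Unlike
 * address_space_rw(), this helper writes even into regions the guest sees
 * as ROM. */
static void example_load_firmware(hwaddr rom_base,
                                  const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(rom_base, blob, blob_len);
}
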
6d16c2f8 2008typedef struct {
d3e71559 2009 MemoryRegion *mr;
6d16c2f8 2010 void *buffer;
a8170e5e
AK
2011 hwaddr addr;
2012 hwaddr len;
6d16c2f8
AL
2013} BounceBuffer;
2014
2015static BounceBuffer bounce;
2016
ba223c29
AL
2017typedef struct MapClient {
2018 void *opaque;
2019 void (*callback)(void *opaque);
72cf2d4f 2020 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2021} MapClient;
2022
72cf2d4f
BS
2023static QLIST_HEAD(map_client_list, MapClient) map_client_list
2024 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2025
2026void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2027{
7267c094 2028 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2029
2030 client->opaque = opaque;
2031 client->callback = callback;
72cf2d4f 2032 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2033 return client;
2034}
2035
8b9c99d9 2036static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2037{
2038 MapClient *client = (MapClient *)_client;
2039
72cf2d4f 2040 QLIST_REMOVE(client, link);
7267c094 2041 g_free(client);
ba223c29
AL
2042}
2043
2044static void cpu_notify_map_clients(void)
2045{
2046 MapClient *client;
2047
72cf2d4f
BS
2048 while (!QLIST_EMPTY(&map_client_list)) {
2049 client = QLIST_FIRST(&map_client_list);
ba223c29 2050 client->callback(client->opaque);
34d5e948 2051 cpu_unregister_map_client(client);
ba223c29
AL
2052 }
2053}
2054
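/* Sketch of how a device uses the map-client list: when address_space_map()
 * fails because the single bounce buffer is busy, the device registers a
 * callback and re-issues the transfer once cpu_notify_map_clients() runs.
 * Both example_* functions are hypothetical. */
static void example_retry_dma(void *opaque)
{
    /* re-issue the deferred address_space_map() from here */
}

static void example_defer_dma(void *dev)
{
    cpu_register_map_client(dev, example_retry_dma);
}
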
51644ab7
PB
2055bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2056{
5c8a00ce 2057 MemoryRegion *mr;
51644ab7
PB
2058 hwaddr l, xlat;
2059
2060 while (len > 0) {
2061 l = len;
5c8a00ce
PB
2062 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2063 if (!memory_access_is_direct(mr, is_write)) {
2064 l = memory_access_size(mr, l, addr);
2065 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2066 return false;
2067 }
2068 }
2069
2070 len -= l;
2071 addr += l;
2072 }
2073 return true;
2074}
2075
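/* Sketch: probing whether a 4-byte read would be accepted before issuing it,
 * in the style of the DMA helpers.  The address is supplied by the caller. */
static bool example_can_read_word(hwaddr addr)
{
    return address_space_access_valid(&address_space_memory, addr, 4, false);
}
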
6d16c2f8
AL
2076/* Map a physical memory region into a host virtual address.
2077 * May map a subset of the requested range, given by and returned in *plen.
2078 * May return NULL if resources needed to perform the mapping are exhausted.
2079 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2080 * Use cpu_register_map_client() to know when retrying the map operation is
2081 * likely to succeed.
6d16c2f8 2082 */
ac1970fb 2083void *address_space_map(AddressSpace *as,
a8170e5e
AK
2084 hwaddr addr,
2085 hwaddr *plen,
ac1970fb 2086 bool is_write)
6d16c2f8 2087{
a8170e5e 2088 hwaddr len = *plen;
e3127ae0
PB
2089 hwaddr done = 0;
2090 hwaddr l, xlat, base;
2091 MemoryRegion *mr, *this_mr;
2092 ram_addr_t raddr;
6d16c2f8 2093
e3127ae0
PB
2094 if (len == 0) {
2095 return NULL;
2096 }
38bee5dc 2097
e3127ae0
PB
2098 l = len;
2099 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2100 if (!memory_access_is_direct(mr, is_write)) {
2101 if (bounce.buffer) {
2102 return NULL;
6d16c2f8 2103 }
e85d9db5
KW
2104 /* Avoid unbounded allocations */
2105 l = MIN(l, TARGET_PAGE_SIZE);
2106 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2107 bounce.addr = addr;
2108 bounce.len = l;
d3e71559
PB
2109
2110 memory_region_ref(mr);
2111 bounce.mr = mr;
e3127ae0
PB
2112 if (!is_write) {
2113 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2114 }
6d16c2f8 2115
e3127ae0
PB
2116 *plen = l;
2117 return bounce.buffer;
2118 }
2119
2120 base = xlat;
2121 raddr = memory_region_get_ram_addr(mr);
2122
2123 for (;;) {
6d16c2f8
AL
2124 len -= l;
2125 addr += l;
e3127ae0
PB
2126 done += l;
2127 if (len == 0) {
2128 break;
2129 }
2130
2131 l = len;
2132 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2133 if (this_mr != mr || xlat != base + done) {
2134 break;
2135 }
6d16c2f8 2136 }
e3127ae0 2137
d3e71559 2138 memory_region_ref(mr);
e3127ae0
PB
2139 *plen = done;
2140 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2141}
2142
ac1970fb 2143/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2144 * Will also mark the memory as dirty if is_write == 1. access_len gives
2145 * the amount of memory that was actually read or written by the caller.
2146 */
a8170e5e
AK
2147void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2148 int is_write, hwaddr access_len)
6d16c2f8
AL
2149{
2150 if (buffer != bounce.buffer) {
d3e71559
PB
2151 MemoryRegion *mr;
2152 ram_addr_t addr1;
2153
2154 mr = qemu_ram_addr_from_host(buffer, &addr1);
2155 assert(mr != NULL);
6d16c2f8 2156 if (is_write) {
6d16c2f8
AL
2157 while (access_len) {
2158 unsigned l;
2159 l = TARGET_PAGE_SIZE;
2160 if (l > access_len)
2161 l = access_len;
51d7a9eb 2162 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2163 addr1 += l;
2164 access_len -= l;
2165 }
2166 }
868bb33f 2167 if (xen_enabled()) {
e41d7c69 2168 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2169 }
d3e71559 2170 memory_region_unref(mr);
6d16c2f8
AL
2171 return;
2172 }
2173 if (is_write) {
ac1970fb 2174 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2175 }
f8a83245 2176 qemu_vfree(bounce.buffer);
6d16c2f8 2177 bounce.buffer = NULL;
d3e71559 2178 memory_region_unref(bounce.mr);
ba223c29 2179 cpu_notify_map_clients();
6d16c2f8 2180}
d0ecd2aa 2181
a8170e5e
AK
2182void *cpu_physical_memory_map(hwaddr addr,
2183 hwaddr *plen,
ac1970fb
AK
2184 int is_write)
2185{
2186 return address_space_map(&address_space_memory, addr, plen, is_write);
2187}
2188
a8170e5e
AK
2189void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2190 int is_write, hwaddr access_len)
ac1970fb
AK
2191{
2192 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2193}
2194
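/* Sketch of the map/do-DMA/unmap pattern the two helpers above exist for
 * (zero-copy access with a bounce-buffer fallback).  The function and its
 * arguments are invented for the example. */
static void example_dma_write(AddressSpace *as, hwaddr guest_addr,
                              const uint8_t *src, hwaddr size)
{
    hwaddr len = size;
    void *host = address_space_map(as, guest_addr, &len, true);

    if (!host) {
        /* Resources exhausted; a real device would register a map client
         * and retry from its callback. */
        return;
    }
    memcpy(host, src, len);                 /* len may be smaller than size */
    address_space_unmap(as, host, len, true, len);
}
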
8df1cd07 2195/* warning: addr must be aligned */
a8170e5e 2196static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 2197 enum device_endian endian)
8df1cd07 2198{
8df1cd07 2199 uint8_t *ptr;
791af8c8 2200 uint64_t val;
5c8a00ce 2201 MemoryRegion *mr;
149f54b5
PB
2202 hwaddr l = 4;
2203 hwaddr addr1;
8df1cd07 2204
5c8a00ce
PB
2205 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2206 false);
2207 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2208 /* I/O case */
5c8a00ce 2209 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2210#if defined(TARGET_WORDS_BIGENDIAN)
2211 if (endian == DEVICE_LITTLE_ENDIAN) {
2212 val = bswap32(val);
2213 }
2214#else
2215 if (endian == DEVICE_BIG_ENDIAN) {
2216 val = bswap32(val);
2217 }
2218#endif
8df1cd07
FB
2219 } else {
2220 /* RAM case */
5c8a00ce 2221 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2222 & TARGET_PAGE_MASK)
149f54b5 2223 + addr1);
1e78bcc1
AG
2224 switch (endian) {
2225 case DEVICE_LITTLE_ENDIAN:
2226 val = ldl_le_p(ptr);
2227 break;
2228 case DEVICE_BIG_ENDIAN:
2229 val = ldl_be_p(ptr);
2230 break;
2231 default:
2232 val = ldl_p(ptr);
2233 break;
2234 }
8df1cd07
FB
2235 }
2236 return val;
2237}
2238
a8170e5e 2239uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
2240{
2241 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2242}
2243
a8170e5e 2244uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
2245{
2246 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2247}
2248
a8170e5e 2249uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
2250{
2251 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2252}
2253
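/* Usage sketch: a device reading one 32-bit little-endian field out of a
 * guest-physical descriptor.  The descriptor layout and the +4 offset are
 * invented for the example. */
static uint32_t example_read_desc_flags(hwaddr desc_addr)
{
    return ldl_le_phys(desc_addr + 4);
}
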
84b7b8e7 2254/* warning: addr must be aligned */
a8170e5e 2255static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 2256 enum device_endian endian)
84b7b8e7 2257{
84b7b8e7
FB
2258 uint8_t *ptr;
2259 uint64_t val;
5c8a00ce 2260 MemoryRegion *mr;
149f54b5
PB
2261 hwaddr l = 8;
2262 hwaddr addr1;
84b7b8e7 2263
5c8a00ce
PB
2264 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2265 false);
2266 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2267 /* I/O case */
5c8a00ce 2268 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2269#if defined(TARGET_WORDS_BIGENDIAN)
2270 if (endian == DEVICE_LITTLE_ENDIAN) {
2271 val = bswap64(val);
2272 }
2273#else
2274 if (endian == DEVICE_BIG_ENDIAN) {
2275 val = bswap64(val);
2276 }
84b7b8e7
FB
2277#endif
2278 } else {
2279 /* RAM case */
5c8a00ce 2280 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2281 & TARGET_PAGE_MASK)
149f54b5 2282 + addr1);
1e78bcc1
AG
2283 switch (endian) {
2284 case DEVICE_LITTLE_ENDIAN:
2285 val = ldq_le_p(ptr);
2286 break;
2287 case DEVICE_BIG_ENDIAN:
2288 val = ldq_be_p(ptr);
2289 break;
2290 default:
2291 val = ldq_p(ptr);
2292 break;
2293 }
84b7b8e7
FB
2294 }
2295 return val;
2296}
2297
a8170e5e 2298uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
2299{
2300 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2301}
2302
a8170e5e 2303uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
2304{
2305 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2306}
2307
a8170e5e 2308uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
2309{
2310 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2311}
2312
aab33094 2313/* XXX: optimize */
a8170e5e 2314uint32_t ldub_phys(hwaddr addr)
aab33094
FB
2315{
2316 uint8_t val;
2317 cpu_physical_memory_read(addr, &val, 1);
2318 return val;
2319}
2320
733f0b02 2321/* warning: addr must be aligned */
a8170e5e 2322static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 2323 enum device_endian endian)
aab33094 2324{
733f0b02
MT
2325 uint8_t *ptr;
2326 uint64_t val;
5c8a00ce 2327 MemoryRegion *mr;
149f54b5
PB
2328 hwaddr l = 2;
2329 hwaddr addr1;
733f0b02 2330
5c8a00ce
PB
2331 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2332 false);
2333 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2334 /* I/O case */
5c8a00ce 2335 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2336#if defined(TARGET_WORDS_BIGENDIAN)
2337 if (endian == DEVICE_LITTLE_ENDIAN) {
2338 val = bswap16(val);
2339 }
2340#else
2341 if (endian == DEVICE_BIG_ENDIAN) {
2342 val = bswap16(val);
2343 }
2344#endif
733f0b02
MT
2345 } else {
2346 /* RAM case */
5c8a00ce 2347 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2348 & TARGET_PAGE_MASK)
149f54b5 2349 + addr1);
1e78bcc1
AG
2350 switch (endian) {
2351 case DEVICE_LITTLE_ENDIAN:
2352 val = lduw_le_p(ptr);
2353 break;
2354 case DEVICE_BIG_ENDIAN:
2355 val = lduw_be_p(ptr);
2356 break;
2357 default:
2358 val = lduw_p(ptr);
2359 break;
2360 }
733f0b02
MT
2361 }
2362 return val;
aab33094
FB
2363}
2364
a8170e5e 2365uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
2366{
2367 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2368}
2369
a8170e5e 2370uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
2371{
2372 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2373}
2374
a8170e5e 2375uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
2376{
2377 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2378}
2379
8df1cd07
FB
2380/* warning: addr must be aligned. The RAM page is not marked as dirty
2381 and the code inside is not invalidated. It is useful if the dirty
2382 bits are used to track modified PTEs */
a8170e5e 2383void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 2384{
8df1cd07 2385 uint8_t *ptr;
5c8a00ce 2386 MemoryRegion *mr;
149f54b5
PB
2387 hwaddr l = 4;
2388 hwaddr addr1;
8df1cd07 2389
5c8a00ce
PB
2390 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2391 true);
2392 if (l < 4 || !memory_access_is_direct(mr, true)) {
2393 io_mem_write(mr, addr1, val, 4);
8df1cd07 2394 } else {
5c8a00ce 2395 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2396 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2397 stl_p(ptr, val);
74576198
AL
2398
2399 if (unlikely(in_migration)) {
2400 if (!cpu_physical_memory_is_dirty(addr1)) {
2401 /* invalidate code */
2402 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2403 /* set dirty bit */
f7c11b53
YT
2404 cpu_physical_memory_set_dirty_flags(
2405 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
2406 }
2407 }
8df1cd07
FB
2408 }
2409}
2410
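/* Illustrative sketch (hypothetical target code): a softmmu page-table walker
 * setting an "accessed" bit in a guest PTE.  Using stl_phys_notdirty() keeps
 * the write from invalidating translated code on that page; the 0x20 bit
 * position is made up for the example. */
static void example_set_pte_accessed(hwaddr pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical accessed bit */);
}
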
2411/* warning: addr must be aligned */
a8170e5e 2412static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2413 enum device_endian endian)
8df1cd07 2414{
8df1cd07 2415 uint8_t *ptr;
5c8a00ce 2416 MemoryRegion *mr;
149f54b5
PB
2417 hwaddr l = 4;
2418 hwaddr addr1;
8df1cd07 2419
5c8a00ce
PB
2420 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2421 true);
2422 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2423#if defined(TARGET_WORDS_BIGENDIAN)
2424 if (endian == DEVICE_LITTLE_ENDIAN) {
2425 val = bswap32(val);
2426 }
2427#else
2428 if (endian == DEVICE_BIG_ENDIAN) {
2429 val = bswap32(val);
2430 }
2431#endif
5c8a00ce 2432 io_mem_write(mr, addr1, val, 4);
8df1cd07 2433 } else {
8df1cd07 2434 /* RAM case */
5c8a00ce 2435 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2436 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2437 switch (endian) {
2438 case DEVICE_LITTLE_ENDIAN:
2439 stl_le_p(ptr, val);
2440 break;
2441 case DEVICE_BIG_ENDIAN:
2442 stl_be_p(ptr, val);
2443 break;
2444 default:
2445 stl_p(ptr, val);
2446 break;
2447 }
51d7a9eb 2448 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2449 }
2450}
2451
a8170e5e 2452void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2453{
2454 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2455}
2456
a8170e5e 2457void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2458{
2459 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2460}
2461
a8170e5e 2462void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2463{
2464 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2465}
2466
aab33094 2467/* XXX: optimize */
a8170e5e 2468void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
2469{
2470 uint8_t v = val;
2471 cpu_physical_memory_write(addr, &v, 1);
2472}
2473
733f0b02 2474/* warning: addr must be aligned */
a8170e5e 2475static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 2476 enum device_endian endian)
aab33094 2477{
733f0b02 2478 uint8_t *ptr;
5c8a00ce 2479 MemoryRegion *mr;
149f54b5
PB
2480 hwaddr l = 2;
2481 hwaddr addr1;
733f0b02 2482
5c8a00ce
PB
2483 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2484 true);
2485 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2486#if defined(TARGET_WORDS_BIGENDIAN)
2487 if (endian == DEVICE_LITTLE_ENDIAN) {
2488 val = bswap16(val);
2489 }
2490#else
2491 if (endian == DEVICE_BIG_ENDIAN) {
2492 val = bswap16(val);
2493 }
2494#endif
5c8a00ce 2495 io_mem_write(mr, addr1, val, 2);
733f0b02 2496 } else {
733f0b02 2497 /* RAM case */
5c8a00ce 2498 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2499 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2500 switch (endian) {
2501 case DEVICE_LITTLE_ENDIAN:
2502 stw_le_p(ptr, val);
2503 break;
2504 case DEVICE_BIG_ENDIAN:
2505 stw_be_p(ptr, val);
2506 break;
2507 default:
2508 stw_p(ptr, val);
2509 break;
2510 }
51d7a9eb 2511 invalidate_and_set_dirty(addr1, 2);
733f0b02 2512 }
aab33094
FB
2513}
2514
a8170e5e 2515void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2516{
2517 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2518}
2519
a8170e5e 2520void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2521{
2522 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2523}
2524
a8170e5e 2525void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
2526{
2527 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2528}
2529
aab33094 2530/* XXX: optimize */
a8170e5e 2531void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
2532{
2533 val = tswap64(val);
71d2b725 2534 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
2535}
2536
a8170e5e 2537void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2538{
2539 val = cpu_to_le64(val);
2540 cpu_physical_memory_write(addr, &val, 8);
2541}
2542
a8170e5e 2543void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
2544{
2545 val = cpu_to_be64(val);
2546 cpu_physical_memory_write(addr, &val, 8);
2547}
2548
5e2972fd 2549/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2550int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2551 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2552{
2553 int l;
a8170e5e 2554 hwaddr phys_addr;
9b3c35e0 2555 target_ulong page;
13eb76e0
FB
2556
2557 while (len > 0) {
2558 page = addr & TARGET_PAGE_MASK;
f17ec444 2559 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2560 /* if no physical page mapped, return an error */
2561 if (phys_addr == -1)
2562 return -1;
2563 l = (page + TARGET_PAGE_SIZE) - addr;
2564 if (l > len)
2565 l = len;
5e2972fd 2566 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
2567 if (is_write)
2568 cpu_physical_memory_write_rom(phys_addr, buf, l);
2569 else
5e2972fd 2570 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
2571 len -= l;
2572 buf += l;
2573 addr += l;
2574 }
2575 return 0;
2576}
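
/* Usage sketch: a debug-path caller (gdbstub style) peeking at guest virtual
 * memory; 'cpu', 'vaddr' and the buffer come from the caller. */
static int example_debug_peek(CPUState *cpu, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(cpu, vaddr, buf, len, 0);
}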
a68fe89c 2577#endif
13eb76e0 2578
8e4a424b
BS
2579#if !defined(CONFIG_USER_ONLY)
2580
2581/*
2582 * A helper function for the _utterly broken_ virtio device model to find out if
2583 * it's running on a big-endian machine. Don't do this at home, kids!
2584 */
2585bool virtio_is_big_endian(void);
2586bool virtio_is_big_endian(void)
2587{
2588#if defined(TARGET_WORDS_BIGENDIAN)
2589 return true;
2590#else
2591 return false;
2592#endif
2593}
2594
2595#endif
2596
76f35538 2597#ifndef CONFIG_USER_ONLY
a8170e5e 2598bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2599{
5c8a00ce 2600 MemoryRegion *mr;
149f54b5 2601 hwaddr l = 1;
76f35538 2602
5c8a00ce
PB
2603 mr = address_space_translate(&address_space_memory,
2604 phys_addr, &phys_addr, &l, false);
76f35538 2605
5c8a00ce
PB
2606 return !(memory_region_is_ram(mr) ||
2607 memory_region_is_romd(mr));
76f35538 2608}
bd2fa51f
MH
2609
2610void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2611{
2612 RAMBlock *block;
2613
2614 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2615 func(block->host, block->offset, block->length, opaque);
2616 }
2617}
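
/* Sketch of a callback for the iterator above; the RAMBlockIterFunc prototype
 * is inferred from the call site (host pointer, ram_addr_t offset, ram_addr_t
 * length, opaque) and should be checked against the header. */
static void example_dump_block(void *host, ram_addr_t offset,
                               ram_addr_t length, void *opaque)
{
    printf("ram block: host %p offset 0x%llx length 0x%llx\n",
           host, (unsigned long long)offset, (unsigned long long)length);
}

/* ... later: qemu_ram_foreach_block(example_dump_block, NULL); */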
ec3f8c99 2618#endif