git.proxmox.com Git - mirror_qemu.git/blob - exec.c
exec: add parameter errp to gethugepagesize
1 /*
2 * Virtual page mapping
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #ifndef _WIN32
21 #include <sys/types.h>
22 #include <sys/mman.h>
23 #endif
24
25 #include "qemu-common.h"
26 #include "cpu.h"
27 #include "tcg.h"
28 #include "hw/hw.h"
29 #include "hw/qdev.h"
30 #include "qemu/osdep.h"
31 #include "sysemu/kvm.h"
32 #include "sysemu/sysemu.h"
33 #include "hw/xen/xen.h"
34 #include "qemu/timer.h"
35 #include "qemu/config-file.h"
36 #include "qemu/error-report.h"
37 #include "exec/memory.h"
38 #include "sysemu/dma.h"
39 #include "exec/address-spaces.h"
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #else /* !CONFIG_USER_ONLY */
43 #include "sysemu/xen-mapcache.h"
44 #include "trace.h"
45 #endif
46 #include "exec/cpu-all.h"
47
48 #include "exec/cputlb.h"
49 #include "translate-all.h"
50
51 #include "exec/memory-internal.h"
52 #include "exec/ram_addr.h"
53
54 #include "qemu/range.h"
55
56 //#define DEBUG_SUBPAGE
57
58 #if !defined(CONFIG_USER_ONLY)
59 static bool in_migration;
60
61 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62
63 static MemoryRegion *system_memory;
64 static MemoryRegion *system_io;
65
66 AddressSpace address_space_io;
67 AddressSpace address_space_memory;
68
69 MemoryRegion io_mem_rom, io_mem_notdirty;
70 static MemoryRegion io_mem_unassigned;
71
72 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
73 #define RAM_PREALLOC (1 << 0)
74
75 /* RAM is mmap-ed with MAP_SHARED */
76 #define RAM_SHARED (1 << 1)
77
78 #endif
79
80 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
81 /* current CPU in the current thread. It is only valid inside
82 cpu_exec() */
83 DEFINE_TLS(CPUState *, current_cpu);
84 /* 0 = Do not count executed instructions.
85 1 = Precise instruction counting.
86 2 = Adaptive rate instruction counting. */
87 int use_icount;
88
89 #if !defined(CONFIG_USER_ONLY)
90
91 typedef struct PhysPageEntry PhysPageEntry;
92
93 struct PhysPageEntry {
94     /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
95 uint32_t skip : 6;
96 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
97 uint32_t ptr : 26;
98 };
99
100 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
101
102 /* Size of the L2 (and L3, etc) page tables. */
103 #define ADDR_SPACE_BITS 64
104
105 #define P_L2_BITS 9
106 #define P_L2_SIZE (1 << P_L2_BITS)
107
108 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
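/*
 * Worked example (illustrative; assumes the common TARGET_PAGE_BITS of 12):
 * with ADDR_SPACE_BITS = 64 and P_L2_BITS = 9, P_L2_LEVELS evaluates to
 * ((64 - 12 - 1) / 9) + 1 = 6, i.e. the page number is consumed 9 bits at
 * a time over at most six radix-tree levels, fewer when 'skip' collapses
 * single-child chains (see phys_page_compact() below).
 */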
109
110 typedef PhysPageEntry Node[P_L2_SIZE];
111
112 typedef struct PhysPageMap {
113 unsigned sections_nb;
114 unsigned sections_nb_alloc;
115 unsigned nodes_nb;
116 unsigned nodes_nb_alloc;
117 Node *nodes;
118 MemoryRegionSection *sections;
119 } PhysPageMap;
120
121 struct AddressSpaceDispatch {
122 /* This is a multi-level map on the physical address space.
123 * The bottom level has pointers to MemoryRegionSections.
124 */
125 PhysPageEntry phys_map;
126 PhysPageMap map;
127 AddressSpace *as;
128 };
129
130 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
131 typedef struct subpage_t {
132 MemoryRegion iomem;
133 AddressSpace *as;
134 hwaddr base;
135 uint16_t sub_section[TARGET_PAGE_SIZE];
136 } subpage_t;
137
138 #define PHYS_SECTION_UNASSIGNED 0
139 #define PHYS_SECTION_NOTDIRTY 1
140 #define PHYS_SECTION_ROM 2
141 #define PHYS_SECTION_WATCH 3
142
143 static void io_mem_init(void);
144 static void memory_map_init(void);
145 static void tcg_commit(MemoryListener *listener);
146
147 static MemoryRegion io_mem_watch;
148 #endif
149
150 #if !defined(CONFIG_USER_ONLY)
151
152 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
153 {
154 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
155 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
156 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
157 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
158 }
159 }
160
161 static uint32_t phys_map_node_alloc(PhysPageMap *map)
162 {
163 unsigned i;
164 uint32_t ret;
165
166 ret = map->nodes_nb++;
167 assert(ret != PHYS_MAP_NODE_NIL);
168 assert(ret != map->nodes_nb_alloc);
169 for (i = 0; i < P_L2_SIZE; ++i) {
170 map->nodes[ret][i].skip = 1;
171 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
172 }
173 return ret;
174 }
175
176 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
177 hwaddr *index, hwaddr *nb, uint16_t leaf,
178 int level)
179 {
180 PhysPageEntry *p;
181 int i;
182 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
183
184 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
185 lp->ptr = phys_map_node_alloc(map);
186 p = map->nodes[lp->ptr];
187 if (level == 0) {
188 for (i = 0; i < P_L2_SIZE; i++) {
189 p[i].skip = 0;
190 p[i].ptr = PHYS_SECTION_UNASSIGNED;
191 }
192 }
193 } else {
194 p = map->nodes[lp->ptr];
195 }
196 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
197
198 while (*nb && lp < &p[P_L2_SIZE]) {
199 if ((*index & (step - 1)) == 0 && *nb >= step) {
200 lp->skip = 0;
201 lp->ptr = leaf;
202 *index += step;
203 *nb -= step;
204 } else {
205 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
206 }
207 ++lp;
208 }
209 }
210
211 static void phys_page_set(AddressSpaceDispatch *d,
212 hwaddr index, hwaddr nb,
213 uint16_t leaf)
214 {
215 /* Wildly overreserve - it doesn't matter much. */
216 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
217
218 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
219 }
220
221 /* Compact a non-leaf page entry. Simply detect that the entry has a single child,
222 * and update our entry so we can skip it and go directly to the destination.
223 */
224 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
225 {
226 unsigned valid_ptr = P_L2_SIZE;
227 int valid = 0;
228 PhysPageEntry *p;
229 int i;
230
231 if (lp->ptr == PHYS_MAP_NODE_NIL) {
232 return;
233 }
234
235 p = nodes[lp->ptr];
236 for (i = 0; i < P_L2_SIZE; i++) {
237 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
238 continue;
239 }
240
241 valid_ptr = i;
242 valid++;
243 if (p[i].skip) {
244 phys_page_compact(&p[i], nodes, compacted);
245 }
246 }
247
248 /* We can only compress if there's only one child. */
249 if (valid != 1) {
250 return;
251 }
252
253 assert(valid_ptr < P_L2_SIZE);
254
255 /* Don't compress if it won't fit in the # of bits we have. */
256 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
257 return;
258 }
259
260 lp->ptr = p[valid_ptr].ptr;
261 if (!p[valid_ptr].skip) {
262 /* If our only child is a leaf, make this a leaf. */
263 /* By design, we should have made this node a leaf to begin with so we
264 * should never reach here.
265 * But since it's so simple to handle this, let's do it just in case we
266 * change this rule.
267 */
268 lp->skip = 0;
269 } else {
270 lp->skip += p[valid_ptr].skip;
271 }
272 }
273
274 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
275 {
276 DECLARE_BITMAP(compacted, nodes_nb);
277
278 if (d->phys_map.skip) {
279 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
280 }
281 }
282
283 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
284 Node *nodes, MemoryRegionSection *sections)
285 {
286 PhysPageEntry *p;
287 hwaddr index = addr >> TARGET_PAGE_BITS;
288 int i;
289
290 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
291 if (lp.ptr == PHYS_MAP_NODE_NIL) {
292 return &sections[PHYS_SECTION_UNASSIGNED];
293 }
294 p = nodes[lp.ptr];
295 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
296 }
297
298 if (sections[lp.ptr].size.hi ||
299 range_covers_byte(sections[lp.ptr].offset_within_address_space,
300 sections[lp.ptr].size.lo, addr)) {
301 return &sections[lp.ptr];
302 } else {
303 return &sections[PHYS_SECTION_UNASSIGNED];
304 }
305 }
306
307 bool memory_region_is_unassigned(MemoryRegion *mr)
308 {
309 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
310 && mr != &io_mem_watch;
311 }
312
313 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
314 hwaddr addr,
315 bool resolve_subpage)
316 {
317 MemoryRegionSection *section;
318 subpage_t *subpage;
319
320 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
321 if (resolve_subpage && section->mr->subpage) {
322 subpage = container_of(section->mr, subpage_t, iomem);
323 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
324 }
325 return section;
326 }
327
328 static MemoryRegionSection *
329 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
330 hwaddr *plen, bool resolve_subpage)
331 {
332 MemoryRegionSection *section;
333 Int128 diff;
334
335 section = address_space_lookup_region(d, addr, resolve_subpage);
336 /* Compute offset within MemoryRegionSection */
337 addr -= section->offset_within_address_space;
338
339 /* Compute offset within MemoryRegion */
340 *xlat = addr + section->offset_within_region;
341
342 diff = int128_sub(section->mr->size, int128_make64(addr));
343 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
344 return section;
345 }
346
347 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
348 {
349 if (memory_region_is_ram(mr)) {
350 return !(is_write && mr->readonly);
351 }
352 if (memory_region_is_romd(mr)) {
353 return !is_write;
354 }
355
356 return false;
357 }
358
359 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
360 hwaddr *xlat, hwaddr *plen,
361 bool is_write)
362 {
363 IOMMUTLBEntry iotlb;
364 MemoryRegionSection *section;
365 MemoryRegion *mr;
366 hwaddr len = *plen;
367
368 for (;;) {
369 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
370 mr = section->mr;
371
372 if (!mr->iommu_ops) {
373 break;
374 }
375
376 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
377 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
378 | (addr & iotlb.addr_mask));
379 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
380 if (!(iotlb.perm & (1 << is_write))) {
381 mr = &io_mem_unassigned;
382 break;
383 }
384
385 as = iotlb.target_as;
386 }
387
388 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
389 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
390 len = MIN(page, len);
391 }
392
393 *plen = len;
394 *xlat = addr;
395 return mr;
396 }
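/*
 * Illustrative caller sketch (a condensed version of the read path that
 * address_space_rw() implements further down; 'as', 'addr', 'len' and
 * 'buf' stand for the caller's context):
 *
 *     hwaddr xlat, l = len;
 *     MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l, false);
 *     if (memory_access_is_direct(mr, false)) {
 *         memcpy(buf, qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + xlat), l);
 *     } else {
 *         uint64_t val;
 *         io_mem_read(mr, xlat, &val, memory_access_size(mr, l, xlat));
 *     }
 *
 * A *plen that comes back smaller than the requested length means the
 * access crosses a region boundary, which is why address_space_rw() loops.
 */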
397
398 MemoryRegionSection *
399 address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
400 hwaddr *plen)
401 {
402 MemoryRegionSection *section;
403 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
404
405 assert(!section->mr->iommu_ops);
406 return section;
407 }
408 #endif
409
410 void cpu_exec_init_all(void)
411 {
412 #if !defined(CONFIG_USER_ONLY)
413 qemu_mutex_init(&ram_list.mutex);
414 memory_map_init();
415 io_mem_init();
416 #endif
417 }
418
419 #if !defined(CONFIG_USER_ONLY)
420
421 static int cpu_common_post_load(void *opaque, int version_id)
422 {
423 CPUState *cpu = opaque;
424
425 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
426 version_id is increased. */
427 cpu->interrupt_request &= ~0x01;
428 tlb_flush(cpu, 1);
429
430 return 0;
431 }
432
433 static int cpu_common_pre_load(void *opaque)
434 {
435 CPUState *cpu = opaque;
436
437 cpu->exception_index = 0;
438
439 return 0;
440 }
441
442 static bool cpu_common_exception_index_needed(void *opaque)
443 {
444 CPUState *cpu = opaque;
445
446 return cpu->exception_index != 0;
447 }
448
449 static const VMStateDescription vmstate_cpu_common_exception_index = {
450 .name = "cpu_common/exception_index",
451 .version_id = 1,
452 .minimum_version_id = 1,
453 .fields = (VMStateField[]) {
454 VMSTATE_INT32(exception_index, CPUState),
455 VMSTATE_END_OF_LIST()
456 }
457 };
458
459 const VMStateDescription vmstate_cpu_common = {
460 .name = "cpu_common",
461 .version_id = 1,
462 .minimum_version_id = 1,
463 .pre_load = cpu_common_pre_load,
464 .post_load = cpu_common_post_load,
465 .fields = (VMStateField[]) {
466 VMSTATE_UINT32(halted, CPUState),
467 VMSTATE_UINT32(interrupt_request, CPUState),
468 VMSTATE_END_OF_LIST()
469 },
470 .subsections = (VMStateSubsection[]) {
471 {
472 .vmsd = &vmstate_cpu_common_exception_index,
473 .needed = cpu_common_exception_index_needed,
474 } , {
475 /* empty */
476 }
477 }
478 };
479
480 #endif
481
482 CPUState *qemu_get_cpu(int index)
483 {
484 CPUState *cpu;
485
486 CPU_FOREACH(cpu) {
487 if (cpu->cpu_index == index) {
488 return cpu;
489 }
490 }
491
492 return NULL;
493 }
494
495 #if !defined(CONFIG_USER_ONLY)
496 void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
497 {
498 /* We only support one address space per cpu at the moment. */
499 assert(cpu->as == as);
500
501 if (cpu->tcg_as_listener) {
502 memory_listener_unregister(cpu->tcg_as_listener);
503 } else {
504 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
505 }
506 cpu->tcg_as_listener->commit = tcg_commit;
507 memory_listener_register(cpu->tcg_as_listener, as);
508 }
509 #endif
510
511 void cpu_exec_init(CPUArchState *env)
512 {
513 CPUState *cpu = ENV_GET_CPU(env);
514 CPUClass *cc = CPU_GET_CLASS(cpu);
515 CPUState *some_cpu;
516 int cpu_index;
517
518 #if defined(CONFIG_USER_ONLY)
519 cpu_list_lock();
520 #endif
521 cpu_index = 0;
522 CPU_FOREACH(some_cpu) {
523 cpu_index++;
524 }
525 cpu->cpu_index = cpu_index;
526 cpu->numa_node = 0;
527 QTAILQ_INIT(&cpu->breakpoints);
528 QTAILQ_INIT(&cpu->watchpoints);
529 #ifndef CONFIG_USER_ONLY
530 cpu->as = &address_space_memory;
531 cpu->thread_id = qemu_get_thread_id();
532 #endif
533 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
534 #if defined(CONFIG_USER_ONLY)
535 cpu_list_unlock();
536 #endif
537 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
538 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
539 }
540 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
541 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
542 cpu_save, cpu_load, env);
543 assert(cc->vmsd == NULL);
544 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
545 #endif
546 if (cc->vmsd != NULL) {
547 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
548 }
549 }
550
551 #if defined(TARGET_HAS_ICE)
552 #if defined(CONFIG_USER_ONLY)
553 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
554 {
555 tb_invalidate_phys_page_range(pc, pc + 1, 0);
556 }
557 #else
558 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
559 {
560 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
561 if (phys != -1) {
562 tb_invalidate_phys_addr(cpu->as,
563 phys | (pc & ~TARGET_PAGE_MASK));
564 }
565 }
566 #endif
567 #endif /* TARGET_HAS_ICE */
568
569 #if defined(CONFIG_USER_ONLY)
570 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
571
572 {
573 }
574
575 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
576 int flags, CPUWatchpoint **watchpoint)
577 {
578 return -ENOSYS;
579 }
580 #else
581 /* Add a watchpoint. */
582 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
583 int flags, CPUWatchpoint **watchpoint)
584 {
585 vaddr len_mask = ~(len - 1);
586 CPUWatchpoint *wp;
587
588 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
589 if ((len & (len - 1)) || (addr & ~len_mask) ||
590 len == 0 || len > TARGET_PAGE_SIZE) {
591 error_report("tried to set invalid watchpoint at %"
592 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
593 return -EINVAL;
594 }
595 wp = g_malloc(sizeof(*wp));
596
597 wp->vaddr = addr;
598 wp->len_mask = len_mask;
599 wp->flags = flags;
600
601 /* keep all GDB-injected watchpoints in front */
602 if (flags & BP_GDB) {
603 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
604 } else {
605 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
606 }
607
608 tlb_flush_page(cpu, addr);
609
610 if (watchpoint)
611 *watchpoint = wp;
612 return 0;
613 }
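/*
 * Usage sketch (hypothetical values, for illustration only): watch four
 * bytes at guest virtual address 0x1000 for writes, then drop the
 * watchpoint by reference.  As checked above, the length must be a power
 * of two and the address aligned to it, or -EINVAL is returned.
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(cpu, 0x1000, 4, BP_MEM_WRITE, &wp) == 0) {
 *         ...
 *         cpu_watchpoint_remove_by_ref(cpu, wp);
 *     }
 */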
614
615 /* Remove a specific watchpoint. */
616 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
617 int flags)
618 {
619 vaddr len_mask = ~(len - 1);
620 CPUWatchpoint *wp;
621
622 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
623 if (addr == wp->vaddr && len_mask == wp->len_mask
624 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
625 cpu_watchpoint_remove_by_ref(cpu, wp);
626 return 0;
627 }
628 }
629 return -ENOENT;
630 }
631
632 /* Remove a specific watchpoint by reference. */
633 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
634 {
635 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
636
637 tlb_flush_page(cpu, watchpoint->vaddr);
638
639 g_free(watchpoint);
640 }
641
642 /* Remove all matching watchpoints. */
643 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
644 {
645 CPUWatchpoint *wp, *next;
646
647 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
648 if (wp->flags & mask) {
649 cpu_watchpoint_remove_by_ref(cpu, wp);
650 }
651 }
652 }
653 #endif
654
655 /* Add a breakpoint. */
656 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
657 CPUBreakpoint **breakpoint)
658 {
659 #if defined(TARGET_HAS_ICE)
660 CPUBreakpoint *bp;
661
662 bp = g_malloc(sizeof(*bp));
663
664 bp->pc = pc;
665 bp->flags = flags;
666
667 /* keep all GDB-injected breakpoints in front */
668 if (flags & BP_GDB) {
669 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
670 } else {
671 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
672 }
673
674 breakpoint_invalidate(cpu, pc);
675
676 if (breakpoint) {
677 *breakpoint = bp;
678 }
679 return 0;
680 #else
681 return -ENOSYS;
682 #endif
683 }
684
685 /* Remove a specific breakpoint. */
686 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
687 {
688 #if defined(TARGET_HAS_ICE)
689 CPUBreakpoint *bp;
690
691 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
692 if (bp->pc == pc && bp->flags == flags) {
693 cpu_breakpoint_remove_by_ref(cpu, bp);
694 return 0;
695 }
696 }
697 return -ENOENT;
698 #else
699 return -ENOSYS;
700 #endif
701 }
702
703 /* Remove a specific breakpoint by reference. */
704 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
705 {
706 #if defined(TARGET_HAS_ICE)
707 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
708
709 breakpoint_invalidate(cpu, breakpoint->pc);
710
711 g_free(breakpoint);
712 #endif
713 }
714
715 /* Remove all matching breakpoints. */
716 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
717 {
718 #if defined(TARGET_HAS_ICE)
719 CPUBreakpoint *bp, *next;
720
721 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
722 if (bp->flags & mask) {
723 cpu_breakpoint_remove_by_ref(cpu, bp);
724 }
725 }
726 #endif
727 }
728
729 /* enable or disable single step mode. EXCP_DEBUG is returned by the
730 CPU loop after each instruction */
731 void cpu_single_step(CPUState *cpu, int enabled)
732 {
733 #if defined(TARGET_HAS_ICE)
734 if (cpu->singlestep_enabled != enabled) {
735 cpu->singlestep_enabled = enabled;
736 if (kvm_enabled()) {
737 kvm_update_guest_debug(cpu, 0);
738 } else {
739 /* must flush all the translated code to avoid inconsistencies */
740 /* XXX: only flush what is necessary */
741 CPUArchState *env = cpu->env_ptr;
742 tb_flush(env);
743 }
744 }
745 #endif
746 }
747
748 void cpu_abort(CPUState *cpu, const char *fmt, ...)
749 {
750 va_list ap;
751 va_list ap2;
752
753 va_start(ap, fmt);
754 va_copy(ap2, ap);
755 fprintf(stderr, "qemu: fatal: ");
756 vfprintf(stderr, fmt, ap);
757 fprintf(stderr, "\n");
758 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
759 if (qemu_log_enabled()) {
760 qemu_log("qemu: fatal: ");
761 qemu_log_vprintf(fmt, ap2);
762 qemu_log("\n");
763 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
764 qemu_log_flush();
765 qemu_log_close();
766 }
767 va_end(ap2);
768 va_end(ap);
769 #if defined(CONFIG_USER_ONLY)
770 {
771 struct sigaction act;
772 sigfillset(&act.sa_mask);
773 act.sa_handler = SIG_DFL;
774 sigaction(SIGABRT, &act, NULL);
775 }
776 #endif
777 abort();
778 }
779
780 #if !defined(CONFIG_USER_ONLY)
781 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
782 {
783 RAMBlock *block;
784
785 /* The list is protected by the iothread lock here. */
786 block = ram_list.mru_block;
787 if (block && addr - block->offset < block->length) {
788 goto found;
789 }
790 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
791 if (addr - block->offset < block->length) {
792 goto found;
793 }
794 }
795
796 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
797 abort();
798
799 found:
800 ram_list.mru_block = block;
801 return block;
802 }
803
804 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
805 {
806 ram_addr_t start1;
807 RAMBlock *block;
808 ram_addr_t end;
809
810 end = TARGET_PAGE_ALIGN(start + length);
811 start &= TARGET_PAGE_MASK;
812
813 block = qemu_get_ram_block(start);
814 assert(block == qemu_get_ram_block(end - 1));
815 start1 = (uintptr_t)block->host + (start - block->offset);
816 cpu_tlb_reset_dirty_all(start1, length);
817 }
818
819 /* Note: start and end must be within the same ram block. */
820 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
821 unsigned client)
822 {
823 if (length == 0)
824 return;
825 cpu_physical_memory_clear_dirty_range(start, length, client);
826
827 if (tcg_enabled()) {
828 tlb_reset_dirty_range_all(start, length);
829 }
830 }
831
832 static void cpu_physical_memory_set_dirty_tracking(bool enable)
833 {
834 in_migration = enable;
835 }
836
837 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
838 MemoryRegionSection *section,
839 target_ulong vaddr,
840 hwaddr paddr, hwaddr xlat,
841 int prot,
842 target_ulong *address)
843 {
844 hwaddr iotlb;
845 CPUWatchpoint *wp;
846
847 if (memory_region_is_ram(section->mr)) {
848 /* Normal RAM. */
849 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
850 + xlat;
851 if (!section->readonly) {
852 iotlb |= PHYS_SECTION_NOTDIRTY;
853 } else {
854 iotlb |= PHYS_SECTION_ROM;
855 }
856 } else {
857 iotlb = section - section->address_space->dispatch->map.sections;
858 iotlb += xlat;
859 }
860
861 /* Make accesses to pages with watchpoints go via the
862 watchpoint trap routines. */
863 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
864 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
865 /* Avoid trapping reads of pages with a write breakpoint. */
866 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
867 iotlb = PHYS_SECTION_WATCH + paddr;
868 *address |= TLB_MMIO;
869 break;
870 }
871 }
872 }
873
874 return iotlb;
875 }
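/*
 * Encoding illustration: for dirty-tracked RAM the value returned above is
 * the page-aligned ram address OR'ed with PHYS_SECTION_NOTDIRTY (or
 * PHYS_SECTION_ROM), so the low TARGET_PAGE_BITS carry a phys section
 * index; iotlb_to_region() below recovers it with 'index & ~TARGET_PAGE_MASK'.
 */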
876 #endif /* defined(CONFIG_USER_ONLY) */
877
878 #if !defined(CONFIG_USER_ONLY)
879
880 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
881 uint16_t section);
882 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
883
884 static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
885
886 /*
887  * Set a custom physical guest memory allocator.
888 * Accelerators with unusual needs may need this. Hopefully, we can
889 * get rid of it eventually.
890 */
891 void phys_mem_set_alloc(void *(*alloc)(size_t))
892 {
893 phys_mem_alloc = alloc;
894 }
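/*
 * Sketch of what such an accelerator hook could look like; the function
 * name is hypothetical and not part of QEMU:
 *
 *     static void *my_accel_ram_alloc(size_t size)
 *     {
 *         void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *         return p == MAP_FAILED ? NULL : p;  // ram_block_add() expects NULL on failure
 *     }
 *     ...
 *     phys_mem_set_alloc(my_accel_ram_alloc);
 */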
895
896 static uint16_t phys_section_add(PhysPageMap *map,
897 MemoryRegionSection *section)
898 {
899 /* The physical section number is ORed with a page-aligned
900 * pointer to produce the iotlb entries. Thus it should
901 * never overflow into the page-aligned value.
902 */
903 assert(map->sections_nb < TARGET_PAGE_SIZE);
904
905 if (map->sections_nb == map->sections_nb_alloc) {
906 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
907 map->sections = g_renew(MemoryRegionSection, map->sections,
908 map->sections_nb_alloc);
909 }
910 map->sections[map->sections_nb] = *section;
911 memory_region_ref(section->mr);
912 return map->sections_nb++;
913 }
914
915 static void phys_section_destroy(MemoryRegion *mr)
916 {
917 memory_region_unref(mr);
918
919 if (mr->subpage) {
920 subpage_t *subpage = container_of(mr, subpage_t, iomem);
921 object_unref(OBJECT(&subpage->iomem));
922 g_free(subpage);
923 }
924 }
925
926 static void phys_sections_free(PhysPageMap *map)
927 {
928 while (map->sections_nb > 0) {
929 MemoryRegionSection *section = &map->sections[--map->sections_nb];
930 phys_section_destroy(section->mr);
931 }
932 g_free(map->sections);
933 g_free(map->nodes);
934 }
935
936 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
937 {
938 subpage_t *subpage;
939 hwaddr base = section->offset_within_address_space
940 & TARGET_PAGE_MASK;
941 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
942 d->map.nodes, d->map.sections);
943 MemoryRegionSection subsection = {
944 .offset_within_address_space = base,
945 .size = int128_make64(TARGET_PAGE_SIZE),
946 };
947 hwaddr start, end;
948
949 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
950
951 if (!(existing->mr->subpage)) {
952 subpage = subpage_init(d->as, base);
953 subsection.address_space = d->as;
954 subsection.mr = &subpage->iomem;
955 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
956 phys_section_add(&d->map, &subsection));
957 } else {
958 subpage = container_of(existing->mr, subpage_t, iomem);
959 }
960 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
961 end = start + int128_get64(section->size) - 1;
962 subpage_register(subpage, start, end,
963 phys_section_add(&d->map, section));
964 }
965
966
967 static void register_multipage(AddressSpaceDispatch *d,
968 MemoryRegionSection *section)
969 {
970 hwaddr start_addr = section->offset_within_address_space;
971 uint16_t section_index = phys_section_add(&d->map, section);
972 uint64_t num_pages = int128_get64(int128_rshift(section->size,
973 TARGET_PAGE_BITS));
974
975 assert(num_pages);
976 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
977 }
978
979 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
980 {
981 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
982 AddressSpaceDispatch *d = as->next_dispatch;
983 MemoryRegionSection now = *section, remain = *section;
984 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
985
986 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
987 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
988 - now.offset_within_address_space;
989
990 now.size = int128_min(int128_make64(left), now.size);
991 register_subpage(d, &now);
992 } else {
993 now.size = int128_zero();
994 }
995 while (int128_ne(remain.size, now.size)) {
996 remain.size = int128_sub(remain.size, now.size);
997 remain.offset_within_address_space += int128_get64(now.size);
998 remain.offset_within_region += int128_get64(now.size);
999 now = remain;
1000 if (int128_lt(remain.size, page_size)) {
1001 register_subpage(d, &now);
1002 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1003 now.size = page_size;
1004 register_subpage(d, &now);
1005 } else {
1006 now.size = int128_and(now.size, int128_neg(page_size));
1007 register_multipage(d, &now);
1008 }
1009 }
1010 }
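/*
 * Worked example (assuming 4 KiB target pages): a section that starts at
 * offset 0x1800 in the address space with size 0x3000 gets registered in
 * three pieces -- a head subpage covering [0x1800, 0x2000), a multipage
 * run covering [0x2000, 0x4000), and a tail subpage covering [0x4000, 0x4800).
 */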
1011
1012 void qemu_flush_coalesced_mmio_buffer(void)
1013 {
1014 if (kvm_enabled())
1015 kvm_flush_coalesced_mmio_buffer();
1016 }
1017
1018 void qemu_mutex_lock_ramlist(void)
1019 {
1020 qemu_mutex_lock(&ram_list.mutex);
1021 }
1022
1023 void qemu_mutex_unlock_ramlist(void)
1024 {
1025 qemu_mutex_unlock(&ram_list.mutex);
1026 }
1027
1028 #ifdef __linux__
1029
1030 #include <sys/vfs.h>
1031
1032 #define HUGETLBFS_MAGIC 0x958458f6
1033
1034 static long gethugepagesize(const char *path, Error **errp)
1035 {
1036 struct statfs fs;
1037 int ret;
1038
1039 do {
1040 ret = statfs(path, &fs);
1041 } while (ret != 0 && errno == EINTR);
1042
1043 if (ret != 0) {
1044 error_setg_errno(errp, errno, "failed to get page size of file %s",
1045 path);
1046 return 0;
1047 }
1048
1049 if (fs.f_type != HUGETLBFS_MAGIC)
1050 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
1051
1052 return fs.f_bsize;
1053 }
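/*
 * Error-handling sketch, matching the pattern file_ram_alloc() below uses
 * (the mount point shown is only an example):
 *
 *     Error *local_err = NULL;
 *     long pagesize = gethugepagesize("/dev/hugepages", &local_err);
 *     if (local_err) {
 *         error_propagate(errp, local_err);
 *         return NULL;
 *     }
 */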
1054
1055 static void *file_ram_alloc(RAMBlock *block,
1056 ram_addr_t memory,
1057 const char *path,
1058 Error **errp)
1059 {
1060 char *filename;
1061 char *sanitized_name;
1062 char *c;
1063 void *area = NULL;
1064 int fd;
1065 uint64_t hpagesize;
1066 Error *local_err = NULL;
1067
1068 hpagesize = gethugepagesize(path, &local_err);
1069 if (local_err) {
1070 error_propagate(errp, local_err);
1071 goto error;
1072 }
1073
1074 if (memory < hpagesize) {
1075 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1076 "or larger than huge page size 0x%" PRIx64,
1077 memory, hpagesize);
1078 goto error;
1079 }
1080
1081 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1082 error_setg(errp,
1083 "host lacks kvm mmu notifiers, -mem-path unsupported");
1084 goto error;
1085 }
1086
1087 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1088 sanitized_name = g_strdup(memory_region_name(block->mr));
1089 for (c = sanitized_name; *c != '\0'; c++) {
1090 if (*c == '/')
1091 *c = '_';
1092 }
1093
1094 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1095 sanitized_name);
1096 g_free(sanitized_name);
1097
1098 fd = mkstemp(filename);
1099 if (fd < 0) {
1100 error_setg_errno(errp, errno,
1101 "unable to create backing store for hugepages");
1102 g_free(filename);
1103 goto error;
1104 }
1105 unlink(filename);
1106 g_free(filename);
1107
1108 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1109
1110 /*
1111 * ftruncate is not supported by hugetlbfs in older
1112 * hosts, so don't bother bailing out on errors.
1113 * If anything goes wrong with it under other filesystems,
1114 * mmap will fail.
1115 */
1116 if (ftruncate(fd, memory)) {
1117 perror("ftruncate");
1118 }
1119
1120 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1121 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1122 fd, 0);
1123 if (area == MAP_FAILED) {
1124 error_setg_errno(errp, errno,
1125 "unable to map backing store for hugepages");
1126 close(fd);
1127 goto error;
1128 }
1129
1130 if (mem_prealloc) {
1131 os_mem_prealloc(fd, area, memory);
1132 }
1133
1134 block->fd = fd;
1135 return area;
1136
1137 error:
1138 if (mem_prealloc) {
1139 exit(1);
1140 }
1141 return NULL;
1142 }
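/*
 * This allocator is reached for guests started with something like the
 * following (illustrative command line; the mount point depends on the host):
 *
 *     qemu-system-x86_64 -m 4096 -mem-path /dev/hugepages -mem-prealloc
 */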
1143 #endif
1144
1145 static ram_addr_t find_ram_offset(ram_addr_t size)
1146 {
1147 RAMBlock *block, *next_block;
1148 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1149
1150 assert(size != 0); /* it would hand out same offset multiple times */
1151
1152 if (QTAILQ_EMPTY(&ram_list.blocks))
1153 return 0;
1154
1155 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1156 ram_addr_t end, next = RAM_ADDR_MAX;
1157
1158 end = block->offset + block->length;
1159
1160 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
1161 if (next_block->offset >= end) {
1162 next = MIN(next, next_block->offset);
1163 }
1164 }
1165 if (next - end >= size && next - end < mingap) {
1166 offset = end;
1167 mingap = next - end;
1168 }
1169 }
1170
1171 if (offset == RAM_ADDR_MAX) {
1172 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1173 (uint64_t)size);
1174 abort();
1175 }
1176
1177 return offset;
1178 }
1179
1180 ram_addr_t last_ram_offset(void)
1181 {
1182 RAMBlock *block;
1183 ram_addr_t last = 0;
1184
1185 QTAILQ_FOREACH(block, &ram_list.blocks, next)
1186 last = MAX(last, block->offset + block->length);
1187
1188 return last;
1189 }
1190
1191 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1192 {
1193 int ret;
1194
1195     /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1196 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1197 "dump-guest-core", true)) {
1198 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1199 if (ret) {
1200 perror("qemu_madvise");
1201 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1202 "but dump_guest_core=off specified\n");
1203 }
1204 }
1205 }
1206
1207 static RAMBlock *find_ram_block(ram_addr_t addr)
1208 {
1209 RAMBlock *block;
1210
1211 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1212 if (block->offset == addr) {
1213 return block;
1214 }
1215 }
1216
1217 return NULL;
1218 }
1219
1220 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1221 {
1222 RAMBlock *new_block = find_ram_block(addr);
1223 RAMBlock *block;
1224
1225 assert(new_block);
1226 assert(!new_block->idstr[0]);
1227
1228 if (dev) {
1229 char *id = qdev_get_dev_path(dev);
1230 if (id) {
1231 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1232 g_free(id);
1233 }
1234 }
1235 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1236
1237 /* This assumes the iothread lock is taken here too. */
1238 qemu_mutex_lock_ramlist();
1239 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1240 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1241 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1242 new_block->idstr);
1243 abort();
1244 }
1245 }
1246 qemu_mutex_unlock_ramlist();
1247 }
1248
1249 void qemu_ram_unset_idstr(ram_addr_t addr)
1250 {
1251 RAMBlock *block = find_ram_block(addr);
1252
1253 if (block) {
1254 memset(block->idstr, 0, sizeof(block->idstr));
1255 }
1256 }
1257
1258 static int memory_try_enable_merging(void *addr, size_t len)
1259 {
1260 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1261 /* disabled by the user */
1262 return 0;
1263 }
1264
1265 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1266 }
1267
1268 static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
1269 {
1270 RAMBlock *block;
1271 ram_addr_t old_ram_size, new_ram_size;
1272
1273 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1274
1275 /* This assumes the iothread lock is taken here too. */
1276 qemu_mutex_lock_ramlist();
1277 new_block->offset = find_ram_offset(new_block->length);
1278
1279 if (!new_block->host) {
1280 if (xen_enabled()) {
1281 xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
1282 } else {
1283 new_block->host = phys_mem_alloc(new_block->length);
1284 if (!new_block->host) {
1285 error_setg_errno(errp, errno,
1286 "cannot set up guest memory '%s'",
1287 memory_region_name(new_block->mr));
1288 qemu_mutex_unlock_ramlist();
1289 return -1;
1290 }
1291 memory_try_enable_merging(new_block->host, new_block->length);
1292 }
1293 }
1294
1295 /* Keep the list sorted from biggest to smallest block. */
1296 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1297 if (block->length < new_block->length) {
1298 break;
1299 }
1300 }
1301 if (block) {
1302 QTAILQ_INSERT_BEFORE(block, new_block, next);
1303 } else {
1304 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1305 }
1306 ram_list.mru_block = NULL;
1307
1308 ram_list.version++;
1309 qemu_mutex_unlock_ramlist();
1310
1311 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1312
1313 if (new_ram_size > old_ram_size) {
1314 int i;
1315 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1316 ram_list.dirty_memory[i] =
1317 bitmap_zero_extend(ram_list.dirty_memory[i],
1318 old_ram_size, new_ram_size);
1319 }
1320 }
1321 cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
1322
1323 qemu_ram_setup_dump(new_block->host, new_block->length);
1324 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
1325 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
1326
1327 if (kvm_enabled()) {
1328 kvm_setup_guest_memory(new_block->host, new_block->length);
1329 }
1330
1331 return new_block->offset;
1332 }
1333
1334 #ifdef __linux__
1335 ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1336 bool share, const char *mem_path,
1337 Error **errp)
1338 {
1339 RAMBlock *new_block;
1340 ram_addr_t addr;
1341 Error *local_err = NULL;
1342
1343 if (xen_enabled()) {
1344 error_setg(errp, "-mem-path not supported with Xen");
1345 return -1;
1346 }
1347
1348 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1349 /*
1350 * file_ram_alloc() needs to allocate just like
1351 * phys_mem_alloc, but we haven't bothered to provide
1352 * a hook there.
1353 */
1354 error_setg(errp,
1355 "-mem-path not supported with this accelerator");
1356 return -1;
1357 }
1358
1359 size = TARGET_PAGE_ALIGN(size);
1360 new_block = g_malloc0(sizeof(*new_block));
1361 new_block->mr = mr;
1362 new_block->length = size;
1363 new_block->flags = share ? RAM_SHARED : 0;
1364 new_block->host = file_ram_alloc(new_block, size,
1365 mem_path, errp);
1366 if (!new_block->host) {
1367 g_free(new_block);
1368 return -1;
1369 }
1370
1371 addr = ram_block_add(new_block, &local_err);
1372 if (local_err) {
1373 g_free(new_block);
1374 error_propagate(errp, local_err);
1375 return -1;
1376 }
1377 return addr;
1378 }
1379 #endif
1380
1381 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1382 MemoryRegion *mr, Error **errp)
1383 {
1384 RAMBlock *new_block;
1385 ram_addr_t addr;
1386 Error *local_err = NULL;
1387
1388 size = TARGET_PAGE_ALIGN(size);
1389 new_block = g_malloc0(sizeof(*new_block));
1390 new_block->mr = mr;
1391 new_block->length = size;
1392 new_block->fd = -1;
1393 new_block->host = host;
1394 if (host) {
1395 new_block->flags |= RAM_PREALLOC;
1396 }
1397 addr = ram_block_add(new_block, &local_err);
1398 if (local_err) {
1399 g_free(new_block);
1400 error_propagate(errp, local_err);
1401 return -1;
1402 }
1403 return addr;
1404 }
1405
1406 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
1407 {
1408 return qemu_ram_alloc_from_ptr(size, NULL, mr, errp);
1409 }
1410
1411 void qemu_ram_free_from_ptr(ram_addr_t addr)
1412 {
1413 RAMBlock *block;
1414
1415 /* This assumes the iothread lock is taken here too. */
1416 qemu_mutex_lock_ramlist();
1417 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1418 if (addr == block->offset) {
1419 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1420 ram_list.mru_block = NULL;
1421 ram_list.version++;
1422 g_free(block);
1423 break;
1424 }
1425 }
1426 qemu_mutex_unlock_ramlist();
1427 }
1428
1429 void qemu_ram_free(ram_addr_t addr)
1430 {
1431 RAMBlock *block;
1432
1433 /* This assumes the iothread lock is taken here too. */
1434 qemu_mutex_lock_ramlist();
1435 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1436 if (addr == block->offset) {
1437 QTAILQ_REMOVE(&ram_list.blocks, block, next);
1438 ram_list.mru_block = NULL;
1439 ram_list.version++;
1440 if (block->flags & RAM_PREALLOC) {
1441 ;
1442 } else if (xen_enabled()) {
1443 xen_invalidate_map_cache_entry(block->host);
1444 #ifndef _WIN32
1445 } else if (block->fd >= 0) {
1446 munmap(block->host, block->length);
1447 close(block->fd);
1448 #endif
1449 } else {
1450 qemu_anon_ram_free(block->host, block->length);
1451 }
1452 g_free(block);
1453 break;
1454 }
1455 }
1456 qemu_mutex_unlock_ramlist();
1457
1458 }
1459
1460 #ifndef _WIN32
1461 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1462 {
1463 RAMBlock *block;
1464 ram_addr_t offset;
1465 int flags;
1466 void *area, *vaddr;
1467
1468 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1469 offset = addr - block->offset;
1470 if (offset < block->length) {
1471 vaddr = block->host + offset;
1472 if (block->flags & RAM_PREALLOC) {
1473 ;
1474 } else if (xen_enabled()) {
1475 abort();
1476 } else {
1477 flags = MAP_FIXED;
1478 munmap(vaddr, length);
1479 if (block->fd >= 0) {
1480 flags |= (block->flags & RAM_SHARED ?
1481 MAP_SHARED : MAP_PRIVATE);
1482 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1483 flags, block->fd, offset);
1484 } else {
1485 /*
1486 * Remap needs to match alloc. Accelerators that
1487 * set phys_mem_alloc never remap. If they did,
1488 * we'd need a remap hook here.
1489 */
1490 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1491
1492 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1493 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1494 flags, -1, 0);
1495 }
1496 if (area != vaddr) {
1497 fprintf(stderr, "Could not remap addr: "
1498 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1499 length, addr);
1500 exit(1);
1501 }
1502 memory_try_enable_merging(vaddr, length);
1503 qemu_ram_setup_dump(vaddr, length);
1504 }
1505 return;
1506 }
1507 }
1508 }
1509 #endif /* !_WIN32 */
1510
1511 int qemu_get_ram_fd(ram_addr_t addr)
1512 {
1513 RAMBlock *block = qemu_get_ram_block(addr);
1514
1515 return block->fd;
1516 }
1517
1518 void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1519 {
1520 RAMBlock *block = qemu_get_ram_block(addr);
1521
1522 return block->host;
1523 }
1524
1525 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1526 With the exception of the softmmu code in this file, this should
1527 only be used for local memory (e.g. video ram) that the device owns,
1528 and knows it isn't going to access beyond the end of the block.
1529
1530 It should not be used for general purpose DMA.
1531 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1532 */
1533 void *qemu_get_ram_ptr(ram_addr_t addr)
1534 {
1535 RAMBlock *block = qemu_get_ram_block(addr);
1536
1537 if (xen_enabled()) {
1538         /* We need to check if the requested address is in RAM
1539          * because we don't want to map the entire guest memory in QEMU.
1540          * In that case, just map up to the end of the page.
1541 */
1542 if (block->offset == 0) {
1543 return xen_map_cache(addr, 0, 0);
1544 } else if (block->host == NULL) {
1545 block->host =
1546 xen_map_cache(block->offset, block->length, 1);
1547 }
1548 }
1549 return block->host + (addr - block->offset);
1550 }
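/*
 * Contrast, for illustration ('vram_offset', 'guest_paddr' and friends are
 * placeholder names): a device touching a block it owns may do
 *
 *     uint8_t *vram = qemu_get_ram_ptr(vram_offset);
 *     vram[x] = pixel;
 *
 * whereas general guest-physical DMA should go through
 *
 *     cpu_physical_memory_rw(guest_paddr, buf, len, 1);
 *
 * so that MMIO, watchpoints and dirty tracking are honoured.
 */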
1551
1552 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1553 * but takes a size argument */
1554 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1555 {
1556 if (*size == 0) {
1557 return NULL;
1558 }
1559 if (xen_enabled()) {
1560 return xen_map_cache(addr, *size, 1);
1561 } else {
1562 RAMBlock *block;
1563
1564 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1565 if (addr - block->offset < block->length) {
1566 if (addr - block->offset + *size > block->length)
1567 *size = block->length - addr + block->offset;
1568 return block->host + (addr - block->offset);
1569 }
1570 }
1571
1572 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1573 abort();
1574 }
1575 }
1576
1577 /* Some of the softmmu routines need to translate from a host pointer
1578 (typically a TLB entry) back to a ram offset. */
1579 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1580 {
1581 RAMBlock *block;
1582 uint8_t *host = ptr;
1583
1584 if (xen_enabled()) {
1585 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1586 return qemu_get_ram_block(*ram_addr)->mr;
1587 }
1588
1589 block = ram_list.mru_block;
1590 if (block && block->host && host - block->host < block->length) {
1591 goto found;
1592 }
1593
1594 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1595         /* This case can happen when the block is not mapped. */
1596 if (block->host == NULL) {
1597 continue;
1598 }
1599 if (host - block->host < block->length) {
1600 goto found;
1601 }
1602 }
1603
1604 return NULL;
1605
1606 found:
1607 *ram_addr = block->offset + (host - block->host);
1608 return block->mr;
1609 }
1610
1611 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1612 uint64_t val, unsigned size)
1613 {
1614 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1615 tb_invalidate_phys_page_fast(ram_addr, size);
1616 }
1617 switch (size) {
1618 case 1:
1619 stb_p(qemu_get_ram_ptr(ram_addr), val);
1620 break;
1621 case 2:
1622 stw_p(qemu_get_ram_ptr(ram_addr), val);
1623 break;
1624 case 4:
1625 stl_p(qemu_get_ram_ptr(ram_addr), val);
1626 break;
1627 default:
1628 abort();
1629 }
1630 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
1631 /* we remove the notdirty callback only if the code has been
1632 flushed */
1633 if (!cpu_physical_memory_is_clean(ram_addr)) {
1634 CPUArchState *env = current_cpu->env_ptr;
1635 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
1636 }
1637 }
1638
1639 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1640 unsigned size, bool is_write)
1641 {
1642 return is_write;
1643 }
1644
1645 static const MemoryRegionOps notdirty_mem_ops = {
1646 .write = notdirty_mem_write,
1647 .valid.accepts = notdirty_mem_accepts,
1648 .endianness = DEVICE_NATIVE_ENDIAN,
1649 };
1650
1651 /* Generate a debug exception if a watchpoint has been hit. */
1652 static void check_watchpoint(int offset, int len_mask, int flags)
1653 {
1654 CPUState *cpu = current_cpu;
1655 CPUArchState *env = cpu->env_ptr;
1656 target_ulong pc, cs_base;
1657 target_ulong vaddr;
1658 CPUWatchpoint *wp;
1659 int cpu_flags;
1660
1661 if (cpu->watchpoint_hit) {
1662 /* We re-entered the check after replacing the TB. Now raise
1663          * the debug interrupt so that it will trigger after the
1664 * current instruction. */
1665 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
1666 return;
1667 }
1668 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1669 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1670 if ((vaddr == (wp->vaddr & len_mask) ||
1671 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1672 wp->flags |= BP_WATCHPOINT_HIT;
1673 if (!cpu->watchpoint_hit) {
1674 cpu->watchpoint_hit = wp;
1675 tb_check_watchpoint(cpu);
1676 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1677 cpu->exception_index = EXCP_DEBUG;
1678 cpu_loop_exit(cpu);
1679 } else {
1680 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1681 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
1682 cpu_resume_from_signal(cpu, NULL);
1683 }
1684 }
1685 } else {
1686 wp->flags &= ~BP_WATCHPOINT_HIT;
1687 }
1688 }
1689 }
1690
1691 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1692 so these check for a hit then pass through to the normal out-of-line
1693 phys routines. */
1694 static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1695 unsigned size)
1696 {
1697 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1698 switch (size) {
1699 case 1: return ldub_phys(&address_space_memory, addr);
1700 case 2: return lduw_phys(&address_space_memory, addr);
1701 case 4: return ldl_phys(&address_space_memory, addr);
1702 default: abort();
1703 }
1704 }
1705
1706 static void watch_mem_write(void *opaque, hwaddr addr,
1707 uint64_t val, unsigned size)
1708 {
1709 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1710 switch (size) {
1711 case 1:
1712 stb_phys(&address_space_memory, addr, val);
1713 break;
1714 case 2:
1715 stw_phys(&address_space_memory, addr, val);
1716 break;
1717 case 4:
1718 stl_phys(&address_space_memory, addr, val);
1719 break;
1720 default: abort();
1721 }
1722 }
1723
1724 static const MemoryRegionOps watch_mem_ops = {
1725 .read = watch_mem_read,
1726 .write = watch_mem_write,
1727 .endianness = DEVICE_NATIVE_ENDIAN,
1728 };
1729
1730 static uint64_t subpage_read(void *opaque, hwaddr addr,
1731 unsigned len)
1732 {
1733 subpage_t *subpage = opaque;
1734 uint8_t buf[4];
1735
1736 #if defined(DEBUG_SUBPAGE)
1737 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1738 subpage, len, addr);
1739 #endif
1740 address_space_read(subpage->as, addr + subpage->base, buf, len);
1741 switch (len) {
1742 case 1:
1743 return ldub_p(buf);
1744 case 2:
1745 return lduw_p(buf);
1746 case 4:
1747 return ldl_p(buf);
1748 default:
1749 abort();
1750 }
1751 }
1752
1753 static void subpage_write(void *opaque, hwaddr addr,
1754 uint64_t value, unsigned len)
1755 {
1756 subpage_t *subpage = opaque;
1757 uint8_t buf[4];
1758
1759 #if defined(DEBUG_SUBPAGE)
1760 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1761 " value %"PRIx64"\n",
1762 __func__, subpage, len, addr, value);
1763 #endif
1764 switch (len) {
1765 case 1:
1766 stb_p(buf, value);
1767 break;
1768 case 2:
1769 stw_p(buf, value);
1770 break;
1771 case 4:
1772 stl_p(buf, value);
1773 break;
1774 default:
1775 abort();
1776 }
1777 address_space_write(subpage->as, addr + subpage->base, buf, len);
1778 }
1779
1780 static bool subpage_accepts(void *opaque, hwaddr addr,
1781 unsigned len, bool is_write)
1782 {
1783 subpage_t *subpage = opaque;
1784 #if defined(DEBUG_SUBPAGE)
1785 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1786 __func__, subpage, is_write ? 'w' : 'r', len, addr);
1787 #endif
1788
1789 return address_space_access_valid(subpage->as, addr + subpage->base,
1790 len, is_write);
1791 }
1792
1793 static const MemoryRegionOps subpage_ops = {
1794 .read = subpage_read,
1795 .write = subpage_write,
1796 .valid.accepts = subpage_accepts,
1797 .endianness = DEVICE_NATIVE_ENDIAN,
1798 };
1799
1800 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1801 uint16_t section)
1802 {
1803 int idx, eidx;
1804
1805 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1806 return -1;
1807 idx = SUBPAGE_IDX(start);
1808 eidx = SUBPAGE_IDX(end);
1809 #if defined(DEBUG_SUBPAGE)
1810 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1811 __func__, mmio, start, end, idx, eidx, section);
1812 #endif
1813 for (; idx <= eidx; idx++) {
1814 mmio->sub_section[idx] = section;
1815 }
1816
1817 return 0;
1818 }
1819
1820 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1821 {
1822 subpage_t *mmio;
1823
1824 mmio = g_malloc0(sizeof(subpage_t));
1825
1826 mmio->as = as;
1827 mmio->base = base;
1828 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
1829 NULL, TARGET_PAGE_SIZE);
1830 mmio->iomem.subpage = true;
1831 #if defined(DEBUG_SUBPAGE)
1832 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1833 mmio, base, TARGET_PAGE_SIZE);
1834 #endif
1835 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
1836
1837 return mmio;
1838 }
1839
1840 static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1841 MemoryRegion *mr)
1842 {
1843 assert(as);
1844 MemoryRegionSection section = {
1845 .address_space = as,
1846 .mr = mr,
1847 .offset_within_address_space = 0,
1848 .offset_within_region = 0,
1849 .size = int128_2_64(),
1850 };
1851
1852 return phys_section_add(map, &section);
1853 }
1854
1855 MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
1856 {
1857 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
1858 }
1859
1860 static void io_mem_init(void)
1861 {
1862 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
1863 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1864 NULL, UINT64_MAX);
1865 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1866 NULL, UINT64_MAX);
1867 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1868 NULL, UINT64_MAX);
1869 }
1870
1871 static void mem_begin(MemoryListener *listener)
1872 {
1873 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1874 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1875 uint16_t n;
1876
1877 n = dummy_section(&d->map, as, &io_mem_unassigned);
1878 assert(n == PHYS_SECTION_UNASSIGNED);
1879 n = dummy_section(&d->map, as, &io_mem_notdirty);
1880 assert(n == PHYS_SECTION_NOTDIRTY);
1881 n = dummy_section(&d->map, as, &io_mem_rom);
1882 assert(n == PHYS_SECTION_ROM);
1883 n = dummy_section(&d->map, as, &io_mem_watch);
1884 assert(n == PHYS_SECTION_WATCH);
1885
1886 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
1887 d->as = as;
1888 as->next_dispatch = d;
1889 }
1890
1891 static void mem_commit(MemoryListener *listener)
1892 {
1893 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1894 AddressSpaceDispatch *cur = as->dispatch;
1895 AddressSpaceDispatch *next = as->next_dispatch;
1896
1897 phys_page_compact_all(next, next->map.nodes_nb);
1898
1899 as->dispatch = next;
1900
1901 if (cur) {
1902 phys_sections_free(&cur->map);
1903 g_free(cur);
1904 }
1905 }
1906
1907 static void tcg_commit(MemoryListener *listener)
1908 {
1909 CPUState *cpu;
1910
1911 /* since each CPU stores ram addresses in its TLB cache, we must
1912 reset the modified entries */
1913 /* XXX: slow ! */
1914 CPU_FOREACH(cpu) {
1915 /* FIXME: Disentangle the cpu.h circular files deps so we can
1916 directly get the right CPU from listener. */
1917 if (cpu->tcg_as_listener != listener) {
1918 continue;
1919 }
1920 tlb_flush(cpu, 1);
1921 }
1922 }
1923
1924 static void core_log_global_start(MemoryListener *listener)
1925 {
1926 cpu_physical_memory_set_dirty_tracking(true);
1927 }
1928
1929 static void core_log_global_stop(MemoryListener *listener)
1930 {
1931 cpu_physical_memory_set_dirty_tracking(false);
1932 }
1933
1934 static MemoryListener core_memory_listener = {
1935 .log_global_start = core_log_global_start,
1936 .log_global_stop = core_log_global_stop,
1937 .priority = 1,
1938 };
1939
1940 void address_space_init_dispatch(AddressSpace *as)
1941 {
1942 as->dispatch = NULL;
1943 as->dispatch_listener = (MemoryListener) {
1944 .begin = mem_begin,
1945 .commit = mem_commit,
1946 .region_add = mem_add,
1947 .region_nop = mem_add,
1948 .priority = 0,
1949 };
1950 memory_listener_register(&as->dispatch_listener, as);
1951 }
1952
1953 void address_space_destroy_dispatch(AddressSpace *as)
1954 {
1955 AddressSpaceDispatch *d = as->dispatch;
1956
1957 memory_listener_unregister(&as->dispatch_listener);
1958 g_free(d);
1959 as->dispatch = NULL;
1960 }
1961
1962 static void memory_map_init(void)
1963 {
1964 system_memory = g_malloc(sizeof(*system_memory));
1965
1966 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
1967 address_space_init(&address_space_memory, system_memory, "memory");
1968
1969 system_io = g_malloc(sizeof(*system_io));
1970 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1971 65536);
1972 address_space_init(&address_space_io, system_io, "I/O");
1973
1974 memory_listener_register(&core_memory_listener, &address_space_memory);
1975 }
1976
1977 MemoryRegion *get_system_memory(void)
1978 {
1979 return system_memory;
1980 }
1981
1982 MemoryRegion *get_system_io(void)
1983 {
1984 return system_io;
1985 }
1986
1987 #endif /* !defined(CONFIG_USER_ONLY) */
1988
1989 /* physical memory access (slow version, mainly for debug) */
1990 #if defined(CONFIG_USER_ONLY)
1991 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
1992 uint8_t *buf, int len, int is_write)
1993 {
1994 int l, flags;
1995 target_ulong page;
1996 void * p;
1997
1998 while (len > 0) {
1999 page = addr & TARGET_PAGE_MASK;
2000 l = (page + TARGET_PAGE_SIZE) - addr;
2001 if (l > len)
2002 l = len;
2003 flags = page_get_flags(page);
2004 if (!(flags & PAGE_VALID))
2005 return -1;
2006 if (is_write) {
2007 if (!(flags & PAGE_WRITE))
2008 return -1;
2009 /* XXX: this code should not depend on lock_user */
2010 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2011 return -1;
2012 memcpy(p, buf, l);
2013 unlock_user(p, addr, l);
2014 } else {
2015 if (!(flags & PAGE_READ))
2016 return -1;
2017 /* XXX: this code should not depend on lock_user */
2018 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2019 return -1;
2020 memcpy(buf, p, l);
2021 unlock_user(p, addr, 0);
2022 }
2023 len -= l;
2024 buf += l;
2025 addr += l;
2026 }
2027 return 0;
2028 }
2029
2030 #else
2031
2032 static void invalidate_and_set_dirty(hwaddr addr,
2033 hwaddr length)
2034 {
2035 if (cpu_physical_memory_is_clean(addr)) {
2036 /* invalidate code */
2037 tb_invalidate_phys_page_range(addr, addr + length, 0);
2038 /* set dirty bit */
2039 cpu_physical_memory_set_dirty_range_nocode(addr, length);
2040 }
2041 xen_modified_memory(addr, length);
2042 }
2043
2044 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2045 {
2046 unsigned access_size_max = mr->ops->valid.max_access_size;
2047
2048 /* Regions are assumed to support 1-4 byte accesses unless
2049 otherwise specified. */
2050 if (access_size_max == 0) {
2051 access_size_max = 4;
2052 }
2053
2054 /* Bound the maximum access by the alignment of the address. */
2055 if (!mr->ops->impl.unaligned) {
2056 unsigned align_size_max = addr & -addr;
2057 if (align_size_max != 0 && align_size_max < access_size_max) {
2058 access_size_max = align_size_max;
2059 }
2060 }
2061
2062 /* Don't attempt accesses larger than the maximum. */
2063 if (l > access_size_max) {
2064 l = access_size_max;
2065 }
2066 if (l & (l - 1)) {
2067 l = 1 << (qemu_fls(l) - 1);
2068 }
2069
2070 return l;
2071 }
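/*
 * Worked example for memory_access_size() above (illustrative numbers):
 * for an MMIO region with valid.max_access_size == 0 (treated as 4) and
 * impl.unaligned == false, a request of l == 8 at addr == 0x1006 gives
 *   - the 1-4 byte default caps the access at 4 bytes,
 *   - addr & -addr == 2, so the address alignment caps it at 2,
 *   - 2 is already a power of two, so the function returns 2.
 */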
2072
2073 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
2074 int len, bool is_write)
2075 {
2076 hwaddr l;
2077 uint8_t *ptr;
2078 uint64_t val;
2079 hwaddr addr1;
2080 MemoryRegion *mr;
2081 bool error = false;
2082
2083 while (len > 0) {
2084 l = len;
2085 mr = address_space_translate(as, addr, &addr1, &l, is_write);
2086
2087 if (is_write) {
2088 if (!memory_access_is_direct(mr, is_write)) {
2089 l = memory_access_size(mr, l, addr1);
2090 /* XXX: could force current_cpu to NULL to avoid
2091 potential bugs */
2092 switch (l) {
2093 case 8:
2094 /* 64 bit write access */
2095 val = ldq_p(buf);
2096 error |= io_mem_write(mr, addr1, val, 8);
2097 break;
2098 case 4:
2099 /* 32 bit write access */
2100 val = ldl_p(buf);
2101 error |= io_mem_write(mr, addr1, val, 4);
2102 break;
2103 case 2:
2104 /* 16 bit write access */
2105 val = lduw_p(buf);
2106 error |= io_mem_write(mr, addr1, val, 2);
2107 break;
2108 case 1:
2109 /* 8 bit write access */
2110 val = ldub_p(buf);
2111 error |= io_mem_write(mr, addr1, val, 1);
2112 break;
2113 default:
2114 abort();
2115 }
2116 } else {
2117 addr1 += memory_region_get_ram_addr(mr);
2118 /* RAM case */
2119 ptr = qemu_get_ram_ptr(addr1);
2120 memcpy(ptr, buf, l);
2121 invalidate_and_set_dirty(addr1, l);
2122 }
2123 } else {
2124 if (!memory_access_is_direct(mr, is_write)) {
2125 /* I/O case */
2126 l = memory_access_size(mr, l, addr1);
2127 switch (l) {
2128 case 8:
2129 /* 64 bit read access */
2130 error |= io_mem_read(mr, addr1, &val, 8);
2131 stq_p(buf, val);
2132 break;
2133 case 4:
2134 /* 32 bit read access */
2135 error |= io_mem_read(mr, addr1, &val, 4);
2136 stl_p(buf, val);
2137 break;
2138 case 2:
2139 /* 16 bit read access */
2140 error |= io_mem_read(mr, addr1, &val, 2);
2141 stw_p(buf, val);
2142 break;
2143 case 1:
2144 /* 8 bit read access */
2145 error |= io_mem_read(mr, addr1, &val, 1);
2146 stb_p(buf, val);
2147 break;
2148 default:
2149 abort();
2150 }
2151 } else {
2152 /* RAM case */
2153 ptr = qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + addr1);
2154 memcpy(buf, ptr, l);
2155 }
2156 }
2157 len -= l;
2158 buf += l;
2159 addr += l;
2160 }
2161
2162 return error;
2163 }
2164
2165 bool address_space_write(AddressSpace *as, hwaddr addr,
2166 const uint8_t *buf, int len)
2167 {
2168 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2169 }
2170
2171 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2172 {
2173 return address_space_rw(as, addr, buf, len, false);
2174 }
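/*
 * Illustrative sketch, not part of the build: how a caller might use the
 * address_space_read()/address_space_write() wrappers above.  A true
 * return value means some part of the access failed.  The guest-physical
 * address and the 16-byte descriptor layout are made up for the example.
 */
#if 0
static void example_update_descriptor(AddressSpace *as)
{
    uint8_t desc[16];

    if (address_space_read(as, 0x10000000ULL, desc, sizeof(desc))) {
        return;                         /* read hit an I/O error */
    }
    desc[0] |= 0x1;                     /* e.g. set a "done" flag */
    address_space_write(as, 0x10000000ULL, desc, sizeof(desc));
}
#endif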
2175
2176
2177 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2178 int len, int is_write)
2179 {
2180 address_space_rw(&address_space_memory, addr, buf, len, is_write);
2181 }
2182
2183 enum write_rom_type {
2184 WRITE_DATA,
2185 FLUSH_CACHE,
2186 };
2187
2188 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2189 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2190 {
2191 hwaddr l;
2192 uint8_t *ptr;
2193 hwaddr addr1;
2194 MemoryRegion *mr;
2195
2196 while (len > 0) {
2197 l = len;
2198 mr = address_space_translate(as, addr, &addr1, &l, true);
2199
2200 if (!(memory_region_is_ram(mr) ||
2201 memory_region_is_romd(mr))) {
2202 /* do nothing */
2203 } else {
2204 addr1 += memory_region_get_ram_addr(mr);
2205 /* ROM/RAM case */
2206 ptr = qemu_get_ram_ptr(addr1);
2207 switch (type) {
2208 case WRITE_DATA:
2209 memcpy(ptr, buf, l);
2210 invalidate_and_set_dirty(addr1, l);
2211 break;
2212 case FLUSH_CACHE:
2213 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2214 break;
2215 }
2216 }
2217 len -= l;
2218 buf += l;
2219 addr += l;
2220 }
2221 }
2222
2223 /* used for ROM loading: can write to both RAM and ROM */
2224 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2225 const uint8_t *buf, int len)
2226 {
2227 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2228 }
2229
2230 void cpu_flush_icache_range(hwaddr start, int len)
2231 {
2232 /*
2233 * This function should do the same thing as an icache flush that was
2234 * triggered from within the guest. For TCG we are always cache coherent,
2235 * so there is no need to flush anything. For KVM / Xen we need to flush
2236 * the host's instruction cache at least.
2237 */
2238 if (tcg_enabled()) {
2239 return;
2240 }
2241
2242 cpu_physical_memory_write_rom_internal(&address_space_memory,
2243 start, NULL, len, FLUSH_CACHE);
2244 }
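/*
 * Illustrative sketch, not part of the build: a loader-style caller that
 * copies an instruction blob into guest memory and then flushes the host
 * instruction cache (a no-op under TCG, needed for KVM/Xen as explained
 * above).  The blob and its load address come from the caller.
 */
#if 0
static void example_load_code(AddressSpace *as, hwaddr load_addr,
                              const uint8_t *blob, int size)
{
    /* Works for both RAM and ROM-device regions (the ROM loading path). */
    cpu_physical_memory_write_rom(as, load_addr, blob, size);
    cpu_flush_icache_range(load_addr, size);
}
#endif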
2245
2246 typedef struct {
2247 MemoryRegion *mr;
2248 void *buffer;
2249 hwaddr addr;
2250 hwaddr len;
2251 } BounceBuffer;
2252
2253 static BounceBuffer bounce;
2254
2255 typedef struct MapClient {
2256 void *opaque;
2257 void (*callback)(void *opaque);
2258 QLIST_ENTRY(MapClient) link;
2259 } MapClient;
2260
2261 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2262 = QLIST_HEAD_INITIALIZER(map_client_list);
2263
2264 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2265 {
2266 MapClient *client = g_malloc(sizeof(*client));
2267
2268 client->opaque = opaque;
2269 client->callback = callback;
2270 QLIST_INSERT_HEAD(&map_client_list, client, link);
2271 return client;
2272 }
2273
2274 static void cpu_unregister_map_client(void *_client)
2275 {
2276 MapClient *client = (MapClient *)_client;
2277
2278 QLIST_REMOVE(client, link);
2279 g_free(client);
2280 }
2281
2282 static void cpu_notify_map_clients(void)
2283 {
2284 MapClient *client;
2285
2286 while (!QLIST_EMPTY(&map_client_list)) {
2287 client = QLIST_FIRST(&map_client_list);
2288 client->callback(client->opaque);
2289 cpu_unregister_map_client(client);
2290 }
2291 }
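/*
 * Illustrative sketch, not part of the build: the retry pattern this map
 * client list supports.  A caller whose address_space_map() attempt
 * returned NULL registers a callback, and cpu_notify_map_clients() runs
 * it once the bounce buffer has been released.  "MyDev" and
 * my_dev_start_dma() are hypothetical names.
 */
#if 0
static void my_dev_map_retry_cb(void *opaque)
{
    MyDev *dev = opaque;

    my_dev_start_dma(dev);      /* hypothetical: retries address_space_map() */
}

static void my_dev_queue_retry(MyDev *dev)
{
    cpu_register_map_client(dev, my_dev_map_retry_cb);
}
#endif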
2292
2293 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2294 {
2295 MemoryRegion *mr;
2296 hwaddr l, xlat;
2297
2298 while (len > 0) {
2299 l = len;
2300 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2301 if (!memory_access_is_direct(mr, is_write)) {
2302 l = memory_access_size(mr, l, addr);
2303 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
2304 return false;
2305 }
2306 }
2307
2308 len -= l;
2309 addr += l;
2310 }
2311 return true;
2312 }
2313
2314 /* Map a physical memory region into a host virtual address.
2315 * May map a subset of the requested range, given by and returned in *plen.
2316 * May return NULL if resources needed to perform the mapping are exhausted.
2317 * Use only for reads OR writes - not for read-modify-write operations.
2318 * Use cpu_register_map_client() to know when retrying the map operation is
2319 * likely to succeed.
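* (An illustrative usage sketch, not part of the build, follows
* address_space_unmap() below.)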
2320 */
2321 void *address_space_map(AddressSpace *as,
2322 hwaddr addr,
2323 hwaddr *plen,
2324 bool is_write)
2325 {
2326 hwaddr len = *plen;
2327 hwaddr done = 0;
2328 hwaddr l, xlat, base;
2329 MemoryRegion *mr, *this_mr;
2330 ram_addr_t raddr;
2331
2332 if (len == 0) {
2333 return NULL;
2334 }
2335
2336 l = len;
2337 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2338 if (!memory_access_is_direct(mr, is_write)) {
2339 if (bounce.buffer) {
2340 return NULL;
2341 }
2342 /* Avoid unbounded allocations */
2343 l = MIN(l, TARGET_PAGE_SIZE);
2344 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2345 bounce.addr = addr;
2346 bounce.len = l;
2347
2348 memory_region_ref(mr);
2349 bounce.mr = mr;
2350 if (!is_write) {
2351 address_space_read(as, addr, bounce.buffer, l);
2352 }
2353
2354 *plen = l;
2355 return bounce.buffer;
2356 }
2357
2358 base = xlat;
2359 raddr = memory_region_get_ram_addr(mr);
2360
2361 for (;;) {
2362 len -= l;
2363 addr += l;
2364 done += l;
2365 if (len == 0) {
2366 break;
2367 }
2368
2369 l = len;
2370 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2371 if (this_mr != mr || xlat != base + done) {
2372 break;
2373 }
2374 }
2375
2376 memory_region_ref(mr);
2377 *plen = done;
2378 return qemu_ram_ptr_length(raddr + base, plen);
2379 }
2380
2381 /* Unmaps a memory region previously mapped by address_space_map().
2382 * Will also mark the memory as dirty if is_write == 1. access_len gives
2383 * the amount of memory that was actually read or written by the caller.
2384 */
2385 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2386 int is_write, hwaddr access_len)
2387 {
2388 if (buffer != bounce.buffer) {
2389 MemoryRegion *mr;
2390 ram_addr_t addr1;
2391
2392 mr = qemu_ram_addr_from_host(buffer, &addr1);
2393 assert(mr != NULL);
2394 if (is_write) {
2395 invalidate_and_set_dirty(addr1, access_len);
2396 }
2397 if (xen_enabled()) {
2398 xen_invalidate_map_cache_entry(buffer);
2399 }
2400 memory_region_unref(mr);
2401 return;
2402 }
2403 if (is_write) {
2404 address_space_write(as, bounce.addr, bounce.buffer, access_len);
2405 }
2406 qemu_vfree(bounce.buffer);
2407 bounce.buffer = NULL;
2408 memory_region_unref(bounce.mr);
2409 cpu_notify_map_clients();
2410 }
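/*
 * Illustrative sketch, not part of the build: a typical
 * address_space_map()/address_space_unmap() pair.  The length returned in
 * *plen may be smaller than requested, and a NULL return means the single
 * bounce buffer is already in use (see cpu_register_map_client() above).
 * example_fill() is a hypothetical data producer.
 */
#if 0
static void example_dma_write(AddressSpace *as, hwaddr addr, hwaddr size)
{
    while (size > 0) {
        hwaddr plen = size;
        void *p = address_space_map(as, addr, &plen, true);

        if (!p) {
            break;                      /* bounce buffer busy; retry later */
        }
        example_fill(p, plen);          /* hypothetical */
        address_space_unmap(as, p, plen, true, plen);
        addr += plen;
        size -= plen;
    }
}
#endif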
2411
2412 void *cpu_physical_memory_map(hwaddr addr,
2413 hwaddr *plen,
2414 int is_write)
2415 {
2416 return address_space_map(&address_space_memory, addr, plen, is_write);
2417 }
2418
2419 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2420 int is_write, hwaddr access_len)
2421 {
2422 address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2423 }
2424
2425 /* warning: addr must be aligned */
2426 static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
2427 enum device_endian endian)
2428 {
2429 uint8_t *ptr;
2430 uint64_t val;
2431 MemoryRegion *mr;
2432 hwaddr l = 4;
2433 hwaddr addr1;
2434
2435 mr = address_space_translate(as, addr, &addr1, &l, false);
2436 if (l < 4 || !memory_access_is_direct(mr, false)) {
2437 /* I/O case */
2438 io_mem_read(mr, addr1, &val, 4);
2439 #if defined(TARGET_WORDS_BIGENDIAN)
2440 if (endian == DEVICE_LITTLE_ENDIAN) {
2441 val = bswap32(val);
2442 }
2443 #else
2444 if (endian == DEVICE_BIG_ENDIAN) {
2445 val = bswap32(val);
2446 }
2447 #endif
2448 } else {
2449 /* RAM case */
2450 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2451 & TARGET_PAGE_MASK)
2452 + addr1);
2453 switch (endian) {
2454 case DEVICE_LITTLE_ENDIAN:
2455 val = ldl_le_p(ptr);
2456 break;
2457 case DEVICE_BIG_ENDIAN:
2458 val = ldl_be_p(ptr);
2459 break;
2460 default:
2461 val = ldl_p(ptr);
2462 break;
2463 }
2464 }
2465 return val;
2466 }
2467
2468 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
2469 {
2470 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2471 }
2472
2473 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
2474 {
2475 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2476 }
2477
2478 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
2479 {
2480 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2481 }
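/*
 * Illustrative sketch, not part of the build: reading a guest-physical
 * 32-bit field with an explicit byte order, independent of the target's
 * native endianness.  The descriptor layout (a little-endian word at
 * offset 8) is made up for the example.
 */
#if 0
static uint32_t example_read_le_field(AddressSpace *as, hwaddr desc_base)
{
    return ldl_le_phys(as, desc_base + 8);
}
#endif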
2482
2483 /* warning: addr must be aligned */
2484 static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
2485 enum device_endian endian)
2486 {
2487 uint8_t *ptr;
2488 uint64_t val;
2489 MemoryRegion *mr;
2490 hwaddr l = 8;
2491 hwaddr addr1;
2492
2493 mr = address_space_translate(as, addr, &addr1, &l,
2494 false);
2495 if (l < 8 || !memory_access_is_direct(mr, false)) {
2496 /* I/O case */
2497 io_mem_read(mr, addr1, &val, 8);
2498 #if defined(TARGET_WORDS_BIGENDIAN)
2499 if (endian == DEVICE_LITTLE_ENDIAN) {
2500 val = bswap64(val);
2501 }
2502 #else
2503 if (endian == DEVICE_BIG_ENDIAN) {
2504 val = bswap64(val);
2505 }
2506 #endif
2507 } else {
2508 /* RAM case */
2509 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2510 & TARGET_PAGE_MASK)
2511 + addr1);
2512 switch (endian) {
2513 case DEVICE_LITTLE_ENDIAN:
2514 val = ldq_le_p(ptr);
2515 break;
2516 case DEVICE_BIG_ENDIAN:
2517 val = ldq_be_p(ptr);
2518 break;
2519 default:
2520 val = ldq_p(ptr);
2521 break;
2522 }
2523 }
2524 return val;
2525 }
2526
2527 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
2528 {
2529 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2530 }
2531
2532 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
2533 {
2534 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2535 }
2536
2537 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
2538 {
2539 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2540 }
2541
2542 /* XXX: optimize */
2543 uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2544 {
2545 uint8_t val;
2546 address_space_rw(as, addr, &val, 1, 0);
2547 return val;
2548 }
2549
2550 /* warning: addr must be aligned */
2551 static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
2552 enum device_endian endian)
2553 {
2554 uint8_t *ptr;
2555 uint64_t val;
2556 MemoryRegion *mr;
2557 hwaddr l = 2;
2558 hwaddr addr1;
2559
2560 mr = address_space_translate(as, addr, &addr1, &l,
2561 false);
2562 if (l < 2 || !memory_access_is_direct(mr, false)) {
2563 /* I/O case */
2564 io_mem_read(mr, addr1, &val, 2);
2565 #if defined(TARGET_WORDS_BIGENDIAN)
2566 if (endian == DEVICE_LITTLE_ENDIAN) {
2567 val = bswap16(val);
2568 }
2569 #else
2570 if (endian == DEVICE_BIG_ENDIAN) {
2571 val = bswap16(val);
2572 }
2573 #endif
2574 } else {
2575 /* RAM case */
2576 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2577 & TARGET_PAGE_MASK)
2578 + addr1);
2579 switch (endian) {
2580 case DEVICE_LITTLE_ENDIAN:
2581 val = lduw_le_p(ptr);
2582 break;
2583 case DEVICE_BIG_ENDIAN:
2584 val = lduw_be_p(ptr);
2585 break;
2586 default:
2587 val = lduw_p(ptr);
2588 break;
2589 }
2590 }
2591 return val;
2592 }
2593
2594 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
2595 {
2596 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
2597 }
2598
2599 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
2600 {
2601 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
2602 }
2603
2604 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
2605 {
2606 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
2607 }
2608
2609 /* warning: addr must be aligned. The RAM page is not marked as dirty
2610 and the code inside is not invalidated. This is useful if the dirty
2611 bits are used to track modified PTEs. */
2612 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
2613 {
2614 uint8_t *ptr;
2615 MemoryRegion *mr;
2616 hwaddr l = 4;
2617 hwaddr addr1;
2618
2619 mr = address_space_translate(as, addr, &addr1, &l,
2620 true);
2621 if (l < 4 || !memory_access_is_direct(mr, true)) {
2622 io_mem_write(mr, addr1, val, 4);
2623 } else {
2624 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2625 ptr = qemu_get_ram_ptr(addr1);
2626 stl_p(ptr, val);
2627
2628 if (unlikely(in_migration)) {
2629 if (cpu_physical_memory_is_clean(addr1)) {
2630 /* invalidate code */
2631 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2632 /* set dirty bit */
2633 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
2634 }
2635 }
2636 }
2637 }
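/*
 * Illustrative sketch, not part of the build: the kind of caller the
 * comment above has in mind: a page-table walker updating an
 * accessed/dirty bit in a guest PTE without dirtying the RAM page or
 * invalidating translated code.  PTE_ACCESSED is a hypothetical flag.
 */
#if 0
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    if (!(pte & PTE_ACCESSED)) {
        stl_phys_notdirty(as, pte_addr, pte | PTE_ACCESSED);
    }
}
#endif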
2638
2639 /* warning: addr must be aligned */
2640 static inline void stl_phys_internal(AddressSpace *as,
2641 hwaddr addr, uint32_t val,
2642 enum device_endian endian)
2643 {
2644 uint8_t *ptr;
2645 MemoryRegion *mr;
2646 hwaddr l = 4;
2647 hwaddr addr1;
2648
2649 mr = address_space_translate(as, addr, &addr1, &l,
2650 true);
2651 if (l < 4 || !memory_access_is_direct(mr, true)) {
2652 #if defined(TARGET_WORDS_BIGENDIAN)
2653 if (endian == DEVICE_LITTLE_ENDIAN) {
2654 val = bswap32(val);
2655 }
2656 #else
2657 if (endian == DEVICE_BIG_ENDIAN) {
2658 val = bswap32(val);
2659 }
2660 #endif
2661 io_mem_write(mr, addr1, val, 4);
2662 } else {
2663 /* RAM case */
2664 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2665 ptr = qemu_get_ram_ptr(addr1);
2666 switch (endian) {
2667 case DEVICE_LITTLE_ENDIAN:
2668 stl_le_p(ptr, val);
2669 break;
2670 case DEVICE_BIG_ENDIAN:
2671 stl_be_p(ptr, val);
2672 break;
2673 default:
2674 stl_p(ptr, val);
2675 break;
2676 }
2677 invalidate_and_set_dirty(addr1, 4);
2678 }
2679 }
2680
2681 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2682 {
2683 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
2684 }
2685
2686 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2687 {
2688 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
2689 }
2690
2691 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2692 {
2693 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
2694 }
2695
2696 /* XXX: optimize */
2697 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2698 {
2699 uint8_t v = val;
2700 address_space_rw(as, addr, &v, 1, 1);
2701 }
2702
2703 /* warning: addr must be aligned */
2704 static inline void stw_phys_internal(AddressSpace *as,
2705 hwaddr addr, uint32_t val,
2706 enum device_endian endian)
2707 {
2708 uint8_t *ptr;
2709 MemoryRegion *mr;
2710 hwaddr l = 2;
2711 hwaddr addr1;
2712
2713 mr = address_space_translate(as, addr, &addr1, &l, true);
2714 if (l < 2 || !memory_access_is_direct(mr, true)) {
2715 #if defined(TARGET_WORDS_BIGENDIAN)
2716 if (endian == DEVICE_LITTLE_ENDIAN) {
2717 val = bswap16(val);
2718 }
2719 #else
2720 if (endian == DEVICE_BIG_ENDIAN) {
2721 val = bswap16(val);
2722 }
2723 #endif
2724 io_mem_write(mr, addr1, val, 2);
2725 } else {
2726 /* RAM case */
2727 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2728 ptr = qemu_get_ram_ptr(addr1);
2729 switch (endian) {
2730 case DEVICE_LITTLE_ENDIAN:
2731 stw_le_p(ptr, val);
2732 break;
2733 case DEVICE_BIG_ENDIAN:
2734 stw_be_p(ptr, val);
2735 break;
2736 default:
2737 stw_p(ptr, val);
2738 break;
2739 }
2740 invalidate_and_set_dirty(addr1, 2);
2741 }
2742 }
2743
2744 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2745 {
2746 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
2747 }
2748
2749 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2750 {
2751 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
2752 }
2753
2754 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
2755 {
2756 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
2757 }
2758
2759 /* XXX: optimize */
2760 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
2761 {
2762 val = tswap64(val);
2763 address_space_rw(as, addr, (void *) &val, 8, 1);
2764 }
2765
2766 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
2767 {
2768 val = cpu_to_le64(val);
2769 address_space_rw(as, addr, (void *) &val, 8, 1);
2770 }
2771
2772 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
2773 {
2774 val = cpu_to_be64(val);
2775 address_space_rw(as, addr, (void *) &val, 8, 1);
2776 }
2777
2778 /* virtual memory access for debug (includes writing to ROM) */
2779 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2780 uint8_t *buf, int len, int is_write)
2781 {
2782 int l;
2783 hwaddr phys_addr;
2784 target_ulong page;
2785
2786 while (len > 0) {
2787 page = addr & TARGET_PAGE_MASK;
2788 phys_addr = cpu_get_phys_page_debug(cpu, page);
2789 /* if no physical page mapped, return an error */
2790 if (phys_addr == -1)
2791 return -1;
2792 l = (page + TARGET_PAGE_SIZE) - addr;
2793 if (l > len)
2794 l = len;
2795 phys_addr += (addr & ~TARGET_PAGE_MASK);
2796 if (is_write) {
2797 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2798 } else {
2799 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2800 }
2801 len -= l;
2802 buf += l;
2803 addr += l;
2804 }
2805 return 0;
2806 }
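/*
 * Illustrative sketch, not part of the build: reading guest memory at a
 * virtual address the way a debugger front end (e.g. the gdb stub) does.
 * A negative return from cpu_memory_rw_debug() means the page is unmapped.
 */
#if 0
static bool example_read_guest_u32(CPUState *cpu, target_ulong vaddr,
                                   uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return false;
    }
    *out = ldl_p(buf);                  /* interpret in target byte order */
    return true;
}
#endif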
2807 #endif
2808
2809 /*
2810 * A helper function for the _utterly broken_ virtio device model to find out if
2811 * it's running on a big-endian machine. Don't do this at home, kids!
2812 */
2813 bool target_words_bigendian(void);
2814 bool target_words_bigendian(void)
2815 {
2816 #if defined(TARGET_WORDS_BIGENDIAN)
2817 return true;
2818 #else
2819 return false;
2820 #endif
2821 }
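/*
 * Illustrative sketch, not part of the build: how a caller such as the
 * virtio code mentioned above might pick a byte-order accessor at run
 * time.  example_load_u16() is a hypothetical name.
 */
#if 0
static uint16_t example_load_u16(const void *p)
{
    return target_words_bigendian() ? lduw_be_p(p) : lduw_le_p(p);
}
#endif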
2822
2823 #ifndef CONFIG_USER_ONLY
2824 bool cpu_physical_memory_is_io(hwaddr phys_addr)
2825 {
2826 MemoryRegion *mr;
2827 hwaddr l = 1;
2828
2829 mr = address_space_translate(&address_space_memory,
2830 phys_addr, &phys_addr, &l, false);
2831
2832 return !(memory_region_is_ram(mr) ||
2833 memory_region_is_romd(mr));
2834 }
2835
2836 void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2837 {
2838 RAMBlock *block;
2839
2840 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2841 func(block->host, block->offset, block->length, opaque);
2842 }
2843 }
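/*
 * Illustrative sketch, not part of the build: using qemu_ram_foreach_block()
 * to walk every RAM block, here simply summing the registered RAM sizes.
 * The example_* names are hypothetical.
 */
#if 0
static void example_add_block(void *host, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_add_block, &total);
    return total;
}
#endif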
2844 #endif