/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;
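
/*
 * Illustrative sketch (not part of this file): each level of the phys_map
 * radix tree consumes L2_BITS of the page-frame number, so a lookup walks
 * P_L2_LEVELS nodes.  For a given hwaddr, the slot used at a level is
 *
 *     index = addr >> TARGET_PAGE_BITS;
 *     slot  = (index >> (level * L2_BITS)) & (L2_SIZE - 1);
 *
 * which is exactly the arithmetic used by phys_page_set_level() and
 * phys_page_find() below.
 */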
#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)
static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}
static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
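
/*
 * Illustrative sketch (not part of this file): callers typically translate in
 * a loop, clamping each step to what the returned MemoryRegion can cover, in
 * the same way address_space_rw() does further down (as, addr, len and
 * is_write are the caller's values):
 *
 *     while (len > 0) {
 *         hwaddr xlat, l = len;
 *         MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l, is_write);
 *         ... access l bytes of mr at offset xlat ...
 *         len -= l;
 *         addr += l;
 *     }
 */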
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
#if !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(cpu, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
    }

    tlb_flush_page(env, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
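
/*
 * Illustrative sketch (not part of this file): a debug front end such as a
 * gdbstub would typically pair these helpers like so, assuming it already
 * holds the CPUArchState "env" and a guest address "gaddr" (hypothetical
 * variables):
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, gaddr, 4, BP_GDB | BP_MEM_WRITE, &wp)) {
 *         return;                                // invalid length/alignment
 *     }
 *     ... run the guest ...
 *     cpu_watchpoint_remove_by_ref(env, wp);     // or cpu_watchpoint_remove()
 */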
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
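
/*
 * Illustrative sketch (not part of this file): a gdbstub-style client would
 * drive the breakpoint API roughly like this, assuming "env" and "pc" are
 * already known (hypothetical variables):
 *
 *     cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
 *     ...                                        // run until EXCP_DEBUG
 *     cpu_breakpoint_remove(env, pc, BP_GDB);
 *     cpu_breakpoint_remove_all(env, BP_GDB);    // or drop every GDB breakpoint
 */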
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    /* Reset non arch specific state */
    cpu_reset(ENV_GET_CPU(new_env));

    /* Copy arch specific state into the new CPU */
    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}
static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    return 0;
}
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(ram_addr_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(ram_addr_t))
{
    phys_mem_alloc = alloc;
}
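
/*
 * Illustrative sketch (not part of this file): an accelerator with special
 * placement requirements could install its own allocator before any RAM is
 * created, e.g. (hypothetical helper names):
 *
 *     static void *my_accel_ram_alloc(ram_addr_t size)
 *     {
 *         return my_accel_map_guest_ram(size);   // must return usable host memory
 *     }
 *
 *     phys_mem_set_alloc(my_accel_ram_alloc);
 */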
764 static uint16_t phys_section_add(MemoryRegionSection
*section
)
766 /* The physical section number is ORed with a page-aligned
767 * pointer to produce the iotlb entries. Thus it should
768 * never overflow into the page-aligned value.
770 assert(next_map
.sections_nb
< TARGET_PAGE_SIZE
);
772 if (next_map
.sections_nb
== next_map
.sections_nb_alloc
) {
773 next_map
.sections_nb_alloc
= MAX(next_map
.sections_nb_alloc
* 2,
775 next_map
.sections
= g_renew(MemoryRegionSection
, next_map
.sections
,
776 next_map
.sections_nb_alloc
);
778 next_map
.sections
[next_map
.sections_nb
] = *section
;
779 memory_region_ref(section
->mr
);
780 return next_map
.sections_nb
++;
783 static void phys_section_destroy(MemoryRegion
*mr
)
785 memory_region_unref(mr
);
788 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
789 memory_region_destroy(&subpage
->iomem
);
794 static void phys_sections_free(PhysPageMap
*map
)
796 while (map
->sections_nb
> 0) {
797 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
798 phys_section_destroy(section
->mr
);
800 g_free(map
->sections
);
805 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
808 hwaddr base
= section
->offset_within_address_space
810 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
>> TARGET_PAGE_BITS
,
811 next_map
.nodes
, next_map
.sections
);
812 MemoryRegionSection subsection
= {
813 .offset_within_address_space
= base
,
814 .size
= int128_make64(TARGET_PAGE_SIZE
),
818 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
820 if (!(existing
->mr
->subpage
)) {
821 subpage
= subpage_init(d
->as
, base
);
822 subsection
.mr
= &subpage
->iomem
;
823 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
824 phys_section_add(&subsection
));
826 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
828 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
829 end
= start
+ int128_get64(section
->size
) - 1;
830 subpage_register(subpage
, start
, end
, phys_section_add(section
));
834 static void register_multipage(AddressSpaceDispatch
*d
,
835 MemoryRegionSection
*section
)
837 hwaddr start_addr
= section
->offset_within_address_space
;
838 uint16_t section_index
= phys_section_add(section
);
839 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
843 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
846 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
848 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
849 AddressSpaceDispatch
*d
= as
->next_dispatch
;
850 MemoryRegionSection now
= *section
, remain
= *section
;
851 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
853 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
854 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
855 - now
.offset_within_address_space
;
857 now
.size
= int128_min(int128_make64(left
), now
.size
);
858 register_subpage(d
, &now
);
860 now
.size
= int128_zero();
862 while (int128_ne(remain
.size
, now
.size
)) {
863 remain
.size
= int128_sub(remain
.size
, now
.size
);
864 remain
.offset_within_address_space
+= int128_get64(now
.size
);
865 remain
.offset_within_region
+= int128_get64(now
.size
);
867 if (int128_lt(remain
.size
, page_size
)) {
868 register_subpage(d
, &now
);
869 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
870 now
.size
= page_size
;
871 register_subpage(d
, &now
);
873 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
874 register_multipage(d
, &now
);
879 void qemu_flush_coalesced_mmio_buffer(void)
882 kvm_flush_coalesced_mmio_buffer();
885 void qemu_mutex_lock_ramlist(void)
887 qemu_mutex_lock(&ram_list
.mutex
);
890 void qemu_mutex_unlock_ramlist(void)
892 qemu_mutex_unlock(&ram_list
.mutex
);
899 #define HUGETLBFS_MAGIC 0x958458f6
901 static long gethugepagesize(const char *path
)
907 ret
= statfs(path
, &fs
);
908 } while (ret
!= 0 && errno
== EINTR
);
915 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
916 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
921 static void *file_ram_alloc(RAMBlock
*block
,
926 char *sanitized_name
;
933 unsigned long hpagesize
;
935 hpagesize
= gethugepagesize(path
);
940 if (memory
< hpagesize
) {
944 if (kvm_enabled() && !kvm_has_sync_mmu()) {
945 fprintf(stderr
, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
949 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
950 sanitized_name
= g_strdup(block
->mr
->name
);
951 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
956 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
958 g_free(sanitized_name
);
960 fd
= mkstemp(filename
);
962 perror("unable to create backing store for hugepages");
969 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
972 * ftruncate is not supported by hugetlbfs in older
973 * hosts, so don't bother bailing out on errors.
974 * If anything goes wrong with it under other filesystems,
977 if (ftruncate(fd
, memory
))
981 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
982 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
983 * to sidestep this quirk.
985 flags
= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
: MAP_PRIVATE
;
986 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, flags
, fd
, 0);
988 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
990 if (area
== MAP_FAILED
) {
991 perror("file_ram_alloc: can't mmap RAM pages");
999 static void *file_ram_alloc(RAMBlock
*block
,
1003 fprintf(stderr
, "-mem-path not supported on this host\n");
1008 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1010 RAMBlock
*block
, *next_block
;
1011 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1013 assert(size
!= 0); /* it would hand out same offset multiple times */
1015 if (QTAILQ_EMPTY(&ram_list
.blocks
))
1018 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1019 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1021 end
= block
->offset
+ block
->length
;
1023 QTAILQ_FOREACH(next_block
, &ram_list
.blocks
, next
) {
1024 if (next_block
->offset
>= end
) {
1025 next
= MIN(next
, next_block
->offset
);
1028 if (next
- end
>= size
&& next
- end
< mingap
) {
1030 mingap
= next
- end
;
1034 if (offset
== RAM_ADDR_MAX
) {
1035 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1043 ram_addr_t
last_ram_offset(void)
1046 ram_addr_t last
= 0;
1048 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
)
1049 last
= MAX(last
, block
->offset
+ block
->length
);
1054 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1058 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1059 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1060 "dump-guest-core", true)) {
1061 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1063 perror("qemu_madvise");
1064 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1065 "but dump_guest_core=off specified\n");
1070 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1072 RAMBlock
*new_block
, *block
;
1075 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1076 if (block
->offset
== addr
) {
1082 assert(!new_block
->idstr
[0]);
1085 char *id
= qdev_get_dev_path(dev
);
1087 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1091 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1093 /* This assumes the iothread lock is taken here too. */
1094 qemu_mutex_lock_ramlist();
1095 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1096 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1097 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1102 qemu_mutex_unlock_ramlist();
1105 static int memory_try_enable_merging(void *addr
, size_t len
)
1107 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1108 /* disabled by the user */
1112 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1115 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1118 RAMBlock
*block
, *new_block
;
1120 size
= TARGET_PAGE_ALIGN(size
);
1121 new_block
= g_malloc0(sizeof(*new_block
));
1124 /* This assumes the iothread lock is taken here too. */
1125 qemu_mutex_lock_ramlist();
1127 new_block
->offset
= find_ram_offset(size
);
1129 new_block
->host
= host
;
1130 new_block
->flags
|= RAM_PREALLOC_MASK
;
1131 } else if (xen_enabled()) {
1133 fprintf(stderr
, "-mem-path not supported with Xen\n");
1136 xen_ram_alloc(new_block
->offset
, size
, mr
);
1139 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1141 * file_ram_alloc() needs to allocate just like
1142 * phys_mem_alloc, but we haven't bothered to provide
1146 "-mem-path not supported with this accelerator\n");
1149 new_block
->host
= file_ram_alloc(new_block
, size
, mem_path
);
1151 if (!new_block
->host
) {
1152 new_block
->host
= phys_mem_alloc(size
);
1153 if (!new_block
->host
) {
1154 fprintf(stderr
, "Cannot set up guest memory '%s': %s\n",
1155 new_block
->mr
->name
, strerror(errno
));
1158 memory_try_enable_merging(new_block
->host
, size
);
1161 new_block
->length
= size
;
1163 /* Keep the list sorted from biggest to smallest block. */
1164 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1165 if (block
->length
< new_block
->length
) {
1170 QTAILQ_INSERT_BEFORE(block
, new_block
, next
);
1172 QTAILQ_INSERT_TAIL(&ram_list
.blocks
, new_block
, next
);
1174 ram_list
.mru_block
= NULL
;
1177 qemu_mutex_unlock_ramlist();
1179 ram_list
.phys_dirty
= g_realloc(ram_list
.phys_dirty
,
1180 last_ram_offset() >> TARGET_PAGE_BITS
);
1181 memset(ram_list
.phys_dirty
+ (new_block
->offset
>> TARGET_PAGE_BITS
),
1182 0, size
>> TARGET_PAGE_BITS
);
1183 cpu_physical_memory_set_dirty_range(new_block
->offset
, size
, 0xff);
1185 qemu_ram_setup_dump(new_block
->host
, size
);
1186 qemu_madvise(new_block
->host
, size
, QEMU_MADV_HUGEPAGE
);
1187 qemu_madvise(new_block
->host
, size
, QEMU_MADV_DONTFORK
);
1190 kvm_setup_guest_memory(new_block
->host
, size
);
1192 return new_block
->offset
;
1195 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
)
1197 return qemu_ram_alloc_from_ptr(size
, NULL
, mr
);
1200 void qemu_ram_free_from_ptr(ram_addr_t addr
)
1204 /* This assumes the iothread lock is taken here too. */
1205 qemu_mutex_lock_ramlist();
1206 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1207 if (addr
== block
->offset
) {
1208 QTAILQ_REMOVE(&ram_list
.blocks
, block
, next
);
1209 ram_list
.mru_block
= NULL
;
1215 qemu_mutex_unlock_ramlist();
1218 void qemu_ram_free(ram_addr_t addr
)
1222 /* This assumes the iothread lock is taken here too. */
1223 qemu_mutex_lock_ramlist();
1224 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1225 if (addr
== block
->offset
) {
1226 QTAILQ_REMOVE(&ram_list
.blocks
, block
, next
);
1227 ram_list
.mru_block
= NULL
;
1229 if (block
->flags
& RAM_PREALLOC_MASK
) {
1231 } else if (xen_enabled()) {
1232 xen_invalidate_map_cache_entry(block
->host
);
1233 } else if (block
->fd
>= 0) {
1234 munmap(block
->host
, block
->length
);
1237 qemu_anon_ram_free(block
->host
, block
->length
);
1243 qemu_mutex_unlock_ramlist();
1248 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1255 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1256 offset
= addr
- block
->offset
;
1257 if (offset
< block
->length
) {
1258 vaddr
= block
->host
+ offset
;
1259 if (block
->flags
& RAM_PREALLOC_MASK
) {
1261 } else if (xen_enabled()) {
1265 munmap(vaddr
, length
);
1266 if (block
->fd
>= 0) {
1268 flags
|= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
:
1271 flags
|= MAP_PRIVATE
;
1273 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1274 flags
, block
->fd
, offset
);
1277 * Remap needs to match alloc. Accelerators that
1278 * set phys_mem_alloc never remap. If they did,
1279 * we'd need a remap hook here.
1281 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1283 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1284 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1287 if (area
!= vaddr
) {
1288 fprintf(stderr
, "Could not remap addr: "
1289 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1293 memory_try_enable_merging(vaddr
, length
);
1294 qemu_ram_setup_dump(vaddr
, length
);
1300 #endif /* !_WIN32 */
1302 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
1306 /* The list is protected by the iothread lock here. */
1307 block
= ram_list
.mru_block
;
1308 if (block
&& addr
- block
->offset
< block
->length
) {
1311 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1312 if (addr
- block
->offset
< block
->length
) {
1317 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
1321 ram_list
.mru_block
= block
;
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
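
/*
 * Illustrative sketch (not part of this file): per the comment above, device
 * code that does not own the RAM block should go through the dispatch layer
 * instead of grabbing a raw host pointer, e.g. (hypothetical variables):
 *
 *     uint8_t desc[16];
 *     cpu_physical_memory_read(desc_gpa, desc, sizeof(desc));
 *     ... update desc ...
 *     cpu_physical_memory_write(desc_gpa, desc, sizeof(desc));
 *
 * where desc_gpa is a guest physical address handed to the device.
 */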
1352 /* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1353 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1355 * ??? Is this still necessary?
1357 static void *qemu_safe_ram_ptr(ram_addr_t addr
)
1361 /* The list is protected by the iothread lock here. */
1362 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1363 if (addr
- block
->offset
< block
->length
) {
1364 if (xen_enabled()) {
1365 /* We need to check if the requested address is in the RAM
1366 * because we don't want to map the entire memory in QEMU.
1367 * In that case just map until the end of the page.
1369 if (block
->offset
== 0) {
1370 return xen_map_cache(addr
, 0, 0);
1371 } else if (block
->host
== NULL
) {
1373 xen_map_cache(block
->offset
, block
->length
, 1);
1376 return block
->host
+ (addr
- block
->offset
);
1380 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
1386 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1387 * but takes a size argument */
1388 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1393 if (xen_enabled()) {
1394 return xen_map_cache(addr
, *size
, 1);
1398 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1399 if (addr
- block
->offset
< block
->length
) {
1400 if (addr
- block
->offset
+ *size
> block
->length
)
1401 *size
= block
->length
- addr
+ block
->offset
;
1402 return block
->host
+ (addr
- block
->offset
);
1406 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
1411 /* Some of the softmmu routines need to translate from a host pointer
1412 (typically a TLB entry) back to a ram offset. */
1413 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1416 uint8_t *host
= ptr
;
1418 if (xen_enabled()) {
1419 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1420 return qemu_get_ram_block(*ram_addr
)->mr
;
1423 block
= ram_list
.mru_block
;
1424 if (block
&& block
->host
&& host
- block
->host
< block
->length
) {
1428 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
        /* This case appears when the block is not mapped. */
1430 if (block
->host
== NULL
) {
1433 if (host
- block
->host
< block
->length
) {
1441 *ram_addr
= block
->offset
+ (host
- block
->host
);
1445 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1446 uint64_t val
, unsigned size
)
1449 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
1450 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
1451 tb_invalidate_phys_page_fast(ram_addr
, size
);
1452 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
1456 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1459 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1462 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
1467 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
1468 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
1469 /* we remove the notdirty callback only if the code has been
1471 if (dirty_flags
== 0xff) {
1472 CPUArchState
*env
= current_cpu
->env_ptr
;
1473 tlb_set_dirty(env
, env
->mem_io_vaddr
);
1477 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
1478 unsigned size
, bool is_write
)
1483 static const MemoryRegionOps notdirty_mem_ops
= {
1484 .write
= notdirty_mem_write
,
1485 .valid
.accepts
= notdirty_mem_accepts
,
1486 .endianness
= DEVICE_NATIVE_ENDIAN
,
1489 /* Generate a debug exception if a watchpoint has been hit. */
1490 static void check_watchpoint(int offset
, int len_mask
, int flags
)
1492 CPUArchState
*env
= current_cpu
->env_ptr
;
1493 target_ulong pc
, cs_base
;
1498 if (env
->watchpoint_hit
) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
1502 cpu_interrupt(ENV_GET_CPU(env
), CPU_INTERRUPT_DEBUG
);
1505 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
1506 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1507 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
1508 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
1509 wp
->flags
|= BP_WATCHPOINT_HIT
;
1510 if (!env
->watchpoint_hit
) {
1511 env
->watchpoint_hit
= wp
;
1512 tb_check_watchpoint(env
);
1513 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
1514 env
->exception_index
= EXCP_DEBUG
;
1517 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
1518 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
1519 cpu_resume_from_signal(env
, NULL
);
1523 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
1528 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1529 so these check for a hit then pass through to the normal out-of-line
1531 static uint64_t watch_mem_read(void *opaque
, hwaddr addr
,
1534 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_READ
);
1536 case 1: return ldub_phys(addr
);
1537 case 2: return lduw_phys(addr
);
1538 case 4: return ldl_phys(addr
);
1543 static void watch_mem_write(void *opaque
, hwaddr addr
,
1544 uint64_t val
, unsigned size
)
1546 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_WRITE
);
1549 stb_phys(addr
, val
);
1552 stw_phys(addr
, val
);
1555 stl_phys(addr
, val
);
1561 static const MemoryRegionOps watch_mem_ops
= {
1562 .read
= watch_mem_read
,
1563 .write
= watch_mem_write
,
1564 .endianness
= DEVICE_NATIVE_ENDIAN
,
1567 static uint64_t subpage_read(void *opaque
, hwaddr addr
,
1570 subpage_t
*subpage
= opaque
;
1573 #if defined(DEBUG_SUBPAGE)
1574 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
"\n", __func__
,
1575 subpage
, len
, addr
);
1577 address_space_read(subpage
->as
, addr
+ subpage
->base
, buf
, len
);
1590 static void subpage_write(void *opaque
, hwaddr addr
,
1591 uint64_t value
, unsigned len
)
1593 subpage_t
*subpage
= opaque
;
1596 #if defined(DEBUG_SUBPAGE)
1597 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1598 " value %"PRIx64
"\n",
1599 __func__
, subpage
, len
, addr
, value
);
1614 address_space_write(subpage
->as
, addr
+ subpage
->base
, buf
, len
);
1617 static bool subpage_accepts(void *opaque
, hwaddr addr
,
1618 unsigned size
, bool is_write
)
1620 subpage_t
*subpage
= opaque
;
1621 #if defined(DEBUG_SUBPAGE)
1622 printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx
"\n",
1623 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
1626 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
1630 static const MemoryRegionOps subpage_ops
= {
1631 .read
= subpage_read
,
1632 .write
= subpage_write
,
1633 .valid
.accepts
= subpage_accepts
,
1634 .endianness
= DEVICE_NATIVE_ENDIAN
,
1637 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1642 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
1644 idx
= SUBPAGE_IDX(start
);
1645 eidx
= SUBPAGE_IDX(end
);
1646 #if defined(DEBUG_SUBPAGE)
1647 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__
,
1648 mmio
, start
, end
, idx
, eidx
, memory
);
1650 for (; idx
<= eidx
; idx
++) {
1651 mmio
->sub_section
[idx
] = section
;
1657 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
1661 mmio
= g_malloc0(sizeof(subpage_t
));
1665 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
1666 "subpage", TARGET_PAGE_SIZE
);
1667 mmio
->iomem
.subpage
= true;
1668 #if defined(DEBUG_SUBPAGE)
1669 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
1670 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
1672 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
1677 static uint16_t dummy_section(MemoryRegion
*mr
)
1679 MemoryRegionSection section
= {
1681 .offset_within_address_space
= 0,
1682 .offset_within_region
= 0,
1683 .size
= int128_2_64(),
    return phys_section_add(&section);
1689 MemoryRegion
*iotlb_to_region(hwaddr index
)
1691 return address_space_memory
.dispatch
->sections
[index
& ~TARGET_PAGE_MASK
].mr
;
1694 static void io_mem_init(void)
1696 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, "rom", UINT64_MAX
);
1697 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
1698 "unassigned", UINT64_MAX
);
1699 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
1700 "notdirty", UINT64_MAX
);
1701 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
1702 "watch", UINT64_MAX
);
1705 static void mem_begin(MemoryListener
*listener
)
1707 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1708 AddressSpaceDispatch
*d
= g_new(AddressSpaceDispatch
, 1);
1710 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .is_leaf
= 0 };
1712 as
->next_dispatch
= d
;
1715 static void mem_commit(MemoryListener
*listener
)
1717 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1718 AddressSpaceDispatch
*cur
= as
->dispatch
;
1719 AddressSpaceDispatch
*next
= as
->next_dispatch
;
1721 next
->nodes
= next_map
.nodes
;
1722 next
->sections
= next_map
.sections
;
1724 as
->dispatch
= next
;
1728 static void core_begin(MemoryListener
*listener
)
1732 prev_map
= g_new(PhysPageMap
, 1);
1733 *prev_map
= next_map
;
1735 memset(&next_map
, 0, sizeof(next_map
));
1736 n
= dummy_section(&io_mem_unassigned
);
1737 assert(n
== PHYS_SECTION_UNASSIGNED
);
1738 n
= dummy_section(&io_mem_notdirty
);
1739 assert(n
== PHYS_SECTION_NOTDIRTY
);
1740 n
= dummy_section(&io_mem_rom
);
1741 assert(n
== PHYS_SECTION_ROM
);
1742 n
= dummy_section(&io_mem_watch
);
1743 assert(n
== PHYS_SECTION_WATCH
);
/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
 * All AddressSpaceDispatch instances have switched to the next map.
 */
1749 static void core_commit(MemoryListener
*listener
)
1751 phys_sections_free(prev_map
);
1754 static void tcg_commit(MemoryListener
*listener
)
1758 /* since each CPU stores ram addresses in its TLB cache, we must
1759 reset the modified entries */
1762 CPUArchState
*env
= cpu
->env_ptr
;
1768 static void core_log_global_start(MemoryListener
*listener
)
1770 cpu_physical_memory_set_dirty_tracking(1);
1773 static void core_log_global_stop(MemoryListener
*listener
)
1775 cpu_physical_memory_set_dirty_tracking(0);
1778 static MemoryListener core_memory_listener
= {
1779 .begin
= core_begin
,
1780 .commit
= core_commit
,
1781 .log_global_start
= core_log_global_start
,
1782 .log_global_stop
= core_log_global_stop
,
1786 static MemoryListener tcg_memory_listener
= {
1787 .commit
= tcg_commit
,
1790 void address_space_init_dispatch(AddressSpace
*as
)
1792 as
->dispatch
= NULL
;
1793 as
->dispatch_listener
= (MemoryListener
) {
1795 .commit
= mem_commit
,
1796 .region_add
= mem_add
,
1797 .region_nop
= mem_add
,
1800 memory_listener_register(&as
->dispatch_listener
, as
);
1803 void address_space_destroy_dispatch(AddressSpace
*as
)
1805 AddressSpaceDispatch
*d
= as
->dispatch
;
1807 memory_listener_unregister(&as
->dispatch_listener
);
1809 as
->dispatch
= NULL
;
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, NULL, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
    if (tcg_enabled()) {
        memory_listener_register(&tcg_memory_listener, &address_space_memory);
    }
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
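
/*
 * Illustrative sketch (not part of this file): board code normally obtains
 * the system memory region from here and maps RAM into it, e.g. (hypothetical
 * name and size):
 *
 *     MemoryRegion *sysmem = get_system_memory();
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *
 *     memory_region_init_ram(ram, NULL, "board.ram", 128 * 1024 * 1024);
 *     memory_region_add_subregion(sysmem, 0, ram);
 */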
1841 /* physical memory access (slow version, mainly for debug) */
1842 #if defined(CONFIG_USER_ONLY)
1843 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
1844 uint8_t *buf
, int len
, int is_write
)
1851 page
= addr
& TARGET_PAGE_MASK
;
1852 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
1855 flags
= page_get_flags(page
);
1856 if (!(flags
& PAGE_VALID
))
1859 if (!(flags
& PAGE_WRITE
))
1861 /* XXX: this code should not depend on lock_user */
1862 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
1865 unlock_user(p
, addr
, l
);
1867 if (!(flags
& PAGE_READ
))
1869 /* XXX: this code should not depend on lock_user */
1870 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
1873 unlock_user(p
, addr
, 0);
1884 static void invalidate_and_set_dirty(hwaddr addr
,
1887 if (!cpu_physical_memory_is_dirty(addr
)) {
1888 /* invalidate code */
1889 tb_invalidate_phys_page_range(addr
, addr
+ length
, 0);
1891 cpu_physical_memory_set_dirty_flags(addr
, (0xff & ~CODE_DIRTY_FLAG
));
1893 xen_modified_memory(addr
, length
);
1896 static inline bool memory_access_is_direct(MemoryRegion
*mr
, bool is_write
)
1898 if (memory_region_is_ram(mr
)) {
1899 return !(is_write
&& mr
->readonly
);
1901 if (memory_region_is_romd(mr
)) {
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
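
/*
 * Illustrative sketch (not part of this file): the sizes consulted above come
 * from the region's MemoryRegionOps, e.g. a hypothetical device limited to
 * 2- and 4-byte accesses would declare:
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_LITTLE_ENDIAN,
 *         .valid = {
 *             .min_access_size = 2,
 *             .max_access_size = 4,
 *         },
 *     };
 */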
1937 bool address_space_rw(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
,
1938 int len
, bool is_write
)
1949 mr
= address_space_translate(as
, addr
, &addr1
, &l
, is_write
);
1952 if (!memory_access_is_direct(mr
, is_write
)) {
1953 l
= memory_access_size(mr
, l
, addr1
);
1954 /* XXX: could force current_cpu to NULL to avoid
1958 /* 64 bit write access */
1960 error
|= io_mem_write(mr
, addr1
, val
, 8);
1963 /* 32 bit write access */
1965 error
|= io_mem_write(mr
, addr1
, val
, 4);
1968 /* 16 bit write access */
1970 error
|= io_mem_write(mr
, addr1
, val
, 2);
1973 /* 8 bit write access */
1975 error
|= io_mem_write(mr
, addr1
, val
, 1);
1981 addr1
+= memory_region_get_ram_addr(mr
);
1983 ptr
= qemu_get_ram_ptr(addr1
);
1984 memcpy(ptr
, buf
, l
);
1985 invalidate_and_set_dirty(addr1
, l
);
1988 if (!memory_access_is_direct(mr
, is_write
)) {
1990 l
= memory_access_size(mr
, l
, addr1
);
1993 /* 64 bit read access */
1994 error
|= io_mem_read(mr
, addr1
, &val
, 8);
1998 /* 32 bit read access */
1999 error
|= io_mem_read(mr
, addr1
, &val
, 4);
2003 /* 16 bit read access */
2004 error
|= io_mem_read(mr
, addr1
, &val
, 2);
2008 /* 8 bit read access */
2009 error
|= io_mem_read(mr
, addr1
, &val
, 1);
2017 ptr
= qemu_get_ram_ptr(mr
->ram_addr
+ addr1
);
2018 memcpy(buf
, ptr
, l
);
2029 bool address_space_write(AddressSpace
*as
, hwaddr addr
,
2030 const uint8_t *buf
, int len
)
2032 return address_space_rw(as
, addr
, (uint8_t *)buf
, len
, true);
2035 bool address_space_read(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
, int len
)
2037 return address_space_rw(as
, addr
, buf
, len
, false);
2041 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
2042 int len
, int is_write
)
2044 address_space_rw(&address_space_memory
, addr
, buf
, len
, is_write
);
2047 /* used for ROM loading : can write in RAM and ROM */
2048 void cpu_physical_memory_write_rom(hwaddr addr
,
2049 const uint8_t *buf
, int len
)
2058 mr
= address_space_translate(&address_space_memory
,
2059 addr
, &addr1
, &l
, true);
2061 if (!(memory_region_is_ram(mr
) ||
2062 memory_region_is_romd(mr
))) {
2065 addr1
+= memory_region_get_ram_addr(mr
);
2067 ptr
= qemu_get_ram_ptr(addr1
);
2068 memcpy(ptr
, buf
, l
);
2069 invalidate_and_set_dirty(addr1
, l
);
2084 static BounceBuffer bounce
;
2086 typedef struct MapClient
{
2088 void (*callback
)(void *opaque
);
2089 QLIST_ENTRY(MapClient
) link
;
2092 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2093 = QLIST_HEAD_INITIALIZER(map_client_list
);
2095 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
2097 MapClient
*client
= g_malloc(sizeof(*client
));
2099 client
->opaque
= opaque
;
2100 client
->callback
= callback
;
2101 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2105 static void cpu_unregister_map_client(void *_client
)
2107 MapClient
*client
= (MapClient
*)_client
;
2109 QLIST_REMOVE(client
, link
);
2113 static void cpu_notify_map_clients(void)
2117 while (!QLIST_EMPTY(&map_client_list
)) {
2118 client
= QLIST_FIRST(&map_client_list
);
2119 client
->callback(client
->opaque
);
2120 cpu_unregister_map_client(client
);
2124 bool address_space_access_valid(AddressSpace
*as
, hwaddr addr
, int len
, bool is_write
)
2131 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2132 if (!memory_access_is_direct(mr
, is_write
)) {
2133 l
= memory_access_size(mr
, l
, addr
);
2134 if (!memory_region_access_valid(mr
, xlat
, l
, is_write
)) {
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
2152 void *address_space_map(AddressSpace
*as
,
2159 hwaddr l
, xlat
, base
;
2160 MemoryRegion
*mr
, *this_mr
;
2168 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2169 if (!memory_access_is_direct(mr
, is_write
)) {
2170 if (bounce
.buffer
) {
2173 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, TARGET_PAGE_SIZE
);
2177 memory_region_ref(mr
);
2180 address_space_read(as
, addr
, bounce
.buffer
, l
);
2184 return bounce
.buffer
;
2188 raddr
= memory_region_get_ram_addr(mr
);
2199 this_mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2200 if (this_mr
!= mr
|| xlat
!= base
+ done
) {
2205 memory_region_ref(mr
);
2207 return qemu_ram_ptr_length(raddr
+ base
, plen
);
2210 /* Unmaps a memory region previously mapped by address_space_map().
2211 * Will also mark the memory as dirty if is_write == 1. access_len gives
2212 * the amount of memory that was actually read or written by the caller.
2214 void address_space_unmap(AddressSpace
*as
, void *buffer
, hwaddr len
,
2215 int is_write
, hwaddr access_len
)
2217 if (buffer
!= bounce
.buffer
) {
2221 mr
= qemu_ram_addr_from_host(buffer
, &addr1
);
2224 while (access_len
) {
2226 l
= TARGET_PAGE_SIZE
;
2229 invalidate_and_set_dirty(addr1
, l
);
2234 if (xen_enabled()) {
2235 xen_invalidate_map_cache_entry(buffer
);
2237 memory_region_unref(mr
);
2241 address_space_write(as
, bounce
.addr
, bounce
.buffer
, access_len
);
2243 qemu_vfree(bounce
.buffer
);
2244 bounce
.buffer
= NULL
;
2245 memory_region_unref(bounce
.mr
);
2246 cpu_notify_map_clients();
2249 void *cpu_physical_memory_map(hwaddr addr
,
2253 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
2256 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
2257 int is_write
, hwaddr access_len
)
2259 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
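
/*
 * Illustrative sketch (not part of this file): a DMA-capable device would use
 * the map/unmap pair roughly like this, retrying via cpu_register_map_client()
 * when NULL is returned (hypothetical variables "as", "gpa", "len", "dev" and
 * callback "mydev_retry_dma"):
 *
 *     hwaddr plen = len;
 *     void *p = address_space_map(as, gpa, &plen, true);
 *     if (!p) {
 *         cpu_register_map_client(dev, mydev_retry_dma);   // try again later
 *         return;
 *     }
 *     ... fill p[0..plen-1] ...
 *     address_space_unmap(as, p, plen, true, plen);
 */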
2262 /* warning: addr must be aligned */
2263 static inline uint32_t ldl_phys_internal(hwaddr addr
,
2264 enum device_endian endian
)
2272 mr
= address_space_translate(&address_space_memory
, addr
, &addr1
, &l
,
2274 if (l
< 4 || !memory_access_is_direct(mr
, false)) {
2276 io_mem_read(mr
, addr1
, &val
, 4);
2277 #if defined(TARGET_WORDS_BIGENDIAN)
2278 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2282 if (endian
== DEVICE_BIG_ENDIAN
) {
2288 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2292 case DEVICE_LITTLE_ENDIAN
:
2293 val
= ldl_le_p(ptr
);
2295 case DEVICE_BIG_ENDIAN
:
2296 val
= ldl_be_p(ptr
);
2306 uint32_t ldl_phys(hwaddr addr
)
2308 return ldl_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
2311 uint32_t ldl_le_phys(hwaddr addr
)
2313 return ldl_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
2316 uint32_t ldl_be_phys(hwaddr addr
)
2318 return ldl_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
2321 /* warning: addr must be aligned */
2322 static inline uint64_t ldq_phys_internal(hwaddr addr
,
2323 enum device_endian endian
)
2331 mr
= address_space_translate(&address_space_memory
, addr
, &addr1
, &l
,
2333 if (l
< 8 || !memory_access_is_direct(mr
, false)) {
2335 io_mem_read(mr
, addr1
, &val
, 8);
2336 #if defined(TARGET_WORDS_BIGENDIAN)
2337 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2341 if (endian
== DEVICE_BIG_ENDIAN
) {
2347 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2351 case DEVICE_LITTLE_ENDIAN
:
2352 val
= ldq_le_p(ptr
);
2354 case DEVICE_BIG_ENDIAN
:
2355 val
= ldq_be_p(ptr
);
2365 uint64_t ldq_phys(hwaddr addr
)
2367 return ldq_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
2370 uint64_t ldq_le_phys(hwaddr addr
)
2372 return ldq_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
2375 uint64_t ldq_be_phys(hwaddr addr
)
2377 return ldq_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
2381 uint32_t ldub_phys(hwaddr addr
)
2384 cpu_physical_memory_read(addr
, &val
, 1);
2388 /* warning: addr must be aligned */
2389 static inline uint32_t lduw_phys_internal(hwaddr addr
,
2390 enum device_endian endian
)
2398 mr
= address_space_translate(&address_space_memory
, addr
, &addr1
, &l
,
2400 if (l
< 2 || !memory_access_is_direct(mr
, false)) {
2402 io_mem_read(mr
, addr1
, &val
, 2);
2403 #if defined(TARGET_WORDS_BIGENDIAN)
2404 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2408 if (endian
== DEVICE_BIG_ENDIAN
) {
2414 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2418 case DEVICE_LITTLE_ENDIAN
:
2419 val
= lduw_le_p(ptr
);
2421 case DEVICE_BIG_ENDIAN
:
2422 val
= lduw_be_p(ptr
);
2432 uint32_t lduw_phys(hwaddr addr
)
2434 return lduw_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
2437 uint32_t lduw_le_phys(hwaddr addr
)
2439 return lduw_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
2442 uint32_t lduw_be_phys(hwaddr addr
)
2444 return lduw_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
2447 /* warning: addr must be aligned. The ram page is not masked as dirty
2448 and the code inside is not invalidated. It is useful if the dirty
2449 bits are used to track modified PTEs */
2450 void stl_phys_notdirty(hwaddr addr
, uint32_t val
)
2457 mr
= address_space_translate(&address_space_memory
, addr
, &addr1
, &l
,
2459 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
2460 io_mem_write(mr
, addr1
, val
, 4);
2462 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2463 ptr
= qemu_get_ram_ptr(addr1
);
2466 if (unlikely(in_migration
)) {
2467 if (!cpu_physical_memory_is_dirty(addr1
)) {
2468 /* invalidate code */
2469 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2471 cpu_physical_memory_set_dirty_flags(
2472 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
2478 /* warning: addr must be aligned */
2479 static inline void stl_phys_internal(hwaddr addr
, uint32_t val
,
2480 enum device_endian endian
)
2487 mr
= address_space_translate(&address_space_memory
, addr
, &addr1
, &l
,
2489 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
2490 #if defined(TARGET_WORDS_BIGENDIAN)
2491 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2495 if (endian
== DEVICE_BIG_ENDIAN
) {
2499 io_mem_write(mr
, addr1
, val
, 4);
2502 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2503 ptr
= qemu_get_ram_ptr(addr1
);
2505 case DEVICE_LITTLE_ENDIAN
:
2508 case DEVICE_BIG_ENDIAN
:
2515 invalidate_and_set_dirty(addr1
, 4);
2519 void stl_phys(hwaddr addr
, uint32_t val
)
2521 stl_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
2524 void stl_le_phys(hwaddr addr
, uint32_t val
)
2526 stl_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
2529 void stl_be_phys(hwaddr addr
, uint32_t val
)
2531 stl_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
2535 void stb_phys(hwaddr addr
, uint32_t val
)
2538 cpu_physical_memory_write(addr
, &v
, 1);
2541 /* warning: addr must be aligned */
2542 static inline void stw_phys_internal(hwaddr addr
, uint32_t val
,
2543 enum device_endian endian
)
2550 mr
= address_space_translate(&address_space_memory
, addr
, &addr1
, &l
,
2552 if (l
< 2 || !memory_access_is_direct(mr
, true)) {
2553 #if defined(TARGET_WORDS_BIGENDIAN)
2554 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2558 if (endian
== DEVICE_BIG_ENDIAN
) {
2562 io_mem_write(mr
, addr1
, val
, 2);
2565 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
2566 ptr
= qemu_get_ram_ptr(addr1
);
2568 case DEVICE_LITTLE_ENDIAN
:
2571 case DEVICE_BIG_ENDIAN
:
2578 invalidate_and_set_dirty(addr1
, 2);
2582 void stw_phys(hwaddr addr
, uint32_t val
)
2584 stw_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
2587 void stw_le_phys(hwaddr addr
, uint32_t val
)
2589 stw_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
2592 void stw_be_phys(hwaddr addr
, uint32_t val
)
2594 stw_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
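
/*
 * Illustrative sketch (not part of this file): the _le/_be variants let device
 * emulation store guest-visible data in a fixed byte order regardless of the
 * target's native endianness, e.g. for a little-endian descriptor at a
 * hypothetical guest address "ring_gpa":
 *
 *     stl_le_phys(ring_gpa + 0, buf_addr_lo);
 *     stl_le_phys(ring_gpa + 4, buf_addr_hi);
 *     stw_le_phys(ring_gpa + 8, buf_len);
 */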
2616 /* virtual memory access for debug (includes writing to ROM) */
2617 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2618 uint8_t *buf
, int len
, int is_write
)
2625 page
= addr
& TARGET_PAGE_MASK
;
2626 phys_addr
= cpu_get_phys_page_debug(cpu
, page
);
2627 /* if no physical page mapped, return an error */
2628 if (phys_addr
== -1)
2630 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2633 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
2635 cpu_physical_memory_write_rom(phys_addr
, buf
, l
);
2637 cpu_physical_memory_rw(phys_addr
, buf
, l
, is_write
);
2646 #if !defined(CONFIG_USER_ONLY)
2649 * A helper function for the _utterly broken_ virtio device model to find out if
2650 * it's running on a big endian machine. Don't do this at home kids!
2652 bool virtio_is_big_endian(void);
2653 bool virtio_is_big_endian(void)
2655 #if defined(TARGET_WORDS_BIGENDIAN)
2664 #ifndef CONFIG_USER_ONLY
2665 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
2670 mr
= address_space_translate(&address_space_memory
,
2671 phys_addr
, &phys_addr
, &l
, false);
2673 return !(memory_region_is_ram(mr
) ||
2674 memory_region_is_romd(mr
));
2677 void qemu_ram_foreach_block(RAMBlockIterFunc func
, void *opaque
)
2681 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
2682 func(block
->host
, block
->offset
, block
->length
, opaque
);