/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif
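
/* next_map is staged while a memory-topology update is in flight:
 * core_begin() saves the current map in prev_map and clears next_map,
 * mem_add() then repopulates it, and once every AddressSpaceDispatch has
 * switched over, core_commit() frees prev_map.  See the MemoryListener
 * callbacks further down in this file. */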
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}
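
/* Illustrative walk (values for illustration only): if L2_BITS is 10
 * (L2_SIZE == 1024) and the map has two levels, page index 0x12345 is
 * resolved by indexing the root node with (0x12345 >> 10) & 0x3ff and the
 * next node with 0x12345 & 0x3ff, at which point is_leaf is set and ptr
 * names a MemoryRegionSection.  A NIL pointer at any level falls back to
 * the unassigned section. */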
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
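
/* The loop above follows IOMMU indirections: each translate() hop may move
 * the access into another AddressSpace (iotlb.target_as) until a section
 * without iommu_ops is reached.  A permission miss (iotlb.perm bit 0 for
 * reads, bit 1 for writes, given the 1 << is_write test) diverts the
 * access to io_mem_unassigned. */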
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(cpu, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
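
/* len_mask example (illustration): a 4-byte watchpoint at a 4-aligned
 * address gets len_mask == ~3, so an access hits when its address matches
 * wp->vaddr on all but the low two bits; unaligned or non-power-of-2
 * lengths are rejected by the sanity check above. */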
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    /* Reset non arch specific state */
    cpu_reset(ENV_GET_CPU(new_env));

    /* Copy arch specific state into the new CPU */
    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0) {
        return;
    }
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}
static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
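
/* The returned iotlb value is later ORed with a page-aligned pointer (see
 * the assertion in phys_section_add), so for RAM it carries the page-masked
 * ram_addr plus a small special section number (NOTDIRTY or ROM), while for
 * MMIO it is the index of the section within the dispatch table. */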
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}
static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
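
/* mem_add() therefore decomposes a section into at most three kinds of
 * pieces.  Illustration only: a section covering 0x800..0x27ff with
 * 0x1000-byte pages becomes a head subpage 0x800..0xfff, one full page
 * 0x1000..0x1fff registered via register_multipage(), and a tail subpage
 * 0x2000..0x27ff. */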
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
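
/* find_ram_offset() below performs a best-fit search: for every existing
 * block it measures the gap to the nearest following block and keeps the
 * smallest gap that still fits the requested size, which keeps the
 * ram_addr space compact as blocks come and go. */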
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}
static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, size);
    }

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
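
/* notdirty_mem_ops backs PHYS_SECTION_NOTDIRTY: writes to clean RAM pages
 * detour through notdirty_mem_write() so translated code can be
 * invalidated and dirty bits set; once the page is fully dirty (0xff) the
 * TLB entry is switched back to the direct RAM path. */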
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
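
/* Watchpoints rely on the same TLB trick as dirty tracking: pages that
 * contain a watchpoint are mapped with TLB_MMIO (see
 * memory_region_section_get_iotlb), so every access funnels through the
 * watch_mem_* handlers below, which call check_watchpoint() before
 * forwarding to the normal physical accessors. */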
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->as = as;
    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    next->nodes = next_map.nodes;
    next->sections = next_map.sections;

    as->dispatch = next;
    g_free(cur);
}
static void core_begin(MemoryListener *listener)
{
    uint16_t n;

    prev_map = g_new(PhysPageMap, 1);
    *prev_map = next_map;

    memset(&next_map, 0, sizeof(next_map));
    n = dummy_section(&io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);
}
/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
 * All AddressSpaceDispatch instances have switched to the next map.
 */
static void core_commit(MemoryListener *listener)
{
    phys_sections_free(prev_map);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    g_free(d);
    as->dispatch = NULL;
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, NULL, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
    if (tcg_enabled()) {
        memory_listener_register(&tcg_memory_listener, &address_space_memory);
    }
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
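
/* A minimal usage sketch (not part of the original file): after
 * memory_map_init(), DMA-style accesses can go through the global
 * address_space_memory, e.g.
 *
 *     uint8_t buf[4];
 *     address_space_read(&address_space_memory, 0x1000, buf, sizeof(buf));
 *
 * which is exactly what the cpu_physical_memory_*() wrappers further
 * below do; the address 0x1000 is illustrative only. */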
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
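
/* The expression addr & -addr isolates the lowest set bit of the address,
 * e.g. (illustration) addr == 0x1006 gives 2, capping the access at a
 * 2-byte unit for regions that do not allow unaligned accesses.  The final
 * l & (l - 1) test rounds any remaining non-power-of-2 length down to a
 * power of 2. */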
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    error |= io_mem_write(mr, addr1, val, 8);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}
bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(&address_space_memory,
                                     addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}
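
/* Sketch of the intended map/unmap protocol (illustrative, not in the
 * original file): map for reading, consume the data, then unmap with the
 * length actually used so dirty tracking and bounce-buffer bookkeeping
 * stay correct.
 *
 *     hwaddr len = 4096;
 *     void *p = address_space_map(&address_space_memory, addr, &len, false);
 *     if (p) {
 *         consume(p, len);    // hypothetical consumer
 *         address_space_unmap(&address_space_memory, p, len, false, len);
 *     } else {
 *         // resources exhausted: register a map client and retry later
 *     }
 */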
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
#endif