/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}
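
/*
 * Illustrative sketch (not from the original file): how the two halves of
 * the dispatch map above are typically used together.  A builder registers
 * a section number for a run of pages with phys_page_set(), and a lookup
 * walks the radix tree back down with phys_page_find().  "d", "paddr" and
 * "npages" are hypothetical names here.
 *
 *     uint16_t leaf = phys_section_add(&section);
 *     phys_page_set(d, paddr >> TARGET_PAGE_BITS, npages, leaf);
 *     ...
 *     MemoryRegionSection *s =
 *         phys_page_find(d->phys_map, paddr >> TARGET_PAGE_BITS,
 *                        d->nodes, d->sections);
 */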
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
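
/*
 * Illustrative sketch (not from the original file): a device-model caller
 * resolving a guest physical address before touching it directly.  "as"
 * and "gpa" are hypothetical.
 *
 *     hwaddr xlat, plen = 4;
 *     MemoryRegion *mr = address_space_translate(as, gpa, &xlat, &plen, false);
 *     if (memory_region_is_ram(mr) && plen == 4) {
 *         uint32_t v =
 *             ldl_p(qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + xlat));
 *         ...
 *     }
 */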
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu = first_cpu;

    while (cpu) {
        if (cpu->cpu_index == index) {
            break;
        }
        cpu = cpu->next_cpu;
    }

    return cpu;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUState *cpu = first_cpu;

    while (cpu) {
        func(cpu, data);
        cpu = cpu->next_cpu;
    }
}
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState **pcpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->next_cpu = NULL;
    pcpu = &first_cpu;
    cpu_index = 0;
    while (*pcpu != NULL) {
        pcpu = &(*pcpu)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *pcpu = cpu;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(cpu, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
    }

    tlb_flush_page(env, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
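
/*
 * Illustrative sketch (not from the original file): how a debugger stub
 * might use the watchpoint API above.  "env" and "guest_va" are
 * hypothetical; the length must be a power of two and the address must be
 * aligned to it, per the sanity checks in cpu_watchpoint_insert().
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, guest_va, 4,
 *                               BP_GDB | BP_MEM_WRITE, &wp) == 0) {
 *         ...
 *         cpu_watchpoint_remove_by_ref(env, wp);
 *     }
 */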
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}
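
/*
 * Illustrative sketch (not from the original file): pairing the breakpoint
 * calls above, e.g. from a gdbstub handler.  "env" and "guest_pc" are
 * hypothetical.
 *
 *     cpu_breakpoint_insert(env, guest_pc, BP_GDB, NULL);
 *     ...
 *     cpu_breakpoint_remove(env, guest_pc, BP_GDB);
 */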
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    /* Reset non arch specific state */
    cpu_reset(ENV_GET_CPU(new_env));

    /* Copy arch specific state into the new CPU */
    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0) {
        return;
    }
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    return 0;
}
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->nodes);
    g_free(map->sections);
    g_free(map);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
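
/*
 * Illustrative sketch (not from the original file): board and device code
 * normally reaches this allocator through the memory API rather than
 * calling it directly.  "mr" and the size are hypothetical.
 *
 *     MemoryRegion *mr = g_new(MemoryRegion, 1);
 *     memory_region_init_ram(mr, NULL, "mydev.ram", 0x100000);
 *     // memory_region_init_ram() ends up in qemu_ram_alloc(), which picks
 *     // an offset with find_ram_offset() and links the new RAMBlock into
 *     // ram_list.
 */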
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    RAMBlock *block;

    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', size, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      size, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->as = as;
    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    next->nodes = next_map.nodes;
    next->sections = next_map.sections;

    as->dispatch = next;
    g_free(cur);
}
static void core_begin(MemoryListener *listener)
{
    uint16_t n;

    prev_map = g_new(PhysPageMap, 1);
    *prev_map = next_map;

    memset(&next_map, 0, sizeof(next_map));
    n = dummy_section(&io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);
}

/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
 * All AddressSpaceDispatch instances have switched to the next map.
 */
static void core_commit(MemoryListener *listener)
{
    phys_sections_free(prev_map);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        CPUArchState *env = cpu->env_ptr;

        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, NULL, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, NULL, "io", 65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
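
/*
 * Illustrative note (not from the original file): for a region whose
 * valid.max_access_size is 0 (treated as 4) and which does not allow
 * unaligned accesses, memory_access_size(mr, 6, 0x1002) is clamped to 2
 * (the alignment of the address, addr & -addr), while
 * memory_access_size(mr, 6, 0x1000) yields 4 (the maximum, then rounded
 * down to a power of two).
 */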
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                if (l == 8) {
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    error |= io_mem_write(mr, addr1, val, 8);
                } else if (l == 4) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                } else if (l == 2) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                if (l == 8) {
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                } else if (l == 4) {
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                } else if (l == 2) {
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                } else {
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}
bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
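
/*
 * Illustrative sketch (not from the original file): reading a guest
 * physical buffer from device code via the wrappers above.  "gpa" is
 * hypothetical.
 *
 *     uint8_t data[16];
 *     cpu_physical_memory_read(gpa, data, sizeof(data));
 *     // equivalent to address_space_rw(&address_space_memory, gpa,
 *     //                                data, sizeof(data), false)
 */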
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(&address_space_memory,
                                     addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
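
/*
 * Illustrative sketch (not from the original file): a DMA user that could
 * not get a mapping (for example because the single bounce buffer was
 * busy) registers a callback and retries once a mapping is released.
 * "retry_dma" and "MyDeviceState" are hypothetical names.
 *
 *     static void retry_dma(void *opaque)
 *     {
 *         MyDeviceState *s = opaque;   // hypothetical device state
 *         // retry cpu_physical_memory_map() here
 *     }
 *
 *     cpu_register_map_client(s, retry_dma);
 */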
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
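
/*
 * Illustrative sketch (not from the original file): the canonical
 * map/use/unmap pattern.  The mapped length may come back smaller than
 * requested, and NULL means the resources (here, the single bounce
 * buffer) are exhausted.  "gpa" is hypothetical.
 *
 *     hwaddr plen = 4096;
 *     void *p = cpu_physical_memory_map(gpa, &plen, 1); // 1 = write
 *     if (p) {
 *         memset(p, 0, plen);
 *         cpu_physical_memory_unmap(p, plen, 1, plen);
 *     }
 */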
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
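
/*
 * Illustrative sketch (not from the original file): the endian-suffixed
 * helpers above let device code write guest-visible structures in a fixed
 * byte order regardless of host and target endianness.  "gpa" is
 * hypothetical.
 *
 *     stl_le_phys(gpa + 0, 0xdeadbeef);   // always little-endian in RAM
 *     stw_be_phys(gpa + 4, 0x1234);       // always big-endian in RAM
 *     uint32_t v = ldl_le_phys(gpa + 0);  // reads back 0xdeadbeef
 */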
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
#endif