/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "exec/cputlb.h"
#include "translate-all.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/range.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
typedef PhysPageEntry Node[P_L2_SIZE];
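
/*
 * Worked example (illustrative, assuming 4 KiB target pages): with
 * TARGET_PAGE_BITS == 12 and P_L2_BITS == 9, each level of the tree
 * resolves 9 bits of the page number, so P_L2_LEVELS evaluates to
 * ((64 - 12 - 1) / 9) + 1 == 6 levels for the full 64-bit space.
 */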
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;
struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
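
/*
 * Illustrative effect of compaction: a chain such as
 * node(skip=1) -> node(skip=1) -> leaf collapses so that the parent
 * entry points directly at the leaf with skip == 2, saving one node
 * dereference on every lookup of that range.
 */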
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    }
    return &sections[PHYS_SECTION_UNASSIGNED];
}
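
/*
 * Sketch of how the two halves fit together (not called anywhere in this
 * file; "d" and "leaf" stand for a dispatch under construction and a
 * section index returned by phys_section_add()):
 *
 *     phys_page_set(d, addr >> TARGET_PAGE_BITS, 1, leaf);
 *     MemoryRegionSection *s = phys_page_find(d->phys_map, addr,
 *                                             d->map.nodes, d->map.sections);
 *     assert(s == &d->map.sections[leaf]);
 */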
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}
static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = 0;

    return 0;
}
static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->exception_index != 0;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        } , {
            /* empty */
        }
    }
};

#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
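
/*
 * Usage sketch (the address and length are made-up values): a GDB-style
 * watchpoint on a 4-byte write.
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(cpu, 0x1000, 4,
 *                               BP_GDB | BP_MEM_WRITE, &wp) < 0) {
 *         // report the failure back to the debugger stub
 *     }
 */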
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif
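
/*
 * Worked example (illustrative): a watchpoint at vaddr == UINT64_MAX - 3
 * with len == 4 ends exactly at the top of the address space; computing
 * "vaddr + len" would wrap to zero, but wpend == UINT64_MAX does not, so
 * an access at UINT64_MAX - 1 with len == 2 still matches.
 */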
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
732 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
734 #if defined(TARGET_HAS_ICE)
735 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
737 breakpoint_invalidate(cpu
, breakpoint
->pc
);
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    RAMBlock *block;
    uintptr_t start1;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0) {
        return;
    }
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}
static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}
static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/') {
            *c = '_';
        }
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    return NULL;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->length);
    }

    return last;
}
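
/*
 * Illustrative example for find_ram_offset(): with blocks covering
 * [0, 0x100000) and [0x300000, 0x400000), a request for 0x100000 bytes
 * fits into the 0x200000-byte gap starting at 0x100000, which is also
 * the smallest candidate gap, so find_ram_offset() returns 0x100000.
 */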
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block = find_ram_block(addr);
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block = find_ram_block(addr);

    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}
static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->length);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->length);
        }
    }

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);

    qemu_ram_setup_dump(new_block->host, new_block->length);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, new_block->length);
    }

    return new_block->offset;
}
#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1410 MemoryRegion
*mr
, Error
**errp
)
1412 RAMBlock
*new_block
;
1414 Error
*local_err
= NULL
;
1416 size
= TARGET_PAGE_ALIGN(size
);
1417 new_block
= g_malloc0(sizeof(*new_block
));
1419 new_block
->length
= size
;
1421 new_block
->host
= host
;
1423 new_block
->flags
|= RAM_PREALLOC
;
1425 addr
= ram_block_add(new_block
, &local_err
);
1428 error_propagate(errp
, local_err
);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr, errp);
}
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                close(block->fd);
#endif
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    return block->fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    return block->host;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    RAMBlock *block;

    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length) {
                    *size = block->length - addr + block->offset;
                }
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);
    }
}
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(&address_space_memory, addr, val);
        break;
    case 2:
        stw_phys(&address_space_memory, addr, val);
        break;
    case 4:
        stl_phys(&address_space_memory, addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}
static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
*subpage_init(AddressSpace
*as
, hwaddr base
)
1857 mmio
= g_malloc0(sizeof(subpage_t
));
1861 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
1862 NULL
, TARGET_PAGE_SIZE
);
1863 mmio
->iomem
.subpage
= true;
1864 #if defined(DEBUG_SUBPAGE)
1865 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
1866 mmio
, base
, TARGET_PAGE_SIZE
);
1868 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
1873 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
1877 MemoryRegionSection section
= {
1878 .address_space
= as
,
1880 .offset_within_address_space
= 0,
1881 .offset_within_region
= 0,
1882 .size
= int128_2_64(),
1885 return phys_section_add(map
, §ion
);
MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
{
    return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    as->dispatch = next;

    if (cur) {
        phys_sections_free(&cur->map);
        g_free(cur);
    }
}
static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {
            continue;
        }
        tlb_flush(cpu, 1);
    }
}
static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(true);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(false);
}

static MemoryListener core_memory_listener = {
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}
void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    g_free(d);
    as->dispatch = NULL;
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
}
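
/*
 * Sketch of how board code typically plugs RAM into the tree rooted here
 * (the region name and size are assumptions, not taken from this file):
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *     memory_region_init_ram(ram, NULL, "ram", ram_size, &error_abort);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */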
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (cpu_physical_memory_is_clean(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_range_nocode(addr, length);
    }
    xen_modified_memory(addr, length);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
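
/*
 * Worked example (illustrative): an 8-byte access at address 0x1002 to a
 * region with max_access_size == 4 and no unaligned support is clamped
 * to 2 bytes, because addr & -addr == 2; the caller then loops to issue
 * the remaining bytes.
 */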
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    error |= io_mem_write(mr, addr1, val, 8);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}
*as
, hwaddr addr
,
2199 const uint8_t *buf
, int len
)
2201 return address_space_rw(as
, addr
, (uint8_t *)buf
, len
, true);
2204 bool address_space_read(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
, int len
)
2206 return address_space_rw(as
, addr
, buf
, len
, false);
2210 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
2211 int len
, int is_write
)
2213 address_space_rw(&address_space_memory
, addr
, buf
, len
, is_write
);
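
/*
 * Example (illustrative; "gpa" stands for an assumed guest physical
 * address): reading one guest word through the system address space.
 *
 *     uint32_t v;
 *     cpu_physical_memory_rw(gpa, (uint8_t *)&v, sizeof(v), 0);
 */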
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}
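
/*
 * Typical map/unmap pattern (a sketch; "as", "gpa" and the length are
 * assumptions): plen may come back smaller than requested, so callers
 * must loop or fall back to address_space_rw() for the remainder.
 *
 *     hwaddr plen = 4096;
 *     void *p = address_space_map(as, gpa, &plen, true);
 *     if (p) {
 *         memset(p, 0, plen);
 *         address_space_unmap(as, p, plen, 1, plen);
 *     }
 */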
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    uint8_t val;
    address_space_rw(as, addr, &val, 1, 0);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
            }
        }
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    address_space_rw(as, addr, &v, 1, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}
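
/*
 * Example (illustrative; "reg_gpa" is an assumed register address): the
 * endian-specific helpers pair up, so a little-endian device word written
 * with stl_le_phys() reads back unchanged with ldl_le_phys():
 *
 *     stl_le_phys(&address_space_memory, reg_gpa, 0x12345678);
 *     uint32_t v = ldl_le_phys(&address_space_memory, reg_gpa);
 */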
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
#endif