/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "sysemu/numa.h"
#include "sysemu/hw_accel.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace-root.h"

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <linux/falloc.h>
#endif

#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

#include "monitor/monitor.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif
#ifdef TARGET_PAGE_BITS_VARY
int target_page_bits;
bool target_page_bits_decided;
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
bool set_preferred_target_page_bits(int bits)
{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger. And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
            return false;
        }
        target_page_bits = bits;
    }
#endif
    return true;
}
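/*
 * Illustrative usage sketch (not part of the original file): a target
 * built with TARGET_PAGE_BITS_VARY could negotiate a 4KiB minimum page
 * while realizing a CPU.  The value 12 below is a hypothetical example:
 *
 *     if (!set_preferred_target_page_bits(12)) {
 *         // too late: the page size was already committed elsewhere
 *     }
 */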
#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
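/*
 * Worked example: with ADDR_SPACE_BITS = 64, a 12-bit target page and
 * P_L2_BITS = 9, P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, i.e. six
 * radix-tree levels of 512 entries each are enough to cover the 52
 * address bits above the page offset.
 */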
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;
#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

struct DirtyBitmapSnapshot {
    ram_addr_t start;
    ram_addr_t end;
    unsigned long dirty[];
};

#endif

#if !defined(CONFIG_USER_ONLY)
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
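/*
 * Illustrative walk (not part of the original file): the loop starts
 * with i = P_L2_LEVELS and consumes lp.skip levels per iteration, so a
 * path compacted by phys_page_compact() reaches its leaf in few steps.
 * Sketch with a hypothetical dispatch pointer d:
 *
 *     MemoryRegionSection *s = phys_page_find(d->phys_map, addr,
 *                                             d->map.nodes,
 *                                             d->map.sections);
 */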
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
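/*
 * Worked example of the RAM clamp above: for a section of size 0x10000
 * with in-section offset addr = 0xff00 and an incoming *plen = 0x1000,
 * diff = 0x100, so *plen shrinks to 0x100 and the access cannot run
 * past the end of the section.
 */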
/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write)
{
    IOMMUTLBEntry iotlb = {0};
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_lookup_region(d, addr, false);
        addr = addr - section->offset_within_address_space
               + section->offset_within_region;
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        if (!(iotlb.perm & (1 << is_write))) {
            iotlb.target_as = NULL;
            break;
        }

        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        as = iotlb.target_as;
    }

    return iotlb;
}
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
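/*
 * Illustrative usage sketch (not part of the original file): callers
 * hold the RCU read lock and bounce the access to the returned region.
 * The locals below are hypothetical:
 *
 *     rcu_read_lock();
 *     hwaddr xlat, plen = len;
 *     MemoryRegion *mr = address_space_translate(as, addr, &xlat,
 *                                                &plen, is_write);
 *     // access at most plen bytes at offset xlat inside mr
 *     rcu_read_unlock();
 */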
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu);

    return 0;
}
static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif
void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}
void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif
}
void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);

    cpu_list_add(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    /* Flush the whole TB as this will not have race conditions
     * even if we don't have proper locking yet.
     * Ideally we would just invalidate the TBs for the
     * specific PC.
     */
    tb_flush(cpu);
}
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
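/*
 * Illustrative usage sketch (not part of the original file): a
 * gdbstub-style client could request a 4-byte write watchpoint at a
 * hypothetical guest address:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(cpu, addr, 4,
 *                               BP_GDB | BP_MEM_WRITE, &wp) < 0) {
 *         // reject the request
 *     }
 */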
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif
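/*
 * Worked example of the wrap-safe comparison above: a 1-byte watchpoint
 * at the top of a 64-bit space has vaddr = 0xffffffffffffffff and
 * len = 1, so vaddr + len wraps to 0; comparing inclusive end addresses
 * (wpend, addrend) sidesteps that overflow entirely.
 */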
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log_lock();
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_unlock();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    RAMBLOCK_FOREACH(block) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
     (ram_addr_t start, ram_addr_t length, unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
    ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
    ram_addr_t last  = QEMU_ALIGN_UP(start + length, align);
    DirtyBitmapSnapshot *snap;
    unsigned long page, end, dest;

    snap = g_malloc0(sizeof(*snap) +
                     ((last - first) >> (TARGET_PAGE_BITS + 3)));
    snap->start = first;
    snap->end   = last;

    page = first >> TARGET_PAGE_BITS;
    end  = last  >> TARGET_PAGE_BITS;
    dest = 0;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
        assert(QEMU_IS_ALIGNED(num,    (1 << BITS_PER_LEVEL)));
        offset >>= BITS_PER_LEVEL;

        bitmap_copy_and_clear_atomic(snap->dirty + dest,
                                     blocks->blocks[idx] + offset,
                                     num);
        page += num;
        dest += num >> BITS_PER_LEVEL;
    }

    rcu_read_unlock();

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return snap;
}

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length)
{
    unsigned long page, end;

    assert(start >= snap->start);
    assert(start + length <= snap->end);

    end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
    page = (start - snap->start) >> TARGET_PAGE_BITS;

    while (page < end) {
        if (test_bit(page, snap->dirty)) {
            return true;
        }
        page++;
    }
    return false;
}
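/*
 * Illustrative usage sketch (not part of the original file): a display
 * device could atomically grab-and-reset VGA dirty state, then query
 * sub-ranges piecemeal.  base/size/stride below are hypothetical:
 *
 *     DirtyBitmapSnapshot *snap =
 *         cpu_physical_memory_snapshot_and_clear_dirty(base, size,
 *                                                      DIRTY_MEMORY_VGA);
 *     if (cpu_physical_memory_snapshot_get_dirty(snap, base, stride)) {
 *         // redraw this scanline
 *     }
 *     g_free(snap);
 */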
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
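/*
 * Worked example of the splitting above: with 4KiB pages, a section
 * covering [0x1800, 0x4800) becomes a subpage head [0x1800, 0x2000),
 * one register_multipage() call for the aligned middle [0x2000, 0x4000),
 * and a subpage tail [0x4000, 0x4800).
 */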
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
void ram_block_dump(Monitor *mon)
{
    RAMBlock *block;
    char *psize;

    rcu_read_lock();
    monitor_printf(mon, "%24s %8s  %18s %18s %18s\n",
                   "Block Name", "PSize", "Offset", "Used", "Total");
    RAMBLOCK_FOREACH(block) {
        psize = size_to_str(block->page_size);
        monitor_printf(mon, "%24s %8s  0x%016" PRIx64 " 0x%016" PRIx64
                       " 0x%016" PRIx64 "\n", block->idstr, psize,
                       (uint64_t)block->offset,
                       (uint64_t)block->used_length,
                       (uint64_t)block->max_length);
        g_free(psize);
    }
    rcu_read_unlock();
}
#ifdef __linux__
/*
 * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
 * may or may not name the same files / on the same filesystem now as
 * when we actually open and map them.  Iterate over the file
 * descriptors instead, and use qemu_fd_getpagesize().
 */
static int find_max_supported_pagesize(Object *obj, void *opaque)
{
    char *mem_path;
    long *hpsize_min = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        mem_path = object_property_get_str(obj, "mem-path", NULL);
        if (mem_path) {
            long hpsize = qemu_mempath_getpagesize(mem_path);
            if (hpsize < *hpsize_min) {
                *hpsize_min = hpsize;
            }
        } else {
            *hpsize_min = getpagesize();
        }
    }

    return 0;
}
long qemu_getrampagesize(void)
{
    long hpsize = LONG_MAX;
    long mainrampagesize;
    Object *memdev_root;

    if (mem_path) {
        mainrampagesize = qemu_mempath_getpagesize(mem_path);
    } else {
        mainrampagesize = getpagesize();
    }

    /* it's possible we have memory-backend objects with
     * hugepage-backed RAM. these may get mapped into system
     * address space via -numa parameters or memory hotplug
     * hooks. we want to take these into account, but we
     * also want to make sure these supported hugepage
     * sizes are applicable across the entire range of memory
     * we may boot from, so we take the min across all
     * backends, and assume normal pages in cases where a
     * backend isn't backed by hugepages.
     */
    memdev_root = object_resolve_path("/objects", NULL);
    if (memdev_root) {
        object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
    }
    if (hpsize == LONG_MAX) {
        /* No additional memory regions found ==> Report main RAM page size */
        return mainrampagesize;
    }

    /* If NUMA is disabled or the NUMA nodes are not backed with a
     * memory-backend, then there is at least one node using "normal" RAM,
     * so if its page size is smaller we have got to report that size instead.
     */
    if (hpsize > mainrampagesize &&
        (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) {
        static bool warned;
        if (!warned) {
            error_report("Huge page support disabled (n/a for main memory).");
            warned = true;
        }
        return mainrampagesize;
    }

    return hpsize;
}
#else
long qemu_getrampagesize(void)
{
    return getpagesize();
}
#endif
static int64_t get_file_size(int fd)
{
    int64_t size = lseek(fd, 0, SEEK_END);
    if (size < 0) {
        return -errno;
    }
    return size;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = MAP_FAILED;
    int fd = -1;
    int64_t file_size;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    block->page_size = qemu_fd_getpagesize(fd);
    block->mr->align = block->page_size;
#if defined(__s390x__)
    if (kvm_enabled()) {
        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
    }
#endif

    file_size = get_file_size(fd);

    if (memory < block->page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%zx",
                   memory, block->page_size);
        goto error;
    }

    if (file_size > 0 && file_size < memory) {
        error_setg(errp, "backing store %s size 0x%" PRIx64
                   " does not match 'size' option 0x" RAM_ADDR_FMT,
                   path, file_size, memory);
        goto error;
    }

    memory = ROUND_UP(memory, block->page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     *
     * Do not truncate the non-empty backend file to avoid corrupting
     * the existing data in the file. Disabling shrinking is not
     * enough. For example, the current vNVDIMM implementation stores
     * the guest NVDIMM labels at the end of the backend file. If the
     * backend file is later extended, QEMU will not be able to find
     * those labels. Therefore, extending the non-empty backend file
     * is disabled as well.
     */
    if (!file_size && ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory, smp_cpus, errp);
        if (errp && *errp) {
            goto error;
        }
    }

    block->fd = fd;
    return area;

error:
    if (area != MAP_FAILED) {
        qemu_ram_munmap(area, memory);
    }
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
#endif
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    RAMBLOCK_FOREACH(block) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        RAMBLOCK_FOREACH(next_block) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

unsigned long last_ram_page(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last >> TARGET_PAGE_BITS;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

bool qemu_ram_is_shared(RAMBlock *rb)
{
    return rb->flags & RAM_SHARED;
}
/* Called with iothread lock held.  */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(RAMBlock *block)
{
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}
size_t qemu_ram_pagesize(RAMBlock *rb)
{
    return rb->page_size;
}

/* Returns the largest size of page in use */
size_t qemu_ram_pagesize_largest(void)
{
    RAMBlock *block;
    size_t largest = 0;

    RAMBLOCK_FOREACH(block) {
        largest = MAX(largest, qemu_ram_pagesize(block));
    }

    return largest;
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As memory core doesn't know how is memory accessed, it is up to
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
1780 static void dirty_memory_extend(ram_addr_t old_ram_size
,
1781 ram_addr_t new_ram_size
)
1783 ram_addr_t old_num_blocks
= DIV_ROUND_UP(old_ram_size
,
1784 DIRTY_MEMORY_BLOCK_SIZE
);
1785 ram_addr_t new_num_blocks
= DIV_ROUND_UP(new_ram_size
,
1786 DIRTY_MEMORY_BLOCK_SIZE
);
1789 /* Only need to extend if block count increased */
1790 if (new_num_blocks
<= old_num_blocks
) {
1794 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1795 DirtyMemoryBlocks
*old_blocks
;
1796 DirtyMemoryBlocks
*new_blocks
;
1799 old_blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[i
]);
1800 new_blocks
= g_malloc(sizeof(*new_blocks
) +
1801 sizeof(new_blocks
->blocks
[0]) * new_num_blocks
);
1803 if (old_num_blocks
) {
1804 memcpy(new_blocks
->blocks
, old_blocks
->blocks
,
1805 old_num_blocks
* sizeof(old_blocks
->blocks
[0]));
1808 for (j
= old_num_blocks
; j
< new_num_blocks
; j
++) {
1809 new_blocks
->blocks
[j
] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE
);
1812 atomic_rcu_set(&ram_list
.dirty_memory
[i
], new_blocks
);
1815 g_free_rcu(old_blocks
, rcu
);
1820 static void ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1823 RAMBlock
*last_block
= NULL
;
1824 ram_addr_t old_ram_size
, new_ram_size
;
1827 old_ram_size
= last_ram_page();
1829 qemu_mutex_lock_ramlist();
1830 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1832 if (!new_block
->host
) {
1833 if (xen_enabled()) {
1834 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1835 new_block
->mr
, &err
);
1837 error_propagate(errp
, err
);
1838 qemu_mutex_unlock_ramlist();
1842 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1843 &new_block
->mr
->align
);
1844 if (!new_block
->host
) {
1845 error_setg_errno(errp
, errno
,
1846 "cannot set up guest memory '%s'",
1847 memory_region_name(new_block
->mr
));
1848 qemu_mutex_unlock_ramlist();
1851 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1855 new_ram_size
= MAX(old_ram_size
,
1856 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1857 if (new_ram_size
> old_ram_size
) {
1858 dirty_memory_extend(old_ram_size
, new_ram_size
);
1860 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1861 * QLIST (which has an RCU-friendly variant) does not have insertion at
1862 * tail, so save the last element in last_block.
1864 RAMBLOCK_FOREACH(block
) {
1866 if (block
->max_length
< new_block
->max_length
) {
1871 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1872 } else if (last_block
) {
1873 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1874 } else { /* list is empty */
1875 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1877 ram_list
.mru_block
= NULL
;
1879 /* Write list before version */
1882 qemu_mutex_unlock_ramlist();
1884 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1885 new_block
->used_length
,
1888 if (new_block
->host
) {
1889 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1890 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1891 /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
1892 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1893 ram_block_notify_add(new_block
->host
, new_block
->max_length
);
1898 RAMBlock
*qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1899 bool share
, const char *mem_path
,
1902 RAMBlock
*new_block
;
1903 Error
*local_err
= NULL
;
1905 if (xen_enabled()) {
1906 error_setg(errp
, "-mem-path not supported with Xen");
1910 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1912 * file_ram_alloc() needs to allocate just like
1913 * phys_mem_alloc, but we haven't bothered to provide
1917 "-mem-path not supported with this accelerator");
1921 size
= HOST_PAGE_ALIGN(size
);
1922 new_block
= g_malloc0(sizeof(*new_block
));
1924 new_block
->used_length
= size
;
1925 new_block
->max_length
= size
;
1926 new_block
->flags
= share
? RAM_SHARED
: 0;
1927 new_block
->host
= file_ram_alloc(new_block
, size
,
1929 if (!new_block
->host
) {
1934 ram_block_add(new_block
, &local_err
);
1937 error_propagate(errp
, local_err
);
1945 RAMBlock
*qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1946 void (*resized
)(const char*,
1949 void *host
, bool resizeable
,
1950 MemoryRegion
*mr
, Error
**errp
)
1952 RAMBlock
*new_block
;
1953 Error
*local_err
= NULL
;
1955 size
= HOST_PAGE_ALIGN(size
);
1956 max_size
= HOST_PAGE_ALIGN(max_size
);
1957 new_block
= g_malloc0(sizeof(*new_block
));
1959 new_block
->resized
= resized
;
1960 new_block
->used_length
= size
;
1961 new_block
->max_length
= max_size
;
1962 assert(max_size
>= size
);
1964 new_block
->page_size
= getpagesize();
1965 new_block
->host
= host
;
1967 new_block
->flags
|= RAM_PREALLOC
;
1970 new_block
->flags
|= RAM_RESIZEABLE
;
1972 ram_block_add(new_block
, &local_err
);
1975 error_propagate(errp
, local_err
);
1981 RAMBlock
*qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1982 MemoryRegion
*mr
, Error
**errp
)
1984 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1987 RAMBlock
*qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1989 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1992 RAMBlock
*qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1993 void (*resized
)(const char*,
1996 MemoryRegion
*mr
, Error
**errp
)
1998 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
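/*
 * Illustrative usage sketch (not part of the original file): a device
 * wanting resize notifications passes a callback.  my_resized below is
 * a hypothetical hook:
 *
 *     static void my_resized(const char *id, uint64_t length, void *host)
 *     {
 *         // update device state for the new used_length
 *     }
 *
 *     RAMBlock *rb = qemu_ram_alloc_resizeable(initial, max, my_resized,
 *                                              mr, &error_fatal);
 */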
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    if (block->host) {
        ram_block_notify_remove(block->host, block->max_length);
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    RAMBLOCK_FOREACH(block) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr);
}
/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, addr);
}
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;
        rcu_read_lock();
        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
        if (block) {
            *offset = ram_addr - block->offset;
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    RAMBLOCK_FOREACH(block) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    rcu_read_unlock();
    return block;
}
/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    RAMBLOCK_FOREACH(block) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    ram_addr_t offset;

    block = qemu_ram_block_from_host(ptr, false, &offset);
    if (!block) {
        return RAM_ADDR_INVALID;
    }

    return block->offset + offset;
}
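/*
 * Illustrative usage sketch (not part of the original file): TLB fault
 * paths map a host pointer back to its ram_addr_t and must handle the
 * not-found case; host_ptr below is hypothetical:
 *
 *     ram_addr_t ra = qemu_ram_addr_from_host(host_ptr);
 *     if (ra == RAM_ADDR_INVALID) {
 *         // pointer does not belong to guest RAM
 *     }
 */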
/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    bool locked = false;

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        locked = true;
        tb_lock();
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }

    if (locked) {
        tb_unlock();
    }

    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    uint32_t cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;

                /* Both tb_lock and iothread_mutex will be reset when
                 * cpu_loop_exit or cpu_loop_exit_noexc longjmp
                 * back into the cpu_exec main loop.
                 */
                tb_lock();
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_loop_exit_noexc(cpu);
                }
            } else {
                wp->flags &= ~BP_WATCHPOINT_HIT;
            }
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default:
        abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default:
        abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2419 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2420 unsigned len
, MemTxAttrs attrs
)
2422 subpage_t
*subpage
= opaque
;
2426 #if defined(DEBUG_SUBPAGE)
2427 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2428 subpage
, len
, addr
);
2430 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2437 *data
= ldub_p(buf
);
2440 *data
= lduw_p(buf
);
2453 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2454 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2456 subpage_t
*subpage
= opaque
;
2459 #if defined(DEBUG_SUBPAGE)
2460 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2461 " value %"PRIx64
"\n",
2462 __func__
, subpage
, len
, addr
, value
);
2480 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}
static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
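
/* Subpages exist to handle the case where distinct MemoryRegionSections
 * share a single target page. As a sketch (hypothetical layout, not from
 * this file): two devices mapped at 0x1000 and 0x1800 with 2 KiB each on a
 * 4 KiB target page force the dispatch builder to allocate one subpage_t
 * for the page and point each half at its own section via
 * subpage_register().
 */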
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
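
/* The iotlb entries built for I/O pages carry a section index rather than
 * a physical address; only the bits below TARGET_PAGE_SIZE matter here.
 * Worked example: with 4 KiB pages, an index value of 0x2003 selects
 * d->map.sections[3], since 0x2003 & ~TARGET_PAGE_MASK == 3.
 */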
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);

    /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
     * which can be called without the iothread mutex.
     */
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_clear_global_locking(&io_mem_notdirty);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}
static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
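
/* Readers never take a lock against this swap: they sample the dispatch
 * pointer inside an RCU critical section, following the pattern used by
 * the callers in this file:
 *
 *     rcu_read_lock();
 *     AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
 *     ... walk d->phys_map / d->map.sections ...
 *     rcu_read_unlock();
 *
 * call_rcu() above frees the old table only after all such readers finish.
 */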
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    atomic_rcu_set(&cpuas->memory_dispatch, d);
    tlb_flush(cpuas->cpu);
}
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}
void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}
void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
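
/* Typical board-level usage of these accessors (illustrative, not from
 * this file): mapping a device's MMIO region into the global address
 * space, where dev->mmio is a hypothetical MemoryRegion:
 *
 *     memory_region_add_subregion(get_system_memory(), 0x10000000,
 *                                 &dev->mmio);
 */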
#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_lock();
        tb_invalidate_phys_range(addr, addr + length);
        tb_unlock();
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
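
/* Worked example: for a 4-byte access at addr 0x3002 to a region that does
 * not allow unaligned accesses, addr & -addr == 2 (the lowest set bit), so
 * l is clamped to 2 and the caller issues two 2-byte accesses instead.
 */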
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
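
/* Callers accumulate the result so the lock is dropped at most once per
 * chunk; this is the pattern used by the access loops below:
 *
 *     release_lock |= prepare_mmio_access(mr);
 *     ... dispatch the access ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *         release_lock = false;
 *     }
 */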
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = (uint32_t)ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
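
/* Usage sketch (illustrative): reading a 32-bit value from guest physical
 * memory through this slow path, where gpa is a hypothetical guest
 * physical address:
 *
 *     uint8_t buf[4];
 *     cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0);
 *     uint32_t v = ldl_p(buf);
 */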
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}
static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    /* The data structures we set up here depend on knowing the page size,
     * so no more changes can be made after this point.
     * In an ideal world, nothing we did before we had finished the
     * machine setup would care about the target page size, and we could
     * do this much later, rather than requiring board models to state
     * up front what their requirements are.
     */
    finalize_target_page_bits();
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
static hwaddr
address_space_extend_translation(AddressSpace *as, hwaddr addr, hwaddr target_len,
                                 MemoryRegion *mr, hwaddr base, hwaddr len,
                                 bool is_write)
{
    hwaddr done = 0;
    hwaddr xlat;
    MemoryRegion *this_mr;

    for (;;) {
        target_len -= len;
        addr += len;
        done += len;
        if (target_len == 0) {
            return done;
        }

        len = target_len;
        this_mr = address_space_translate(as, addr, &xlat, &len, is_write);
        if (this_mr != mr || xlat != base + done) {
            return done;
        }
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr l, xlat;
    MemoryRegion *mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    memory_region_ref(mr);
    *plen = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write);
    ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
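
/* Usage sketch (illustrative) of the map/unmap pair for a DMA-style write;
 * gpa, size and data are hypothetical:
 *
 *     hwaddr xlen = size;
 *     void *host = address_space_map(as, gpa, &xlen, true);
 *     if (host) {
 *         memcpy(host, data, xlen);
 *         address_space_unmap(as, host, xlen, true, xlen);
 *     }
 *
 * When the region is not directly accessible, the bounce buffer above
 * limits xlen to TARGET_PAGE_SIZE.
 */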
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
#define ARG1_DECL                AddressSpace *as
#define ARG1                     as
#define SUFFIX
#define TRANSLATE(...)           address_space_translate(as, __VA_ARGS__)
#define IS_DIRECT(mr, is_write)  memory_access_is_direct(mr, is_write)
#define MAP_RAM(mr, ofs)         qemu_map_ram_ptr((mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK(...)       rcu_read_lock()
#define RCU_READ_UNLOCK(...)     rcu_read_unlock()
#include "memory_ldst.inc.c"
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write)
{
    cache->as = as;
    cache->xlat = addr;
    cache->is_write = is_write;
    return len;
}

void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len)
{
}

void address_space_cache_destroy(MemoryRegionCache *cache)
{
}

#define ARG1_DECL                MemoryRegionCache *cache
#define ARG1                     cache
#define SUFFIX                   _cached
#define TRANSLATE(addr, ...)     \
    address_space_translate(cache->as, cache->xlat + (addr), __VA_ARGS__)
#define IS_DIRECT(mr, is_write)  true
#define MAP_RAM(mr, ofs)         qemu_map_ram_ptr((mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK()          rcu_read_lock()
#define RCU_READ_UNLOCK()        rcu_read_unlock()
#include "memory_ldst.inc.c"
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    cpu_synchronize_state(cpu);
    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_size(void)
{
    return TARGET_PAGE_SIZE;
}
/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
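
/* Illustrative iterator callback (hypothetical, matching RAMBlockIterFunc):
 *
 *     static int print_block(const char *idstr, void *host, ram_addr_t offset,
 *                            ram_addr_t length, void *opaque)
 *     {
 *         printf("%s: host %p len " RAM_ADDR_FMT "\n", idstr, host, length);
 *         return 0;
 *     }
 *
 *     qemu_ram_foreach_block(print_block, NULL);
 */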
/*
 * Unmap pages of memory from start to start+length such that
 * they a) read as 0, b) Trigger whatever fault mechanism
 * the OS provides for postcopy.
 * The pages must be unmapped by the end of the function.
 * Returns: 0 on success, non-0 on failure
 */
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
{
    int ret = -1;

    uint8_t *host_startaddr = rb->host + start;

    if ((uintptr_t)host_startaddr & (rb->page_size - 1)) {
        error_report("ram_block_discard_range: Unaligned start address: %p",
                     host_startaddr);
        goto err;
    }

    if ((start + length) <= rb->used_length) {
        uint8_t *host_endaddr = host_startaddr + length;
        if ((uintptr_t)host_endaddr & (rb->page_size - 1)) {
            error_report("ram_block_discard_range: Unaligned end address: %p",
                         host_endaddr);
            goto err;
        }

        errno = ENOTSUP; /* If we are missing MADVISE etc */

        if (rb->page_size == qemu_host_page_size) {
#if defined(CONFIG_MADVISE)
            /* Note: We need the madvise MADV_DONTNEED behaviour of definitely
             * freeing the page.
             */
            ret = madvise(host_startaddr, length, MADV_DONTNEED);
#endif
        } else {
            /* Huge page case - unfortunately it can't do DONTNEED, but
             * it can do the equivalent by FALLOC_FL_PUNCH_HOLE in the
             * huge page file.
             */
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
            ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                            start, length);
#endif
        }
        if (ret) {
            ret = -errno;
            error_report("ram_block_discard_range: Failed to discard range "
                         "%s:%" PRIx64 " +%zx (%d)",
                         rb->idstr, start, length, ret);
        }
    } else {
        error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
                     "/%zx/" RAM_ADDR_FMT ")",
                     rb->idstr, start, length, rb->used_length);
    }