 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/error.h"

#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "sysemu/hostmem.h"
#include "sysemu/hw_accel.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace-root.h"

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <linux/falloc.h>
#endif

#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "migration/vmstate.h"

#include "qemu/range.h"

#include "qemu/mmap-alloc.h"

#include "monitor/monitor.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
#endif

#ifdef TARGET_PAGE_BITS_VARY
bool target_page_bits_decided;
#endif

CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
bool set_preferred_target_page_bits(int bits)
{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger.  And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
            return false;
        }
        target_page_bits = bits;
    }
#endif
    return true;
}

#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}
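/*
 * Illustrative walk-through (hypothetical values): if one CPU model asks
 * for set_preferred_target_page_bits(12) and another later asks for 10,
 * the value drops from 12 to 10, since the smaller page size is the
 * common denominator.  Once finalize_target_page_bits() has set
 * target_page_bits_decided, a request for anything smaller than the
 * decided value is refused and set_preferred_target_page_bits() returns
 * false.
 */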
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
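/*
 * Worked example (values assumed for illustration: P_L2_BITS == 9,
 * TARGET_PAGE_BITS == 12, i.e. 4 KiB target pages):
 *
 *   P_L2_SIZE   = 1 << 9 = 512 entries per Node
 *   P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6
 *
 * so resolving a full 64-bit physical address walks at most six levels
 * of this radix tree, consuming 9 address bits per level.
 */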
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb_alloc;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    uint16_t sub_section[];
} subpage_t;
#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_log_global_after_sync(MemoryListener *listener);
static void tcg_commit(MemoryListener *listener);

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

struct DirtyBitmapSnapshot {
    unsigned long dirty[];
};
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, uint64_t *nb, uint16_t leaf,
                                int level)
{
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
        }
        phys_page_set_level(map, lp, index, nb, leaf, level - 1);
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, uint64_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
    }

    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
        }
        phys_page_compact(&p[i], nodes);
    }

    /* We can only compress if there's only one child. */
    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         */
    }
    lp->skip += p[valid_ptr].skip;
}

void address_space_dispatch_compact(AddressSpaceDispatch *d)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}
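/*
 * Example (illustrative values): a section of size 0x1000 whose
 * offset_within_address_space is 0x3000 covers addr 0x3fff but not
 * 0x4000.  A section whose Int128 size has a non-zero high word can
 * only be the full 2^64 space, so it covers every address.
 */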
static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
{
    PhysPageEntry lp = d->phys_map, *p;
    Node *nodes = d->map.nodes;
    MemoryRegionSection *sections = d->map.sections;
    hwaddr index = addr >> TARGET_PAGE_BITS;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    }
    return &sections[PHYS_SECTION_UNASSIGNED];
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);

    if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
        !section_covers_addr(section, addr)) {
        section = phys_page_find(d, addr);
        atomic_set(&d->mru_section, section);
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
/**
 * address_space_translate_iommu - translate an address through an IOMMU
 * memory region and then through the target address space.
 *
 * @iommu_mr: the IOMMU memory region that we start the translation from
 * @addr: the address to be translated through the MMU
 * @xlat: the translated address offset within the destination memory region.
 *        It cannot be %NULL.
 * @plen_out: valid read/write length of the translated address. It
 * @page_mask_out: page mask for the translated address. This
 *                 should only be meaningful for IOMMU translated
 *                 addresses, since there may be huge pages that this bit
 *                 would tell. It can be %NULL if we don't care about it.
 * @is_write: whether the translation operation is for write
 * @is_mmio: whether this can be MMIO, set true if it can
 * @target_as: the address space targeted by the IOMMU
 * @attrs: transaction attributes
 *
 * This function is called from RCU critical section.  It is the common
 * part of flatview_do_translate and address_space_translate_cached.
 */
static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
                                                          hwaddr *page_mask_out,
                                                          AddressSpace **target_as,
{
    MemoryRegionSection *section;
    hwaddr page_mask = (hwaddr)-1;

    do {
        IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);

        if (imrc->attrs_to_index) {
            iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
        }

        iotlb = imrc->translate(iommu_mr, addr, is_write ?
                                IOMMU_WO : IOMMU_RO, iommu_idx);

        if (!(iotlb.perm & (1 << is_write))) {
        }

        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        page_mask &= iotlb.addr_mask;
        *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
        *target_as = iotlb.target_as;

        section = address_space_translate_internal(
                address_space_to_dispatch(iotlb.target_as), addr, xlat,

        iommu_mr = memory_region_get_iommu(section->mr);
    } while (unlikely(iommu_mr));

    *page_mask_out = page_mask;

    return (MemoryRegionSection) { .mr = &io_mem_unassigned };
/**
 * flatview_do_translate - translate an address in FlatView
 *
 * @fv: the flat view that we want to translate on
 * @addr: the address to be translated in above address space
 * @xlat: the translated address offset within memory region. It
 * @plen_out: valid read/write length of the translated address. It
 *            can be @NULL when we don't care about it.
 * @page_mask_out: page mask for the translated address. This
 *                 should only be meaningful for IOMMU translated
 *                 addresses, since there may be huge pages that this bit
 *                 would tell. It can be @NULL if we don't care about it.
 * @is_write: whether the translation operation is for write
 * @is_mmio: whether this can be MMIO, set true if it can
 * @target_as: the address space targeted by the IOMMU
 * @attrs: memory transaction attributes
 *
 * This function is called from RCU critical section
 */
static MemoryRegionSection flatview_do_translate(FlatView *fv,
                                                 hwaddr *page_mask_out,
                                                 AddressSpace **target_as,
{
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    hwaddr plen = (hwaddr)(-1);

    section = address_space_translate_internal(
            flatview_to_dispatch(fv), addr, xlat,

    iommu_mr = memory_region_get_iommu(section->mr);
    if (unlikely(iommu_mr)) {
        return address_space_translate_iommu(iommu_mr, xlat,
                                             plen_out, page_mask_out,
    }
    /* Not behind an IOMMU, use default page size. */
    *page_mask_out = ~TARGET_PAGE_MASK;
/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write, MemTxAttrs attrs)
{
    MemoryRegionSection section;
    hwaddr xlat, page_mask;

    /*
     * This can never be MMIO, and we don't really care about plen,
     */
    section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
                                    NULL, &page_mask, is_write, false, &as,

    /* Illegal translation */
    if (section.mr == &io_mem_unassigned) {
    }

    /* Convert memory region offset into address space offset */
    xlat += section.offset_within_address_space -
            section.offset_within_region;

    return (IOMMUTLBEntry) {
        .iova = addr & ~page_mask,
        .translated_addr = xlat & ~page_mask,
        .addr_mask = page_mask,
        /* IOTLBs are for DMAs, and DMA only allows on RAMs. */

    return (IOMMUTLBEntry) {0};
/* Called from RCU critical section */
MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool is_write,
{
    MemoryRegionSection section;
    AddressSpace *as = NULL;

    /* This can be MMIO, so setup MMIO bit. */
    section = flatview_do_translate(fv, addr, xlat, plen, NULL,
                                    is_write, true, &as, attrs);

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }
typedef struct TCGIOMMUNotifier {

static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n);

    if (!notifier->active) {
    }
    tlb_flush(notifier->cpu);
    notifier->active = false;
    /* We leave the notifier struct on the list to avoid reallocating it later.
     * Generally the number of IOMMUs a CPU deals with will be small.
     * In any case we can't unregister the iommu notifier from a notify
     */
}
static void tcg_register_iommu_notifier(CPUState *cpu,
                                        IOMMUMemoryRegion *iommu_mr,
{
    /* Make sure this CPU has an IOMMU notifier registered for this
     * IOMMU/IOMMU index combination, so that we can flush its TLB
     * when the IOMMU tells us the mappings we've cached have changed.
     */
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    TCGIOMMUNotifier *notifier;

    for (i = 0; i < cpu->iommu_notifiers->len; i++) {
        notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
        if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) {
        }
    }
    if (i == cpu->iommu_notifiers->len) {
        /* Not found, add a new entry at the end of the array */
        cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1);
        notifier = g_new0(TCGIOMMUNotifier, 1);
        g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier;

        notifier->iommu_idx = iommu_idx;

        /* Rather than trying to register interest in the specific part
         * of the iommu's address space that we've accessed and then
         * expand it later as subsequent accesses touch more of it, we
         * just register interest in the whole thing, on the assumption
         * that iommu reconfiguration will be rare.
         */
        iommu_notifier_init(&notifier->n,
                            tcg_iommu_unmap_notify,
                            IOMMU_NOTIFIER_UNMAP,
        memory_region_register_iommu_notifier(notifier->mr, &notifier->n);
    }

    if (!notifier->active) {
        notifier->active = true;
    }
}
static void tcg_iommu_free_notifier_list(CPUState *cpu)
{
    /* Destroy the CPU's notifier list */
    TCGIOMMUNotifier *notifier;

    for (i = 0; i < cpu->iommu_notifiers->len; i++) {
        notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
        memory_region_unregister_iommu_notifier(notifier->mr, &notifier->n);
    }
    g_array_free(cpu->iommu_notifiers, true);
}
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot)
{
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    IOMMUMemoryRegionClass *imrc;

    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);

    section = address_space_translate_internal(d, addr, &addr, plen, false);

    iommu_mr = memory_region_get_iommu(section->mr);

    imrc = memory_region_get_iommu_class_nocheck(iommu_mr);

    iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
    tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
    /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
     * doesn't short-cut its translation table walk.
     */
    iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
    addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
            | (addr & iotlb.addr_mask));
    /* Update the caller's prot bits to remove permissions the IOMMU
     * is giving us a failure response for.  If we get down to no
     * permissions left at all we can give up now.
     */
    if (!(iotlb.perm & IOMMU_RO)) {
        *prot &= ~(PAGE_READ | PAGE_EXEC);
    }
    if (!(iotlb.perm & IOMMU_WO)) {
        *prot &= ~PAGE_WRITE;
    }

    d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));

    assert(!memory_region_is_iommu(section->mr));

    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
}
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

    /* loadvm has just updated the content of RAM, bypassing the
     * usual mechanisms that ensure we flush TBs for writes to
     * memory we've translated code from.  So we must flush all TBs,
     * which will now be stale.
     */
}
static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
    }
};
CPUState *qemu_get_cpu(int index)
{
        if (cpu->cpu_index == index) {
        }
}
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr)
{
    CPUAddressSpace *newas;
    AddressSpace *as = g_new0(AddressSpace, 1);

    as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
    address_space_init(as, mr, as_name);

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    /* address space 0 gets the convenience alias */

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync;
    newas->tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&newas->tcg_as_listener, as);
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
#ifndef CONFIG_USER_ONLY
    tcg_iommu_free_notifier_list(cpu);
#endif
}

Property cpu_common_props[] = {
#ifndef CONFIG_USER_ONLY
    /* Create a memory property for softmmu CPU object,
     * so users can wire up its memory. (This can't go in hw/core/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
#endif
    DEFINE_PROP_END_OF_LIST(),
};
void cpu_exec_initfn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif
}

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    static bool tcg_target_initialized;

    if (tcg_enabled() && !tcg_target_initialized) {
        tcg_target_initialized = true;
        cc->tcg_initialize();
    }

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }

    cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *));
#endif
}
const char *parse_cpu_option(const char *cpu_option)
{
    gchar **model_pieces;
    const char *cpu_type;

    model_pieces = g_strsplit(cpu_option, ",", 2);
    if (!model_pieces[0]) {
        error_report("-cpu option cannot be empty");
    }

    oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]);
    if (oc == NULL) {
        error_report("unable to find CPU model '%s'", model_pieces[0]);
        g_strfreev(model_pieces);
    }

    cpu_type = object_class_get_name(oc);
    cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
    g_strfreev(model_pieces);
}
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr)
{
    tb_invalidate_phys_page_range(addr, addr + 1, 0);
}

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_addr(pc);
}
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
{
    ram_addr_t ram_addr;

    if (!tcg_enabled()) {
    }

    mr = address_space_translate(as, addr, &addr, &l, false, attrs);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    /* Locks grabbed by tb_invalidate_phys_addr */
    tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                            phys | (pc & ~TARGET_PAGE_MASK), attrs);
}
#endif
#ifndef CONFIG_USER_ONLY
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
    }
    wp = g_malloc(sizeof(*wp));

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    }
    QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);

    tlb_flush_page(cpu, addr);
}
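/*
 * Typical caller sketch (illustrative only; the gdbstub is one real user
 * of this API).  BP_GDB and BP_MEM_WRITE are existing flag bits; the
 * surrounding error handling here is hypothetical:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(cpu, addr, 4,
 *                               BP_GDB | BP_MEM_WRITE, &wp) < 0) {
 *         // e.g. report the failure back to the debugger
 *     }
 */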
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
{
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool watchpoint_address_matches(CPUWatchpoint *wp,
                                              vaddr addr, vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
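/*
 * Worked example (illustrative values): a watchpoint at vaddr 0x1000 of
 * len 0x10 gives wpend == 0x100f; an access at addr 0x100c of len 8
 * gives addrend == 0x1013.  Neither addr > wpend nor wp->vaddr > addrend
 * holds, so the ranges overlap and the function returns true.  Using the
 * inclusive end (the "- 1") also keeps an access that runs up to the very
 * top of the address space from wrapping addr + len back to zero.
 */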
/* Return flags for watchpoints that match addr + prot.  */
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
{
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) {
        }
    }
}

#endif /* !CONFIG_USER_ONLY */
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    }
    QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);

    breakpoint_invalidate(cpu, pc);
}
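/*
 * Typical caller sketch (illustrative; the error handling shown is
 * hypothetical):
 *
 *     if (cpu_breakpoint_insert(cpu, pc, BP_GDB, NULL) < 0) {
 *         // tell the debugger the breakpoint could not be set
 *     }
 *
 * breakpoint_invalidate() above forces any translated block containing
 * pc to be regenerated so that the breakpoint actually fires.
 */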
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        }
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
    }
}
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    }

#if defined(CONFIG_USER_ONLY)
    struct sigaction act;
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    sigaction(SIGABRT, &act, NULL);
#endif
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
    }
    RAMBLOCK_FOREACH(block) {
        if (addr - block->offset < block->max_length) {
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);

    /* It is safe to write mru_block outside the iothread lock.  This
     *
     *     xxx removed from list
     *     call_rcu(reclaim_ramblock, xxx);
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    assert(tcg_enabled());
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);

    tlb_reset_dirty(cpu, start1, length);
}
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    uint64_t mr_offset, mr_size;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
    ramblock = qemu_get_ram_block(start);
    /* Range sanity check on the ramblock */
    assert(start >= ramblock->offset &&
           start + length <= ramblock->offset + ramblock->used_length);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
    }

    mr_offset = (ram_addr_t)(page << TARGET_PAGE_BITS) - ramblock->offset;
    mr_size = (end - page) << TARGET_PAGE_BITS;
    memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
{
    DirtyMemoryBlocks *blocks;
    ram_addr_t start = memory_region_get_ram_addr(mr) + offset;
    unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
    ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
    ram_addr_t last  = QEMU_ALIGN_UP(start + length, align);
    DirtyBitmapSnapshot *snap;
    unsigned long page, end, dest;

    snap = g_malloc0(sizeof(*snap) +
                     ((last - first) >> (TARGET_PAGE_BITS + 3)));
    snap->start = first;

    page = first >> TARGET_PAGE_BITS;
    end = last >> TARGET_PAGE_BITS;

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
        assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
        offset >>= BITS_PER_LEVEL;

        bitmap_copy_and_clear_atomic(snap->dirty + dest,
                                     blocks->blocks[idx] + offset,
        dest += num >> BITS_PER_LEVEL;
    }

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    memory_region_clear_dirty_bitmap(mr, offset, length);
}

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
{
    unsigned long page, end;

    assert(start >= snap->start);
    assert(start + length <= snap->end);

    end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
    page = (start - snap->start) >> TARGET_PAGE_BITS;

    while (page < end) {
        if (test_bit(page, snap->dirty)) {
        }
    }
}
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       hwaddr paddr, hwaddr xlat,
                                       target_ulong *address)
{
    if (memory_region_is_ram(section->mr)) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        }
        iotlb |= PHYS_SECTION_ROM;
    } else {
        AddressSpaceDispatch *d;

        d = flatview_to_dispatch(section->fv);
        iotlb = section - d->map.sections;
    }
}

#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
static subpage_t *subpage_init(FlatView *fv, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align, bool shared) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align, bool shared))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
}
static void register_subpage(FlatView *fv, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);

    hwaddr base = section->offset_within_address_space
    MemoryRegionSection *existing = phys_page_find(d, base);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(fv, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    }
    subpage = container_of(existing->mr, subpage_t, iomem);

    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(FlatView *fv,
                               MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,

    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
/*
 * The range in *section* may look like this:
 *
 * where s stands for subpage and P for page.
 */
void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
{
    MemoryRegionSection remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    /* register first subpage */
    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
                        - remain.offset_within_address_space;

        MemoryRegionSection now = remain;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(fv, &now);
        if (int128_eq(remain.size, now.size)) {
        }
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
    }

    /* register whole pages */
    if (int128_ge(remain.size, page_size)) {
        MemoryRegionSection now = remain;
        now.size = int128_and(now.size, int128_neg(page_size));
        register_multipage(fv, &now);
        if (int128_eq(remain.size, now.size)) {
        }
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
    }

    /* register last subpage */
    register_subpage(fv, &remain);
}
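/*
 * Worked example (illustrative, assuming a 4 KiB target page): a section
 * covering [0x0800, 0x3400) is split into
 *   - a first subpage  [0x0800, 0x1000),
 *   - whole pages      [0x1000, 0x3000) via register_multipage(),
 *   - a last subpage   [0x3000, 0x3400).
 * A section that starts unaligned and never reaches a page boundary is
 * consumed entirely by the first register_subpage() call.
 */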
void qemu_flush_coalesced_mmio_buffer(void)
{
    kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

void ram_block_dump(Monitor *mon)
{
    monitor_printf(mon, "%24s %8s %18s %18s %18s\n",
                   "Block Name", "PSize", "Offset", "Used", "Total");
    RAMBLOCK_FOREACH(block) {
        psize = size_to_str(block->page_size);
        monitor_printf(mon, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64
                       " 0x%016" PRIx64 "\n", block->idstr, psize,
                       (uint64_t)block->offset,
                       (uint64_t)block->used_length,
                       (uint64_t)block->max_length);
    }
}
/*
 * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
 * may or may not name the same files / on the same filesystem now as
 * when we actually open and map them.  Iterate over the file
 * descriptors instead, and use qemu_fd_getpagesize().
 */
static int find_min_backend_pagesize(Object *obj, void *opaque)
{
    long *hpsize_min = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        HostMemoryBackend *backend = MEMORY_BACKEND(obj);
        long hpsize = host_memory_backend_pagesize(backend);

        if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) {
            *hpsize_min = hpsize;
        }
    }
}

static int find_max_backend_pagesize(Object *obj, void *opaque)
{
    long *hpsize_max = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        HostMemoryBackend *backend = MEMORY_BACKEND(obj);
        long hpsize = host_memory_backend_pagesize(backend);

        if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) {
            *hpsize_max = hpsize;
        }
    }
}
/*
 * TODO: We assume right now that all mapped host memory backends are
 * used as RAM, however some might be used for different purposes.
 */
long qemu_minrampagesize(void)
{
    long hpsize = LONG_MAX;
    long mainrampagesize;
    Object *memdev_root;
    MachineState *ms = MACHINE(qdev_get_machine());

    mainrampagesize = qemu_mempath_getpagesize(mem_path);

    /* it's possible we have memory-backend objects with
     * hugepage-backed RAM. these may get mapped into system
     * address space via -numa parameters or memory hotplug
     * hooks. we want to take these into account, but we
     * also want to make sure these supported hugepage
     * sizes are applicable across the entire range of memory
     * we may boot from, so we take the min across all
     * backends, and assume normal pages in cases where a
     * backend isn't backed by hugepages.
     */
    memdev_root = object_resolve_path("/objects", NULL);
    object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);

    if (hpsize == LONG_MAX) {
        /* No additional memory regions found ==> Report main RAM page size */
        return mainrampagesize;
    }

    /* If NUMA is disabled or the NUMA nodes are not backed with a
     * memory-backend, then there is at least one node using "normal" RAM,
     * so if its page size is smaller we have got to report that size instead.
     */
    if (hpsize > mainrampagesize &&
        (ms->numa_state == NULL ||
         ms->numa_state->num_nodes == 0 ||
         ms->numa_state->nodes[0].node_memdev == NULL)) {
        error_report("Huge page support disabled (n/a for main memory).");
        return mainrampagesize;
    }
}

long qemu_maxrampagesize(void)
{
    long pagesize = qemu_mempath_getpagesize(mem_path);
    Object *memdev_root = object_resolve_path("/objects", NULL);

    object_child_foreach(memdev_root, find_max_backend_pagesize,
}
#else
long qemu_minrampagesize(void)
{
    return getpagesize();
}

long qemu_maxrampagesize(void)
{
    return getpagesize();
}
#endif
static int64_t get_file_size(int fd)
{
    int64_t size = lseek(fd, 0, SEEK_END);
}

static int file_ram_open(const char *path,
                         const char *region_name,
{
    char *sanitized_name;

    fd = open(path, O_RDWR);
        /* @path names an existing file, use it */
    if (errno == ENOENT) {
        /* @path names a file that doesn't exist, create it */
        fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
    } else if (errno == EISDIR) {
        /* @path names a directory, create a file there */
        /* Make name safe to use with mkstemp by replacing '/' with '_'. */
        sanitized_name = g_strdup(region_name);
        for (c = sanitized_name; *c != '\0'; c++) {
        }

        filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
        g_free(sanitized_name);

        fd = mkstemp(filename);
    }
    if (errno != EEXIST && errno != EINTR) {
        error_setg_errno(errp, errno,
                         "can't open backing store %s for guest RAM",
    }
    /*
     * Try again on EINTR and EEXIST.  The latter happens when
     * something else creates the file between our two open().
     */
}
static void *file_ram_alloc(RAMBlock *block,
{
    MachineState *ms = MACHINE(qdev_get_machine());

    block->page_size = qemu_fd_getpagesize(fd);
    if (block->mr->align % block->page_size) {
        error_setg(errp, "alignment 0x%" PRIx64
                   " must be multiples of page size 0x%zx",
                   block->mr->align, block->page_size);
    } else if (block->mr->align && !is_power_of_2(block->mr->align)) {
        error_setg(errp, "alignment 0x%" PRIx64
                   " must be a power of two", block->mr->align);
    }
    block->mr->align = MAX(block->page_size, block->mr->align);
#if defined(__s390x__)
    if (kvm_enabled()) {
        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
    }
#endif

    if (memory < block->page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%zx",
                   memory, block->page_size);
    }

    memory = ROUND_UP(memory, block->page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     *
     * Do not truncate the non-empty backend file to avoid corrupting
     * the existing data in the file. Disabling shrinking is not
     * enough. For example, the current vNVDIMM implementation stores
     * the guest NVDIMM labels at the end of the backend file.  If the
     * backend file is later extended, QEMU will not be able to find
     * those labels.  Therefore, extending the non-empty backend file
     * is disabled as well.
     */
    if (truncate && ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED, block->flags & RAM_PMEM);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
    }

    os_mem_prealloc(fd, area, memory, ms->smp.cpus, errp);
    if (errp && *errp) {
        qemu_ram_munmap(fd, area, memory);
    }
}
/* Allocate space within the ram_addr_t space that governs the
 * Called with the ramlist lock held.
 */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
    }

    RAMBLOCK_FOREACH(block) {
        ram_addr_t candidate, next = RAM_ADDR_MAX;

        /* Align blocks to start on a 'long' in the bitmap
         * which makes the bitmap sync'ing take the fast path.
         */
        candidate = block->offset + block->max_length;
        candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS);

        /* Search for the closest following block
         */
        RAMBLOCK_FOREACH(next_block) {
            if (next_block->offset >= candidate) {
                next = MIN(next, next_block->offset);
            }
        }

        /* If it fits remember our place and remember the size
         * of gap, but keep going so that we might find a smaller
         * gap to fill so avoiding fragmentation.
         */
        if (next - candidate >= size && next - candidate < mingap) {
            mingap = next - candidate;
        }

        trace_find_ram_offset_loop(size, candidate, offset, next, mingap);
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
    }

    trace_find_ram_offset(size, offset);
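/*
 * Illustrative example (addresses are made up): if block A occupies
 * [0x00000000, 0x40000000) and block B occupies [0x50000000, 0x80000000),
 * the gap after A is 0x10000000.  A request for 0x08000000 fits both in
 * that gap and after B, but the loop keeps the smallest fitting gap
 * (mingap), so the new block lands at 0x40000000, which limits
 * fragmentation of the ram_addr_t space.
 */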
static unsigned long last_ram_page(void)
{
    ram_addr_t last = 0;

    RAMBLOCK_FOREACH(block) {
        last = MAX(last, block->offset + block->max_length);
    }
    return last >> TARGET_PAGE_BITS;
}
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        perror("qemu_madvise");
        fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                "but dump_guest_core=off specified\n");
    }
}
const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

void *qemu_ram_get_host_addr(RAMBlock *rb)
{
    return rb->host;
}

ram_addr_t qemu_ram_get_offset(RAMBlock *rb)
{
    return rb->offset;
}

ram_addr_t qemu_ram_get_used_length(RAMBlock *rb)
{
    return rb->used_length;
}

bool qemu_ram_is_shared(RAMBlock *rb)
{
    return rb->flags & RAM_SHARED;
}

/* Note: Only set at the start of postcopy */
bool qemu_ram_is_uf_zeroable(RAMBlock *rb)
{
    return rb->flags & RAM_UF_ZEROPAGE;
}

void qemu_ram_set_uf_zeroable(RAMBlock *rb)
{
    rb->flags |= RAM_UF_ZEROPAGE;
}

bool qemu_ram_is_migratable(RAMBlock *rb)
{
    return rb->flags & RAM_MIGRATABLE;
}

void qemu_ram_set_migratable(RAMBlock *rb)
{
    rb->flags |= RAM_MIGRATABLE;
}

void qemu_ram_unset_migratable(RAMBlock *rb)
{
    rb->flags &= ~RAM_MIGRATABLE;
}
/* Called with iothread lock held.  */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    assert(!new_block->idstr[0]);

    char *id = qdev_get_dev_path(dev);
    snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);

    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    RAMBLOCK_FOREACH(block) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
        }
    }
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(RAMBlock *block)
{
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    memset(block->idstr, 0, sizeof(block->idstr));
}
size_t qemu_ram_pagesize(RAMBlock *rb)
{
    return rb->page_size;
}

/* Returns the largest size of page in use */
size_t qemu_ram_pagesize_largest(void)
{
    RAMBLOCK_FOREACH(block) {
        largest = MAX(largest, qemu_ram_pagesize(block));
    }
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As memory core doesn't know how is memory accessed, it is up to
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
}
/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        g_free_rcu(old_blocks, rcu);
    }
}
static void ram_block_add(RAMBlock *new_block, Error **errp, bool shared)
{
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_page();

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            error_propagate(errp, err);
            qemu_mutex_unlock_ramlist();
        }
        new_block->host = phys_mem_alloc(new_block->max_length,
                                         &new_block->mr->align, shared);
        if (!new_block->host) {
            error_setg_errno(errp, errno,
                             "cannot set up guest memory '%s'",
                             memory_region_name(new_block->mr));
            qemu_mutex_unlock_ramlist();
        }
        memory_try_enable_merging(new_block->host, new_block->max_length);
    }

    new_ram_size = MAX(old_ram_size,
                       (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    RAMBLOCK_FOREACH(block) {
        if (block->max_length < new_block->max_length) {
        }
    }
    QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        ram_block_notify_add(new_block->host, new_block->max_length);
    }
}
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 uint32_t ram_flags, int fd,
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    /* Just support these ram flags by now. */
    assert((ram_flags & ~(RAM_SHARED | RAM_PMEM)) == 0);

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         */
                   "-mem-path not supported with this accelerator");
    }

    size = HOST_PAGE_ALIGN(size);
    file_size = get_file_size(fd);
    if (file_size > 0 && file_size < size) {
        error_setg(errp, "backing store %s size 0x%" PRIx64
                   " does not match 'size' option 0x" RAM_ADDR_FMT,
                   mem_path, file_size, size);
    }

    new_block = g_malloc0(sizeof(*new_block));
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = ram_flags;
    new_block->host = file_ram_alloc(new_block, size, fd, !file_size, errp);
    if (!new_block->host) {
    }

    ram_block_add(new_block, &local_err, ram_flags & RAM_SHARED);
    error_propagate(errp, local_err);
}
2367 RAMBlock
*qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
2368 uint32_t ram_flags
, const char *mem_path
,
2375 fd
= file_ram_open(mem_path
, memory_region_name(mr
), &created
, errp
);
2380 block
= qemu_ram_alloc_from_fd(size
, mr
, ram_flags
, fd
, errp
);
static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable, bool share,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->page_size = getpagesize();
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err, share);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false,
                                   false, mr, errp);
}

RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share,
                         MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false,
                                   share, mr, errp);
}

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true,
                                   false, mr, errp);
}
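/*
 * Example (illustrative sketch, not from the original file): boards and
 * devices normally reach the qemu_ram_alloc*() family through the
 * memory_region_init_ram*() wrappers rather than calling it directly.
 * The function name below is hypothetical; errors are fatal for brevity.
 */
static void example_init_board_ram(MemoryRegion *sysmem, Object *owner)
{
    static MemoryRegion ram;

    /* Allocates host memory via qemu_ram_alloc() under the hood. */
    memory_region_init_ram(&ram, owner, "example.ram",
                           16 * 1024 * 1024, &error_fatal);
    memory_region_add_subregion(sysmem, 0, &ram);
}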
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->fd, block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    if (block->host) {
        ram_block_notify_remove(block->host, block->max_length);
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    RAMBLOCK_FOREACH(block) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    error_report("Could not remap addr: "
                                 RAM_ADDR_FMT "@" RAM_ADDR_FMT "",
                                 length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0, false);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1, false);
    }
    return ramblock_ptr(block, addr);
}

/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size, bool lock)
{
    RAMBlock *block = ram_block;

    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, lock, lock);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1, lock);
    }

    return ramblock_ptr(block, addr);
}
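/*
 * Example (illustrative sketch, not from the original file): RAM that a
 * device itself owns, such as a framebuffer, is reached through
 * memory_region_get_ram_ptr(); guest-driven DMA should keep using
 * address_space_rw()/address_space_map().  Names below are hypothetical.
 */
static void example_clear_vram(MemoryRegion *vram_mr)
{
    /* Valid because the device owns vram_mr and it is plain RAM. */
    uint8_t *vram = memory_region_get_ram_ptr(vram_mr);

    memset(vram, 0, memory_region_size(vram_mr));
}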
/* Return the offset of a hostpointer within a ramblock */
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host)
{
    ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host;
    assert((uintptr_t)host >= (uintptr_t)rb->host);
    assert(res < rb->max_length);

    return res;
}
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;

        rcu_read_lock();
        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
        if (block) {
            *offset = ram_addr - block->offset;
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    RAMBLOCK_FOREACH(block) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    rcu_read_unlock();
    return block;
}
/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    RAMBLOCK_FOREACH(block) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    ram_addr_t offset;

    block = qemu_ram_block_from_host(ptr, false, &offset);
    if (!block) {
        return RAM_ADDR_INVALID;
    }

    return block->offset + offset;
}
/* Called within RCU critical section.  */
void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
                                   CPUState *cpu,
                                   vaddr mem_vaddr,
                                   ram_addr_t ram_addr,
                                   unsigned size)
{
    ndi->cpu = cpu;
    ndi->ram_addr = ram_addr;
    ndi->mem_vaddr = mem_vaddr;
    ndi->size = size;
    ndi->pages = NULL;

    assert(tcg_enabled());
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        ndi->pages = page_collection_lock(ram_addr, ram_addr + size);
        tb_invalidate_phys_page_fast(ndi->pages, ram_addr, size);
    }
}

/* Called within RCU critical section. */
void memory_notdirty_write_complete(NotDirtyInfo *ndi)
{
    if (ndi->pages) {
        assert(tcg_enabled());
        page_collection_unlock(ndi->pages);
        ndi->pages = NULL;
    }

    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ndi->ram_addr, ndi->size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
        tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
    }
}
/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    NotDirtyInfo ndi;

    memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
                                  ram_addr, size);

    stn_p(qemu_map_ram_ptr(NULL, ram_addr), size, val);
    memory_notdirty_write_complete(&ndi);
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write,
                                 MemTxAttrs attrs)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
};
/* Generate a debug exception if a watchpoint has been hit.  */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                          MemTxAttrs attrs, int flags, uintptr_t ra)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    assert(tcg_enabled());
    if (cpu->watchpoint_hit) {
        /*
         * We re-entered the check after replacing the TB.
         * Now raise the debug interrupt so that it will
         * trigger after the current instruction.
         */
        qemu_mutex_lock_iothread();
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        qemu_mutex_unlock_iothread();
        return;
    }

    addr = cc->adjust_watchpoint_address(cpu, addr, len);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (watchpoint_address_matches(wp, addr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = MAX(addr, wp->vaddr);
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;

                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit_restore(cpu, ra);
                } else {
                    /* Force execution of one insn next time.  */
                    cpu->cflags_next_tb = 1 | curr_cflags();
                    if (ra) {
                        cpu_restore_state(cpu, ra, true);
                    }
                    cpu_loop_exit_noexc(cpu);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
                                 MemTxAttrs attrs, uint8_t *buf, hwaddr len);
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
                                  const uint8_t *buf, hwaddr len);
static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
                                  bool is_write, MemTxAttrs attrs);
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
    if (res) {
        return res;
    }
    *data = ldn_p(buf, len);
    return MEMTX_OK;
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    stn_p(buf, len, value);
    return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write,
                            MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return flatview_access_valid(subpage->fv, addr + subpage->base,
                                 len, is_write, attrs);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(FlatView *fv, hwaddr base)
{
    subpage_t *mmio;

    /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */
    mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
    mmio->fv = fv;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif

    return mmio;
}
static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
{
    assert(fv);
    MemoryRegionSection section = {
        .fv = fv,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
static void readonly_mem_write(void *opaque, hwaddr addr,
                               uint64_t val, unsigned size)
{
    /* Ignore any write to ROM. */
}

static bool readonly_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write,
                                 MemTxAttrs attrs)
{
    return is_write;
}

/* This will only be used for writes, because reads are special cased
 * to directly access the underlying host ram.
 */
static const MemoryRegionOps readonly_mem_ops = {
    .write = readonly_mem_write,
    .valid.accepts = readonly_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
};
MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                      hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return &sections[index & ~TARGET_PAGE_MASK];
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &readonly_mem_ops,
                          NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);

    /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
     * which can be called without the iothread mutex.
     */
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_clear_global_locking(&io_mem_notdirty);
}
AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
{
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, fv, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, fv, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, fv, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };

    return d;
}

void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}
static void do_nothing(CPUState *cpu, run_on_cpu_data d)
{
}

static void tcg_log_global_after_sync(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;

    /* Wait for the CPU to end the current TB.  This avoids the following
     * incorrect race:
     *
     *      vCPU                         migration
     *      ----------------------       -------------------------
     *      TLB check -> slow path
     *        notdirty_mem_write
     *          write to RAM
     *          mark dirty
     *                                   clear dirty flag
     *      TLB check -> fast path
     *                                   read memory
     *        write to RAM
     *
     * Flushing the TLB is not enough; the race above is resolved
     * by pushing the migration thread's memory read after the vCPU thread has
     * written the memory.
     */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    assert(tcg_enabled());
    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = address_space_to_dispatch(cpuas->as);
    atomic_rcu_set(&cpuas->memory_dispatch, d);
    tlb_flush(cpuas->cpu);
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, target_ulong len, int is_write)
{
    int flags;
    target_ulong l, page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        assert(tcg_enabled());
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    /*
     * In principle this function would work on other memory region types too,
     * but the ROM device use case is the only one where this operation is
     * necessary.  Other memory regions should use the
     * address_space_read/write() APIs.
     */
    assert(memory_region_is_romd(mr));

    invalidate_and_set_dirty(mr, addr, size);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
/* Called within RCU critical section.  */
static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
                                           MemTxAttrs attrs,
                                           const uint8_t *buf,
                                           hwaddr len, hwaddr addr1,
                                           hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            val = ldn_he_p(buf, l);
            result |= memory_region_dispatch_write(mr, addr1, val,
                                                   size_memop(l), attrs);
        } else {
            /* RAM case */
            ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
    }

    return result;
}

/* Called from RCU critical section.  */
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
                                  const uint8_t *buf, hwaddr len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    l = len;
    mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
    result = flatview_write_continue(fv, addr, attrs, buf, len,
                                     addr1, l, mr);

    return result;
}
/* Called within RCU critical section.  */
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, uint8_t *buf,
                                   hwaddr len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            result |= memory_region_dispatch_read(mr, addr1, &val,
                                                  size_memop(l), attrs);
            stn_he_p(buf, l, val);
        } else {
            /* RAM case */
            ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
    }

    return result;
}

/* Called from RCU critical section.  */
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
                                 MemTxAttrs attrs, uint8_t *buf, hwaddr len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;

    l = len;
    mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
    return flatview_read_continue(fv, addr, attrs, buf, len,
                                  addr1, l, mr);
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    FlatView *fv;

    if (len > 0) {
        rcu_read_lock();
        fv = address_space_to_flatview(as);
        result = flatview_read(fv, addr, attrs, buf, len);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const uint8_t *buf, hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    FlatView *fv;

    if (len > 0) {
        rcu_read_lock();
        fv = address_space_to_flatview(as);
        result = flatview_write(fv, addr, attrs, buf, len);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, hwaddr len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, buf, len);
    } else {
        return address_space_read_full(as, addr, attrs, buf, len);
    }
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            hwaddr len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
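/*
 * Example (illustrative sketch, not from the original file): a device model
 * typically copies guest memory through address_space_rw() or the typed
 * helpers rather than dereferencing RAM pointers directly.  The function
 * name below is hypothetical.
 */
static MemTxResult example_dma_read_from_guest(AddressSpace *as, hwaddr src,
                                               void *dst, hwaddr size)
{
    /* The access may hit RAM or MMIO; the result reports decode errors. */
    return address_space_rw(as, src, MEMTXATTRS_UNSPECIFIED,
                            dst, size, false);
}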
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
                                                           hwaddr addr,
                                                           MemTxAttrs attrs,
                                                           const uint8_t *buf,
                                                           hwaddr len,
                                                           enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true, attrs);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
    return MEMTX_OK;
}

/* used for ROM loading : can write in RAM and ROM */
MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs,
                                    const uint8_t *buf, hwaddr len)
{
    return address_space_write_rom_internal(as, addr, attrs,
                                            buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, hwaddr len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    address_space_write_rom_internal(&address_space_memory,
                                     start, MEMTXATTRS_UNSPECIFIED,
                                     NULL, len, FLUSH_CACHE);
}
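/*
 * Example (illustrative sketch, not from the original file): firmware
 * loaders go through address_space_write_rom() so that blobs also land in
 * ROM-device regions that reject ordinary writes.  Names are hypothetical.
 */
static MemTxResult example_load_firmware_blob(const uint8_t *blob, hwaddr size,
                                              hwaddr load_addr)
{
    return address_space_write_rom(&address_space_memory, load_addr,
                                   MEMTXATTRS_UNSPECIFIED, blob, size);
}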
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    /* The data structures we set up here depend on knowing the page size,
     * so no more changes can be made after this point.
     * In an ideal world, nothing we did before we had finished the
     * machine setup would care about the target page size, and we could
     * do this much later, rather than requiring board models to state
     * up front what their requirements are.
     */
    finalize_target_page_bits();
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
                                  bool is_write, MemTxAttrs attrs)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr,
                                hwaddr len, bool is_write,
                                MemTxAttrs attrs)
{
    FlatView *fv;
    bool result;

    rcu_read_lock();
    fv = address_space_to_flatview(as);
    result = flatview_access_valid(fv, addr, len, is_write, attrs);
    rcu_read_unlock();
    return result;
}

static hwaddr
flatview_extend_translation(FlatView *fv, hwaddr addr,
                            hwaddr target_len,
                            MemoryRegion *mr, hwaddr base, hwaddr len,
                            bool is_write, MemTxAttrs attrs)
{
    hwaddr done = 0;
    hwaddr xlat;
    MemoryRegion *this_mr;

    for (;;) {
        target_len -= len;
        addr += len;
        done += len;
        if (target_len == 0) {
            return done;
        }

        len = target_len;
        this_mr = flatview_translate(fv, addr, &xlat,
                                     &len, is_write, attrs);
        if (this_mr != mr || xlat != base + done) {
            return done;
        }
    }
}
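/*
 * Example (illustrative sketch, not from the original file): callers can
 * probe a range with address_space_access_valid() before committing to a
 * transaction.  The function name below is hypothetical.
 */
static bool example_dma_range_ok(AddressSpace *as, hwaddr addr, hwaddr len)
{
    return address_space_access_valid(as, addr, len, true,
                                      MEMTXATTRS_UNSPECIFIED);
}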
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write,
                        MemTxAttrs attrs)
{
    hwaddr len = *plen;
    hwaddr l, xlat;
    MemoryRegion *mr;
    void *ptr;
    FlatView *fv;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    fv = address_space_to_flatview(as);
    mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
                          bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    memory_region_ref(mr);
    *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
                                        l, is_write, attrs);
    ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write,
                             MEMTXATTRS_UNSPECIFIED);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
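/*
 * Example (illustrative sketch, not from the original file): the usual
 * map/unmap pattern for zero-copy DMA.  When the target is MMIO the call
 * may return the single bounce buffer or NULL; callers that get NULL can
 * register a QEMUBH with cpu_register_map_client() and retry when it runs.
 * The function name below is hypothetical.
 */
static void example_dma_write_through_map(AddressSpace *as, hwaddr addr,
                                          const uint8_t *data, hwaddr size)
{
    hwaddr plen = size;
    void *host = address_space_map(as, addr, &plen, true,
                                   MEMTXATTRS_UNSPECIFIED);

    if (!host) {
        return; /* resources exhausted: retry via cpu_register_map_client() */
    }
    memcpy(host, data, plen); /* plen may be smaller than size */
    address_space_unmap(as, host, plen, true, plen);
}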
#define ARG1_DECL                AddressSpace *as
#define ARG1                     as
#define SUFFIX
#define TRANSLATE(...)           address_space_translate(as, __VA_ARGS__)
#define RCU_READ_LOCK(...)       rcu_read_lock()
#define RCU_READ_UNLOCK(...)     rcu_read_unlock()
#include "memory_ldst.inc.c"
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write)
{
    AddressSpaceDispatch *d;
    hwaddr l;
    MemoryRegion *mr;

    assert(len > 0);

    l = len;
    cache->fv = address_space_get_flatview(as);
    d = flatview_to_dispatch(cache->fv);
    cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true);

    mr = cache->mrs.mr;
    memory_region_ref(mr);
    if (memory_access_is_direct(mr, is_write)) {
        /* We don't care about the memory attributes here as we're only
         * doing this if we found actual RAM, which behaves the same
         * regardless of attributes; so UNSPECIFIED is fine.
         */
        l = flatview_extend_translation(cache->fv, addr, len, mr,
                                        cache->xlat, l, is_write,
                                        MEMTXATTRS_UNSPECIFIED);
        cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true);
    } else {
        cache->ptr = NULL;
    }

    cache->len = l;
    cache->is_write = is_write;
    return l;
}

void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len)
{
    assert(cache->is_write);
    if (likely(cache->ptr)) {
        invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len);
    }
}

void address_space_cache_destroy(MemoryRegionCache *cache)
{
    if (!cache->mrs.mr) {
        return;
    }

    if (xen_enabled()) {
        xen_invalidate_map_cache_entry(cache->ptr);
    }
    memory_region_unref(cache->mrs.mr);
    flatview_unref(cache->fv);
    cache->mrs.mr = NULL;
    cache->fv = NULL;
}
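/*
 * Example (illustrative sketch, not from the original file): virtio-style
 * code caches a fixed guest range once and then uses the *_cached accessors,
 * which hit the fast RAM path set up by address_space_cache_init().
 * The function name below is hypothetical.
 */
static uint16_t example_read_ring_index(AddressSpace *as, hwaddr index_pa)
{
    MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
    uint16_t idx = 0;

    if (address_space_cache_init(&cache, as, index_pa, sizeof(idx), false)
        >= (int64_t)sizeof(idx)) {
        idx = lduw_le_phys_cached(&cache, 0);
        address_space_cache_destroy(&cache);
    }
    return idx;
}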
/* Called from RCU critical section.  This function has the same
 * semantics as address_space_translate, but it only works on a
 * predefined range of a MemoryRegion that was mapped with
 * address_space_cache_init.
 */
static inline MemoryRegion *address_space_translate_cached(
    MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat,
    hwaddr *plen, bool is_write, MemTxAttrs attrs)
{
    MemoryRegionSection section;
    MemoryRegion *mr;
    IOMMUMemoryRegion *iommu_mr;
    AddressSpace *target_as;

    assert(!cache->ptr);
    *xlat = addr + cache->xlat;

    mr = cache->mrs.mr;
    iommu_mr = memory_region_get_iommu(mr);
    if (!iommu_mr) {
        /* MMIO region.  */
        return mr;
    }

    section = address_space_translate_iommu(iommu_mr, xlat, plen,
                                            NULL, is_write, true,
                                            &target_as, attrs);
    return section.mr;
}

/* Called from RCU critical section. address_space_read_cached uses this
 * out of line function when the target is an MMIO or IOMMU region.
 */
void
address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
                               void *buf, hwaddr len)
{
    hwaddr addr1, l;
    MemoryRegion *mr;

    l = len;
    mr = address_space_translate_cached(cache, addr, &addr1, &l, false,
                                        MEMTXATTRS_UNSPECIFIED);
    flatview_read_continue(cache->fv,
                           addr, MEMTXATTRS_UNSPECIFIED, buf, len,
                           addr1, l, mr);
}

/* Called from RCU critical section. address_space_write_cached uses this
 * out of line function when the target is an MMIO or IOMMU region.
 */
void
address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
                                const void *buf, hwaddr len)
{
    hwaddr addr1, l;
    MemoryRegion *mr;

    l = len;
    mr = address_space_translate_cached(cache, addr, &addr1, &l, true,
                                        MEMTXATTRS_UNSPECIFIED);
    flatview_write_continue(cache->fv,
                            addr, MEMTXATTRS_UNSPECIFIED, buf, len,
                            addr1, l, mr);
}

#define ARG1_DECL                MemoryRegionCache *cache
#define ARG1                     cache
#define SUFFIX                   _cached_slow
#define TRANSLATE(...)           address_space_translate_cached(cache, __VA_ARGS__)
#define RCU_READ_LOCK()          ((void)0)
#define RCU_READ_UNLOCK()        ((void)0)
#include "memory_ldst.inc.c"
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, target_ulong len, int is_write)
{
    hwaddr phys_addr;
    target_ulong l, page;

    cpu_synchronize_state(cpu);
    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
                                    attrs, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             attrs, buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_size(void)
{
    return TARGET_PAGE_SIZE;
}

int qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

int qemu_target_page_bits_min(void)
{
    return TARGET_PAGE_BITS_MIN;
}
#endif

bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false,
                                 MEMTXATTRS_UNSPECIFIED);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        ret = func(block, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
/*
 * Unmap pages of memory from start to start+length such that
 * they a) read as 0, b) Trigger whatever fault mechanism
 * the OS provides for postcopy.
 * The pages must be unmapped by the end of the function.
 * Returns: 0 on success, non-0 on failure
 */
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
{
    int ret = -1;

    uint8_t *host_startaddr = rb->host + start;

    if ((uintptr_t)host_startaddr & (rb->page_size - 1)) {
        error_report("ram_block_discard_range: Unaligned start address: %p",
                     host_startaddr);
        goto err;
    }

    if ((start + length) <= rb->used_length) {
        bool need_madvise, need_fallocate;
        uint8_t *host_endaddr = host_startaddr + length;
        if ((uintptr_t)host_endaddr & (rb->page_size - 1)) {
            error_report("ram_block_discard_range: Unaligned end address: %p",
                         host_endaddr);
            goto err;
        }

        errno = ENOTSUP; /* If we are missing MADVISE etc */

        /* The logic here is messy;
         *    madvise DONTNEED fails for hugepages
         *    fallocate works on hugepages and shmem
         */
        need_madvise = (rb->page_size == qemu_host_page_size);
        need_fallocate = rb->fd != -1;
        if (need_fallocate) {
            /* For a file, this causes the area of the file to be zero'd
             * if read, and for hugetlbfs also causes it to be unmapped
             * so a userfault will trigger.
             */
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
            ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                            start, length);
            if (ret) {
                ret = -errno;
                error_report("ram_block_discard_range: Failed to fallocate "
                             "%s:%" PRIx64 " +%zx (%d)",
                             rb->idstr, start, length, ret);
                goto err;
            }
#else
            ret = -ENOSYS;
            error_report("ram_block_discard_range: fallocate not available/file"
                         "%s:%" PRIx64 " +%zx (%d)",
                         rb->idstr, start, length, ret);
            goto err;
#endif
        }
        if (need_madvise) {
            /* For normal RAM this causes it to be unmapped,
             * for shared memory it causes the local mapping to disappear
             * and to fall back on the file contents (which we just
             * fallocate'd away).
             */
#if defined(CONFIG_MADVISE)
            ret = madvise(host_startaddr, length, MADV_DONTNEED);
            if (ret) {
                ret = -errno;
                error_report("ram_block_discard_range: Failed to discard range "
                             "%s:%" PRIx64 " +%zx (%d)",
                             rb->idstr, start, length, ret);
                goto err;
            }
#else
            ret = -ENOSYS;
            error_report("ram_block_discard_range: MADVISE not available"
                         "%s:%" PRIx64 " +%zx (%d)",
                         rb->idstr, start, length, ret);
            goto err;
#endif
        }
        trace_ram_block_discard_range(rb->idstr, host_startaddr, length,
                                      need_madvise, need_fallocate, ret);
    } else {
        error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
                     "/%zx/" RAM_ADDR_FMT")",
                     rb->idstr, start, length, rb->used_length);
    }

err:
    return ret;
}
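/*
 * Example (illustrative sketch, not from the original file): postcopy-style
 * callers look a block up by name and then drop a page-aligned range so
 * that a later access faults and can be filled from the migration stream.
 * The function name below is hypothetical.
 */
static int example_drop_ram_pages(const char *block_name, uint64_t start,
                                  size_t length)
{
    RAMBlock *rb = qemu_ram_block_by_name(block_name);

    if (!rb) {
        return -ENOENT;
    }
    /* start and length must be aligned to rb->page_size. */
    return ram_block_discard_range(rb, start, length);
}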
bool ramblock_is_pmem(RAMBlock *rb)
{
    return rb->flags & RAM_PMEM;
}

#endif
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
#if !defined(CONFIG_USER_ONLY)

static void mtree_print_phys_entries(int start, int end, int skip, int ptr)
{
    if (start == end - 1) {
        qemu_printf("\t%3d      ", start);
    } else {
        qemu_printf("\t%3d..%-3d ", start, end - 1);
    }
    qemu_printf(" skip=%d ", skip);
    if (ptr == PHYS_MAP_NODE_NIL) {
        qemu_printf(" ptr=NIL");
    } else if (!skip) {
        qemu_printf(" ptr=#%d", ptr);
    } else {
        qemu_printf(" ptr=[%d]", ptr);
    }
    qemu_printf("\n");
}

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)

void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root)
{
    int i;

    qemu_printf("  Dispatch\n");
    qemu_printf("    Physical sections\n");

    for (i = 0; i < d->map.sections_nb; ++i) {
        MemoryRegionSection *s = d->map.sections + i;
        const char *names[] = { " [unassigned]", " [not dirty]",
                                " [ROM]", " [watch]" };

        qemu_printf("      #%d @" TARGET_FMT_plx ".." TARGET_FMT_plx
                    " %s%s%s%s%s",
            i,
            s->offset_within_address_space,
            s->offset_within_address_space + MR_SIZE(s->mr->size),
            s->mr->name ? s->mr->name : "(noname)",
            i < ARRAY_SIZE(names) ? names[i] : "",
            s->mr == root ? " [ROOT]" : "",
            s == d->mru_section ? " [MRU]" : "",
            s->mr->is_iommu ? " [iommu]" : "");

        if (s->mr->alias) {
            qemu_printf(" alias=%s", s->mr->alias->name ?
                    s->mr->alias->name : "noname");
        }
        qemu_printf("\n");
    }

    qemu_printf("    Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n",
               P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip);
    for (i = 0; i < d->map.nodes_nb; ++i) {
        int j, jprev;
        PhysPageEntry prev;
        Node *n = d->map.nodes + i;

        qemu_printf("      [%d]\n", i);

        for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) {
            PhysPageEntry *pe = *n + j;

            if (pe->ptr == prev.ptr && pe->skip == prev.skip) {
                continue;
            }

            mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);

            jprev = j;
            prev = *pe;
        }

        if (jprev != ARRAY_SIZE(*n)) {
            mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);
        }
    }
}