/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace-root.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#ifdef TARGET_PAGE_BITS_VARY
int target_page_bits;
bool target_page_bits_decided;
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
bool set_preferred_target_page_bits(int bits)
{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger. And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
            return false;
        }
        target_page_bits = bits;
    }
#endif
    return true;
}

#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
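/* The dispatch map is a radix tree over the full 64-bit physical address
 * space: each level consumes P_L2_BITS of the page-frame number, so
 * P_L2_LEVELS rounds up (ADDR_SPACE_BITS - TARGET_PAGE_BITS) / P_L2_BITS.
 * For instance, assuming 4 KiB target pages and 9-bit levels, this works
 * out to (64 - 12 - 1) / 9 + 1 = 6 levels.
 */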
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)

typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
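/* These fixed section indexes refer to the dummy MemoryRegionSections that
 * mem_begin() registers, in this order, for every new AddressSpaceDispatch;
 * the asserts there keep this numbering in sync.
 */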
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#if !defined(CONFIG_USER_ONLY)
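/* Make sure the node array can hold at least @nodes more entries; the array
 * grows geometrically, and the first allocation is seeded from a static hint
 * that is carried over between dispatch rebuilds.
 */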
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
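/* phys_page_set_level() below handles one level of the radix tree: it
 * allocates a child node on demand, maps whole @step-aligned blocks of pages
 * directly at this level, and recurses for partial blocks until it reaches
 * the leaves.
 */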
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];

    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}
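/* phys_page_find() walks the radix tree: each iteration consumes lp.skip
 * levels of the index, a NIL pointer short-circuits to the unassigned
 * section, and the final leaf is re-checked against the address because a
 * compressed (skip > 1) path may land on a leaf that does not cover it.
 */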
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    }

    return &sections[PHYS_SECTION_UNASSIGNED];
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write)
{
    IOMMUTLBEntry iotlb = {0};
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_lookup_region(d, addr, false);
        addr = addr - section->offset_within_address_space
               + section->offset_within_region;
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        if (!(iotlb.perm & (1 << is_write))) {
            iotlb.target_as = NULL;
            break;
        }

        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        as = iotlb.target_as;
    }

    return iotlb;
}
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
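/* The *_for_iotlb variant below is used when filling the CPU TLB: unlike
 * address_space_translate() it does not resolve subpages, and it must never
 * hit an IOMMU region, which its assert enforces.
 */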
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
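/* The two subsections above are only put on the wire when their .needed
 * callback returns true, so the migration stream stays compatible with older
 * QEMU versions whenever the corresponding state is still at its default.
 */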
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}
void cpu_exec_initfn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif
}

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    /* Flush the whole TB as this will not have race conditions
     * even if we don't have proper locking yet.
     * Ideally we would just invalidate the TBs for the
     * specific PC.
     */
    tb_flush(cpu);
}
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
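/* Note on the mechanism: inserting or removing a watchpoint flushes the TLB
 * entry for the watched page, so the next access refills it via
 * memory_region_section_get_iotlb() and is redirected to the watch_mem_ops
 * slow path (the "TLB tricks" described further down in this file).
 */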
785 /* Remove a specific watchpoint. */
786 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
791 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
792 if (addr
== wp
->vaddr
&& len
== wp
->len
793 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
794 cpu_watchpoint_remove_by_ref(cpu
, wp
);
801 /* Remove a specific watchpoint by reference. */
802 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
804 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
806 tlb_flush_page(cpu
, watchpoint
->vaddr
);
811 /* Remove all matching watchpoints. */
812 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
814 CPUWatchpoint
*wp
, *next
;
816 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
817 if (wp
->flags
& mask
) {
818 cpu_watchpoint_remove_by_ref(cpu
, wp
);
823 /* Return true if this watchpoint address matches the specified
824 * access (ie the address range covered by the watchpoint overlaps
825 * partially or completely with the address range covered by the
828 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint
*wp
,
832 /* We know the lengths are non-zero, but a little caution is
833 * required to avoid errors in the case where the range ends
834 * exactly at the top of the address space and so addr + len
835 * wraps round to zero.
837 vaddr wpend
= wp
->vaddr
+ wp
->len
- 1;
838 vaddr addrend
= addr
+ len
- 1;
840 return !(addr
> wpend
|| wp
->vaddr
> addrend
);
845 /* Add a breakpoint. */
846 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
847 CPUBreakpoint
**breakpoint
)
851 bp
= g_malloc(sizeof(*bp
));
856 /* keep all GDB-injected breakpoints in front */
857 if (flags
& BP_GDB
) {
858 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
860 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
863 breakpoint_invalidate(cpu
, pc
);
871 /* Remove a specific breakpoint. */
872 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
876 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
877 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
878 cpu_breakpoint_remove_by_ref(cpu
, bp
);
885 /* Remove a specific breakpoint by reference. */
886 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
888 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
890 breakpoint_invalidate(cpu
, breakpoint
->pc
);
895 /* Remove all matching breakpoints. */
896 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
898 CPUBreakpoint
*bp
, *next
;
900 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
901 if (bp
->flags
& mask
) {
902 cpu_breakpoint_remove_by_ref(cpu
, bp
);
907 /* enable or disable single step mode. EXCP_DEBUG is returned by the
908 CPU loop after each instruction */
909 void cpu_single_step(CPUState
*cpu
, int enabled
)
911 if (cpu
->singlestep_enabled
!= enabled
) {
912 cpu
->singlestep_enabled
= enabled
;
914 kvm_update_guest_debug(cpu
, 0);
916 /* must flush all the translated code to avoid inconsistencies */
917 /* XXX: only flush what is necessary */
923 void cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
930 fprintf(stderr
, "qemu: fatal: ");
931 vfprintf(stderr
, fmt
, ap
);
932 fprintf(stderr
, "\n");
933 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
934 if (qemu_log_separate()) {
936 qemu_log("qemu: fatal: ");
937 qemu_log_vprintf(fmt
, ap2
);
939 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
947 #if defined(CONFIG_USER_ONLY)
949 struct sigaction act
;
950 sigfillset(&act
.sa_mask
);
951 act
.sa_handler
= SIG_DFL
;
952 sigaction(SIGABRT
, &act
, NULL
);
958 #if !defined(CONFIG_USER_ONLY)
959 /* Called from RCU critical section */
960 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
964 block
= atomic_rcu_read(&ram_list
.mru_block
);
965 if (block
&& addr
- block
->offset
< block
->max_length
) {
968 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
969 if (addr
- block
->offset
< block
->max_length
) {
974 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
978 /* It is safe to write mru_block outside the iothread lock. This
983 * xxx removed from list
987 * call_rcu(reclaim_ramblock, xxx);
990 * atomic_rcu_set is not needed here. The block was already published
991 * when it was placed into the list. Here we're just making an extra
992 * copy of the pointer.
994 ram_list
.mru_block
= block
;
998 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
1005 end
= TARGET_PAGE_ALIGN(start
+ length
);
1006 start
&= TARGET_PAGE_MASK
;
1009 block
= qemu_get_ram_block(start
);
1010 assert(block
== qemu_get_ram_block(end
- 1));
1011 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
1013 tlb_reset_dirty(cpu
, start1
, length
);
1018 /* Note: start and end must be within the same ram block. */
1019 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
1023 DirtyMemoryBlocks
*blocks
;
1024 unsigned long end
, page
;
1031 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
1032 page
= start
>> TARGET_PAGE_BITS
;
1036 blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[client
]);
1038 while (page
< end
) {
1039 unsigned long idx
= page
/ DIRTY_MEMORY_BLOCK_SIZE
;
1040 unsigned long offset
= page
% DIRTY_MEMORY_BLOCK_SIZE
;
1041 unsigned long num
= MIN(end
- page
, DIRTY_MEMORY_BLOCK_SIZE
- offset
);
1043 dirty
|= bitmap_test_and_clear_atomic(blocks
->blocks
[idx
],
1050 if (dirty
&& tcg_enabled()) {
1051 tlb_reset_dirty_range_all(start
, length
);
1057 /* Called from RCU critical section */
1058 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
1059 MemoryRegionSection
*section
,
1061 hwaddr paddr
, hwaddr xlat
,
1063 target_ulong
*address
)
1068 if (memory_region_is_ram(section
->mr
)) {
1070 iotlb
= memory_region_get_ram_addr(section
->mr
) + xlat
;
1071 if (!section
->readonly
) {
1072 iotlb
|= PHYS_SECTION_NOTDIRTY
;
1074 iotlb
|= PHYS_SECTION_ROM
;
1077 AddressSpaceDispatch
*d
;
1079 d
= atomic_rcu_read(§ion
->address_space
->dispatch
);
1080 iotlb
= section
- d
->map
.sections
;
1084 /* Make accesses to pages with watchpoints go via the
1085 watchpoint trap routines. */
1086 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1087 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
1088 /* Avoid trapping reads of pages with a write breakpoint. */
1089 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1090 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
1091 *address
|= TLB_MMIO
;
1099 #endif /* defined(CONFIG_USER_ONLY) */
1101 #if !defined(CONFIG_USER_ONLY)
1103 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1105 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
1107 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
1108 qemu_anon_ram_alloc
;
1111 * Set a custom physical guest memory alloator.
1112 * Accelerators with unusual needs may need this. Hopefully, we can
1113 * get rid of it eventually.
1115 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1117 phys_mem_alloc
= alloc
;
1120 static uint16_t phys_section_add(PhysPageMap
*map
,
1121 MemoryRegionSection
*section
)
1123 /* The physical section number is ORed with a page-aligned
1124 * pointer to produce the iotlb entries. Thus it should
1125 * never overflow into the page-aligned value.
1127 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1129 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1130 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1131 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1132 map
->sections_nb_alloc
);
1134 map
->sections
[map
->sections_nb
] = *section
;
1135 memory_region_ref(section
->mr
);
1136 return map
->sections_nb
++;
1139 static void phys_section_destroy(MemoryRegion
*mr
)
1141 bool have_sub_page
= mr
->subpage
;
1143 memory_region_unref(mr
);
1145 if (have_sub_page
) {
1146 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1147 object_unref(OBJECT(&subpage
->iomem
));
1152 static void phys_sections_free(PhysPageMap
*map
)
1154 while (map
->sections_nb
> 0) {
1155 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1156 phys_section_destroy(section
->mr
);
1158 g_free(map
->sections
);
1162 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1165 hwaddr base
= section
->offset_within_address_space
1167 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1168 d
->map
.nodes
, d
->map
.sections
);
1169 MemoryRegionSection subsection
= {
1170 .offset_within_address_space
= base
,
1171 .size
= int128_make64(TARGET_PAGE_SIZE
),
1175 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1177 if (!(existing
->mr
->subpage
)) {
1178 subpage
= subpage_init(d
->as
, base
);
1179 subsection
.address_space
= d
->as
;
1180 subsection
.mr
= &subpage
->iomem
;
1181 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1182 phys_section_add(&d
->map
, &subsection
));
1184 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1186 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1187 end
= start
+ int128_get64(section
->size
) - 1;
1188 subpage_register(subpage
, start
, end
,
1189 phys_section_add(&d
->map
, section
));
1193 static void register_multipage(AddressSpaceDispatch
*d
,
1194 MemoryRegionSection
*section
)
1196 hwaddr start_addr
= section
->offset_within_address_space
;
1197 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1198 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1202 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1205 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1207 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1208 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1209 MemoryRegionSection now
= *section
, remain
= *section
;
1210 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1212 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1213 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1214 - now
.offset_within_address_space
;
1216 now
.size
= int128_min(int128_make64(left
), now
.size
);
1217 register_subpage(d
, &now
);
1219 now
.size
= int128_zero();
1221 while (int128_ne(remain
.size
, now
.size
)) {
1222 remain
.size
= int128_sub(remain
.size
, now
.size
);
1223 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1224 remain
.offset_within_region
+= int128_get64(now
.size
);
1226 if (int128_lt(remain
.size
, page_size
)) {
1227 register_subpage(d
, &now
);
1228 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1229 now
.size
= page_size
;
1230 register_subpage(d
, &now
);
1232 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1233 register_multipage(d
, &now
);
1238 void qemu_flush_coalesced_mmio_buffer(void)
1241 kvm_flush_coalesced_mmio_buffer();
1244 void qemu_mutex_lock_ramlist(void)
1246 qemu_mutex_lock(&ram_list
.mutex
);
1249 void qemu_mutex_unlock_ramlist(void)
1251 qemu_mutex_unlock(&ram_list
.mutex
);
1255 static int64_t get_file_size(int fd
)
1257 int64_t size
= lseek(fd
, 0, SEEK_END
);
1264 static void *file_ram_alloc(RAMBlock
*block
,
1269 bool unlink_on_error
= false;
1271 char *sanitized_name
;
1273 void *area
= MAP_FAILED
;
1277 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1279 "host lacks kvm mmu notifiers, -mem-path unsupported");
1284 fd
= open(path
, O_RDWR
);
1286 /* @path names an existing file, use it */
1289 if (errno
== ENOENT
) {
1290 /* @path names a file that doesn't exist, create it */
1291 fd
= open(path
, O_RDWR
| O_CREAT
| O_EXCL
, 0644);
1293 unlink_on_error
= true;
1296 } else if (errno
== EISDIR
) {
1297 /* @path names a directory, create a file there */
1298 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1299 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1300 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1306 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1308 g_free(sanitized_name
);
1310 fd
= mkstemp(filename
);
1318 if (errno
!= EEXIST
&& errno
!= EINTR
) {
1319 error_setg_errno(errp
, errno
,
1320 "can't open backing store %s for guest RAM",
1325 * Try again on EINTR and EEXIST. The latter happens when
1326 * something else creates the file between our two open().
1330 block
->page_size
= qemu_fd_getpagesize(fd
);
1331 block
->mr
->align
= block
->page_size
;
1332 #if defined(__s390x__)
1333 if (kvm_enabled()) {
1334 block
->mr
->align
= MAX(block
->mr
->align
, QEMU_VMALLOC_ALIGN
);
1338 file_size
= get_file_size(fd
);
1340 if (memory
< block
->page_size
) {
1341 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1342 "or larger than page size 0x%zx",
1343 memory
, block
->page_size
);
1347 if (file_size
> 0 && file_size
< memory
) {
1348 error_setg(errp
, "backing store %s size 0x%" PRIx64
1349 " does not match 'size' option 0x" RAM_ADDR_FMT
,
1350 path
, file_size
, memory
);
1354 memory
= ROUND_UP(memory
, block
->page_size
);
1357 * ftruncate is not supported by hugetlbfs in older
1358 * hosts, so don't bother bailing out on errors.
1359 * If anything goes wrong with it under other filesystems,
1362 * Do not truncate the non-empty backend file to avoid corrupting
1363 * the existing data in the file. Disabling shrinking is not
1364 * enough. For example, the current vNVDIMM implementation stores
1365 * the guest NVDIMM labels at the end of the backend file. If the
1366 * backend file is later extended, QEMU will not be able to find
1367 * those labels. Therefore, extending the non-empty backend file
1368 * is disabled as well.
1370 if (!file_size
&& ftruncate(fd
, memory
)) {
1371 perror("ftruncate");
1374 area
= qemu_ram_mmap(fd
, memory
, block
->mr
->align
,
1375 block
->flags
& RAM_SHARED
);
1376 if (area
== MAP_FAILED
) {
1377 error_setg_errno(errp
, errno
,
1378 "unable to map backing store for guest RAM");
1383 os_mem_prealloc(fd
, area
, memory
, errp
);
1384 if (errp
&& *errp
) {
1393 if (area
!= MAP_FAILED
) {
1394 qemu_ram_munmap(area
, memory
);
1396 if (unlink_on_error
) {
1406 /* Called with the ramlist lock held. */
1407 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1409 RAMBlock
*block
, *next_block
;
1410 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1412 assert(size
!= 0); /* it would hand out same offset multiple times */
1414 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1418 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1419 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1421 end
= block
->offset
+ block
->max_length
;
1423 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1424 if (next_block
->offset
>= end
) {
1425 next
= MIN(next
, next_block
->offset
);
1428 if (next
- end
>= size
&& next
- end
< mingap
) {
1430 mingap
= next
- end
;
1434 if (offset
== RAM_ADDR_MAX
) {
1435 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1443 ram_addr_t
last_ram_offset(void)
1446 ram_addr_t last
= 0;
1449 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1450 last
= MAX(last
, block
->offset
+ block
->max_length
);
1456 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1460 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1461 if (!machine_dump_guest_core(current_machine
)) {
1462 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1464 perror("qemu_madvise");
1465 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1466 "but dump_guest_core=off specified\n");
1471 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1476 /* Called with iothread lock held. */
1477 void qemu_ram_set_idstr(RAMBlock
*new_block
, const char *name
, DeviceState
*dev
)
1482 assert(!new_block
->idstr
[0]);
1485 char *id
= qdev_get_dev_path(dev
);
1487 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1491 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1494 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1495 if (block
!= new_block
&&
1496 !strcmp(block
->idstr
, new_block
->idstr
)) {
1497 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1505 /* Called with iothread lock held. */
1506 void qemu_ram_unset_idstr(RAMBlock
*block
)
1508 /* FIXME: arch_init.c assumes that this is not called throughout
1509 * migration. Ignore the problem since hot-unplug during migration
1510 * does not work anyway.
1513 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1517 size_t qemu_ram_pagesize(RAMBlock
*rb
)
1519 return rb
->page_size
;
1522 static int memory_try_enable_merging(void *addr
, size_t len
)
1524 if (!machine_mem_merge(current_machine
)) {
1525 /* disabled by the user */
1529 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1532 /* Only legal before guest might have detected the memory size: e.g. on
1533 * incoming migration, or right after reset.
1535 * As memory core doesn't know how is memory accessed, it is up to
1536 * resize callback to update device state and/or add assertions to detect
1537 * misuse, if necessary.
1539 int qemu_ram_resize(RAMBlock
*block
, ram_addr_t newsize
, Error
**errp
)
1543 newsize
= HOST_PAGE_ALIGN(newsize
);
1545 if (block
->used_length
== newsize
) {
1549 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1550 error_setg_errno(errp
, EINVAL
,
1551 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1552 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1553 newsize
, block
->used_length
);
1557 if (block
->max_length
< newsize
) {
1558 error_setg_errno(errp
, EINVAL
,
1559 "Length too large: %s: 0x" RAM_ADDR_FMT
1560 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1561 newsize
, block
->max_length
);
1565 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1566 block
->used_length
= newsize
;
1567 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1569 memory_region_set_size(block
->mr
, newsize
);
1570 if (block
->resized
) {
1571 block
->resized(block
->idstr
, newsize
, block
->host
);
1576 /* Called with ram_list.mutex held */
1577 static void dirty_memory_extend(ram_addr_t old_ram_size
,
1578 ram_addr_t new_ram_size
)
1580 ram_addr_t old_num_blocks
= DIV_ROUND_UP(old_ram_size
,
1581 DIRTY_MEMORY_BLOCK_SIZE
);
1582 ram_addr_t new_num_blocks
= DIV_ROUND_UP(new_ram_size
,
1583 DIRTY_MEMORY_BLOCK_SIZE
);
1586 /* Only need to extend if block count increased */
1587 if (new_num_blocks
<= old_num_blocks
) {
1591 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1592 DirtyMemoryBlocks
*old_blocks
;
1593 DirtyMemoryBlocks
*new_blocks
;
1596 old_blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[i
]);
1597 new_blocks
= g_malloc(sizeof(*new_blocks
) +
1598 sizeof(new_blocks
->blocks
[0]) * new_num_blocks
);
1600 if (old_num_blocks
) {
1601 memcpy(new_blocks
->blocks
, old_blocks
->blocks
,
1602 old_num_blocks
* sizeof(old_blocks
->blocks
[0]));
1605 for (j
= old_num_blocks
; j
< new_num_blocks
; j
++) {
1606 new_blocks
->blocks
[j
] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE
);
1609 atomic_rcu_set(&ram_list
.dirty_memory
[i
], new_blocks
);
1612 g_free_rcu(old_blocks
, rcu
);
1617 static void ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1620 RAMBlock
*last_block
= NULL
;
1621 ram_addr_t old_ram_size
, new_ram_size
;
1624 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1626 qemu_mutex_lock_ramlist();
1627 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1629 if (!new_block
->host
) {
1630 if (xen_enabled()) {
1631 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1632 new_block
->mr
, &err
);
1634 error_propagate(errp
, err
);
1635 qemu_mutex_unlock_ramlist();
1639 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1640 &new_block
->mr
->align
);
1641 if (!new_block
->host
) {
1642 error_setg_errno(errp
, errno
,
1643 "cannot set up guest memory '%s'",
1644 memory_region_name(new_block
->mr
));
1645 qemu_mutex_unlock_ramlist();
1648 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1652 new_ram_size
= MAX(old_ram_size
,
1653 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1654 if (new_ram_size
> old_ram_size
) {
1655 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1656 dirty_memory_extend(old_ram_size
, new_ram_size
);
1658 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1659 * QLIST (which has an RCU-friendly variant) does not have insertion at
1660 * tail, so save the last element in last_block.
1662 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1664 if (block
->max_length
< new_block
->max_length
) {
1669 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1670 } else if (last_block
) {
1671 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1672 } else { /* list is empty */
1673 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1675 ram_list
.mru_block
= NULL
;
1677 /* Write list before version */
1680 qemu_mutex_unlock_ramlist();
1682 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1683 new_block
->used_length
,
1686 if (new_block
->host
) {
1687 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1688 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1689 /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
1690 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1691 ram_block_notify_add(new_block
->host
, new_block
->max_length
);
1696 RAMBlock
*qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1697 bool share
, const char *mem_path
,
1700 RAMBlock
*new_block
;
1701 Error
*local_err
= NULL
;
1703 if (xen_enabled()) {
1704 error_setg(errp
, "-mem-path not supported with Xen");
1708 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1710 * file_ram_alloc() needs to allocate just like
1711 * phys_mem_alloc, but we haven't bothered to provide
1715 "-mem-path not supported with this accelerator");
1719 size
= HOST_PAGE_ALIGN(size
);
1720 new_block
= g_malloc0(sizeof(*new_block
));
1722 new_block
->used_length
= size
;
1723 new_block
->max_length
= size
;
1724 new_block
->flags
= share
? RAM_SHARED
: 0;
1725 new_block
->host
= file_ram_alloc(new_block
, size
,
1727 if (!new_block
->host
) {
1732 ram_block_add(new_block
, &local_err
);
1735 error_propagate(errp
, local_err
);
1743 RAMBlock
*qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1744 void (*resized
)(const char*,
1747 void *host
, bool resizeable
,
1748 MemoryRegion
*mr
, Error
**errp
)
1750 RAMBlock
*new_block
;
1751 Error
*local_err
= NULL
;
1753 size
= HOST_PAGE_ALIGN(size
);
1754 max_size
= HOST_PAGE_ALIGN(max_size
);
1755 new_block
= g_malloc0(sizeof(*new_block
));
1757 new_block
->resized
= resized
;
1758 new_block
->used_length
= size
;
1759 new_block
->max_length
= max_size
;
1760 assert(max_size
>= size
);
1762 new_block
->page_size
= getpagesize();
1763 new_block
->host
= host
;
1765 new_block
->flags
|= RAM_PREALLOC
;
1768 new_block
->flags
|= RAM_RESIZEABLE
;
1770 ram_block_add(new_block
, &local_err
);
1773 error_propagate(errp
, local_err
);
1779 RAMBlock
*qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1780 MemoryRegion
*mr
, Error
**errp
)
1782 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1785 RAMBlock
*qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1787 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1790 RAMBlock
*qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1791 void (*resized
)(const char*,
1794 MemoryRegion
*mr
, Error
**errp
)
1796 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1799 static void reclaim_ramblock(RAMBlock
*block
)
1801 if (block
->flags
& RAM_PREALLOC
) {
1803 } else if (xen_enabled()) {
1804 xen_invalidate_map_cache_entry(block
->host
);
1806 } else if (block
->fd
>= 0) {
1807 qemu_ram_munmap(block
->host
, block
->max_length
);
1811 qemu_anon_ram_free(block
->host
, block
->max_length
);
1816 void qemu_ram_free(RAMBlock
*block
)
1823 ram_block_notify_remove(block
->host
, block
->max_length
);
1826 qemu_mutex_lock_ramlist();
1827 QLIST_REMOVE_RCU(block
, next
);
1828 ram_list
.mru_block
= NULL
;
1829 /* Write list before version */
1832 call_rcu(block
, reclaim_ramblock
, rcu
);
1833 qemu_mutex_unlock_ramlist();
1837 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1844 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1845 offset
= addr
- block
->offset
;
1846 if (offset
< block
->max_length
) {
1847 vaddr
= ramblock_ptr(block
, offset
);
1848 if (block
->flags
& RAM_PREALLOC
) {
1850 } else if (xen_enabled()) {
1854 if (block
->fd
>= 0) {
1855 flags
|= (block
->flags
& RAM_SHARED
?
1856 MAP_SHARED
: MAP_PRIVATE
);
1857 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1858 flags
, block
->fd
, offset
);
1861 * Remap needs to match alloc. Accelerators that
1862 * set phys_mem_alloc never remap. If they did,
1863 * we'd need a remap hook here.
1865 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1867 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1868 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1871 if (area
!= vaddr
) {
1872 fprintf(stderr
, "Could not remap addr: "
1873 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1877 memory_try_enable_merging(vaddr
, length
);
1878 qemu_ram_setup_dump(vaddr
, length
);
1883 #endif /* !_WIN32 */
1885 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1886 * This should not be used for general purpose DMA. Use address_space_map
1887 * or address_space_rw instead. For local memory (e.g. video ram) that the
1888 * device owns, use memory_region_get_ram_ptr.
1890 * Called within RCU critical section.
1892 void *qemu_map_ram_ptr(RAMBlock
*ram_block
, ram_addr_t addr
)
1894 RAMBlock
*block
= ram_block
;
1896 if (block
== NULL
) {
1897 block
= qemu_get_ram_block(addr
);
1898 addr
-= block
->offset
;
1901 if (xen_enabled() && block
->host
== NULL
) {
1902 /* We need to check if the requested address is in the RAM
1903 * because we don't want to map the entire memory in QEMU.
1904 * In that case just map until the end of the page.
1906 if (block
->offset
== 0) {
1907 return xen_map_cache(addr
, 0, 0);
1910 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1912 return ramblock_ptr(block
, addr
);
1915 /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
1916 * but takes a size argument.
1918 * Called within RCU critical section.
1920 static void *qemu_ram_ptr_length(RAMBlock
*ram_block
, ram_addr_t addr
,
1923 RAMBlock
*block
= ram_block
;
1928 if (block
== NULL
) {
1929 block
= qemu_get_ram_block(addr
);
1930 addr
-= block
->offset
;
1932 *size
= MIN(*size
, block
->max_length
- addr
);
1934 if (xen_enabled() && block
->host
== NULL
) {
1935 /* We need to check if the requested address is in the RAM
1936 * because we don't want to map the entire memory in QEMU.
1937 * In that case just map the requested area.
1939 if (block
->offset
== 0) {
1940 return xen_map_cache(addr
, *size
, 1);
1943 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1946 return ramblock_ptr(block
, addr
);
1950 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1953 * ptr: Host pointer to look up
1954 * round_offset: If true round the result offset down to a page boundary
1955 * *ram_addr: set to result ram_addr
1956 * *offset: set to result offset within the RAMBlock
1958 * Returns: RAMBlock (or NULL if not found)
1960 * By the time this function returns, the returned pointer is not protected
1961 * by RCU anymore. If the caller is not within an RCU critical section and
1962 * does not hold the iothread lock, it must have other means of protecting the
1963 * pointer, such as a reference to the region that includes the incoming
1966 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1970 uint8_t *host
= ptr
;
1972 if (xen_enabled()) {
1973 ram_addr_t ram_addr
;
1975 ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1976 block
= qemu_get_ram_block(ram_addr
);
1978 *offset
= ram_addr
- block
->offset
;
1985 block
= atomic_rcu_read(&ram_list
.mru_block
);
1986 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1990 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1991 /* This case append when the block is not mapped. */
1992 if (block
->host
== NULL
) {
1995 if (host
- block
->host
< block
->max_length
) {
2004 *offset
= (host
- block
->host
);
2006 *offset
&= TARGET_PAGE_MASK
;
2013 * Finds the named RAMBlock
2015 * name: The name of RAMBlock to find
2017 * Returns: RAMBlock (or NULL if not found)
2019 RAMBlock
*qemu_ram_block_by_name(const char *name
)
2023 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
2024 if (!strcmp(name
, block
->idstr
)) {
2032 /* Some of the softmmu routines need to translate from a host pointer
2033 (typically a TLB entry) back to a ram offset. */
2034 ram_addr_t
qemu_ram_addr_from_host(void *ptr
)
2039 block
= qemu_ram_block_from_host(ptr
, false, &offset
);
2041 return RAM_ADDR_INVALID
;
2044 return block
->offset
+ offset
;
2047 /* Called within RCU critical section. */
2048 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
2049 uint64_t val
, unsigned size
)
2051 bool locked
= false;
2053 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
2056 tb_invalidate_phys_page_fast(ram_addr
, size
);
2060 stb_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
2063 stw_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
2066 stl_p(qemu_map_ram_ptr(NULL
, ram_addr
), val
);
2076 /* Set both VGA and migration bits for simplicity and to remove
2077 * the notdirty callback faster.
2079 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
2080 DIRTY_CLIENTS_NOCODE
);
2081 /* we remove the notdirty callback only if the code has been
2083 if (!cpu_physical_memory_is_clean(ram_addr
)) {
2084 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
2088 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
2089 unsigned size
, bool is_write
)
2094 static const MemoryRegionOps notdirty_mem_ops
= {
2095 .write
= notdirty_mem_write
,
2096 .valid
.accepts
= notdirty_mem_accepts
,
2097 .endianness
= DEVICE_NATIVE_ENDIAN
,
2100 /* Generate a debug exception if a watchpoint has been hit. */
2101 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2103 CPUState
*cpu
= current_cpu
;
2104 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
2105 CPUArchState
*env
= cpu
->env_ptr
;
2106 target_ulong pc
, cs_base
;
2111 if (cpu
->watchpoint_hit
) {
2112 /* We re-entered the check after replacing the TB. Now raise
2113 * the debug interrupt so that is will trigger after the
2114 * current instruction. */
2115 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2118 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2119 vaddr
= cc
->adjust_watchpoint_address(cpu
, vaddr
, len
);
2120 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2121 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2122 && (wp
->flags
& flags
)) {
2123 if (flags
== BP_MEM_READ
) {
2124 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2126 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2128 wp
->hitaddr
= vaddr
;
2129 wp
->hitattrs
= attrs
;
2130 if (!cpu
->watchpoint_hit
) {
2131 if (wp
->flags
& BP_CPU
&&
2132 !cc
->debug_check_watchpoint(cpu
, wp
)) {
2133 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2136 cpu
->watchpoint_hit
= wp
;
2138 /* Both tb_lock and iothread_mutex will be reset when
2139 * cpu_loop_exit or cpu_loop_exit_noexc longjmp
2140 * back into the cpu_exec main loop.
2143 tb_check_watchpoint(cpu
);
2144 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2145 cpu
->exception_index
= EXCP_DEBUG
;
2148 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2149 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2150 cpu_loop_exit_noexc(cpu
);
2154 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2159 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2160 so these check for a hit then pass through to the normal out-of-line
2162 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2163 unsigned size
, MemTxAttrs attrs
)
2167 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2168 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2170 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2173 data
= address_space_ldub(as
, addr
, attrs
, &res
);
2176 data
= address_space_lduw(as
, addr
, attrs
, &res
);
2179 data
= address_space_ldl(as
, addr
, attrs
, &res
);
2187 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2188 uint64_t val
, unsigned size
,
2192 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2193 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2195 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2198 address_space_stb(as
, addr
, val
, attrs
, &res
);
2201 address_space_stw(as
, addr
, val
, attrs
, &res
);
2204 address_space_stl(as
, addr
, val
, attrs
, &res
);
2211 static const MemoryRegionOps watch_mem_ops
= {
2212 .read_with_attrs
= watch_mem_read
,
2213 .write_with_attrs
= watch_mem_write
,
2214 .endianness
= DEVICE_NATIVE_ENDIAN
,
2217 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2218 unsigned len
, MemTxAttrs attrs
)
2220 subpage_t
*subpage
= opaque
;
2224 #if defined(DEBUG_SUBPAGE)
2225 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2226 subpage
, len
, addr
);
2228 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2235 *data
= ldub_p(buf
);
2238 *data
= lduw_p(buf
);
2251 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2252 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2254 subpage_t
*subpage
= opaque
;
2257 #if defined(DEBUG_SUBPAGE)
2258 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2259 " value %"PRIx64
"\n",
2260 __func__
, subpage
, len
, addr
, value
);
2278 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2282 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2283 unsigned len
, bool is_write
)
2285 subpage_t
*subpage
= opaque
;
2286 #if defined(DEBUG_SUBPAGE)
2287 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2288 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2291 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2295 static const MemoryRegionOps subpage_ops
= {
2296 .read_with_attrs
= subpage_read
,
2297 .write_with_attrs
= subpage_write
,
2298 .impl
.min_access_size
= 1,
2299 .impl
.max_access_size
= 8,
2300 .valid
.min_access_size
= 1,
2301 .valid
.max_access_size
= 8,
2302 .valid
.accepts
= subpage_accepts
,
2303 .endianness
= DEVICE_NATIVE_ENDIAN
,
2306 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2311 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2313 idx
= SUBPAGE_IDX(start
);
2314 eidx
= SUBPAGE_IDX(end
);
2315 #if defined(DEBUG_SUBPAGE)
2316 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2317 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2319 for (; idx
<= eidx
; idx
++) {
2320 mmio
->sub_section
[idx
] = section
;
2326 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2330 mmio
= g_malloc0(sizeof(subpage_t
) + TARGET_PAGE_SIZE
* sizeof(uint16_t));
2333 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2334 NULL
, TARGET_PAGE_SIZE
);
2335 mmio
->iomem
.subpage
= true;
2336 #if defined(DEBUG_SUBPAGE)
2337 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2338 mmio
, base
, TARGET_PAGE_SIZE
);
2340 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2345 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2349 MemoryRegionSection section
= {
2350 .address_space
= as
,
2352 .offset_within_address_space
= 0,
2353 .offset_within_region
= 0,
2354 .size
= int128_2_64(),
2357 return phys_section_add(map
, §ion
);
2360 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
, MemTxAttrs attrs
)
2362 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
2363 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[asidx
];
2364 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2365 MemoryRegionSection
*sections
= d
->map
.sections
;
2367 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2370 static void io_mem_init(void)
2372 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2373 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2376 /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
2377 * which can be called without the iothread mutex.
2379 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2381 memory_region_clear_global_locking(&io_mem_notdirty
);
2383 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2387 static void mem_begin(MemoryListener
*listener
)
2389 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2390 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2393 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2394 assert(n
== PHYS_SECTION_UNASSIGNED
);
2395 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2396 assert(n
== PHYS_SECTION_NOTDIRTY
);
2397 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2398 assert(n
== PHYS_SECTION_ROM
);
2399 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2400 assert(n
== PHYS_SECTION_WATCH
);
2402 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2404 as
->next_dispatch
= d
;
2407 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2409 phys_sections_free(&d
->map
);
2413 static void mem_commit(MemoryListener
*listener
)
2415 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2416 AddressSpaceDispatch
*cur
= as
->dispatch
;
2417 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2419 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2421 atomic_rcu_set(&as
->dispatch
, next
);
2423 call_rcu(cur
, address_space_dispatch_free
, rcu
);
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    atomic_rcu_set(&cpuas->memory_dispatch, d);
    tlb_flush(cpuas->cpu);
}
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}
void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}
void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
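/*
 * Illustrative sketch (not part of the original file, hence #if 0): how a
 * board model typically plugs RAM into the system memory tree built above.
 * The region name and the 128MB size are made up for the example.
 */
#if 0
static void example_board_ram_init(void)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    /* Allocate guest RAM and map it at guest physical address 0. */
    memory_region_init_ram(ram, NULL, "example.ram", 128 * 1024 * 1024,
                           &error_fatal);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif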
#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }

    return l;
}
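/*
 * Worked example (added for illustration): for a 4-byte access at
 * addr = 0x1002, addr & -addr == 0x2, so a region whose implementation
 * does not support unaligned accesses is limited to a 2-byte access
 * here; the caller retries the remaining bytes after re-translating.
 */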
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = (uint32_t)ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
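/*
 * Illustrative sketch (not part of the original file, hence #if 0): a
 * device-model style DMA write through address_space_rw().  The buffer
 * contents and the guest-physical address are made up for the example.
 */
#if 0
static MemTxResult example_dma_write(AddressSpace *as, hwaddr dma_addr)
{
    uint8_t data[16] = { 0 };

    /* is_write = true: copy data[] into guest memory at dma_addr */
    return address_space_rw(as, dma_addr, MEMTXATTRS_UNSPECIFIED,
                            data, sizeof(data), true);
}
#endif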
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
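/*
 * Illustrative sketch (not part of the original file, hence #if 0):
 * firmware loading goes through the ROM-capable writer so that
 * ROM-backed regions can be populated too.  The reset address and the
 * blob are made up for the example.
 */
#if 0
static void example_load_firmware(const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
                                  blob, blob_len);
}
#endif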
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    /* The data structures we set up here depend on knowing the page size,
     * so no more changes can be made after this point.
     * In an ideal world, nothing we did before we had finished the
     * machine setup would care about the target page size, and we could
     * do this much later, rather than requiring board models to state
     * up front what their requirements are.
     */
    finalize_target_page_bits();
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
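/*
 * Illustrative sketch (not part of the original file, hence #if 0): a
 * caller can probe a range before committing to a transfer.  The
 * parameters are made up for the example.
 */
#if 0
static bool example_can_dma_write(AddressSpace *as, hwaddr addr, int len)
{
    /* is_write = true: check that the whole range accepts writes */
    return address_space_access_valid(as, addr, len, true);
}
#endif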
static hwaddr
address_space_extend_translation(AddressSpace *as, hwaddr addr, hwaddr target_len,
                                 MemoryRegion *mr, hwaddr base, hwaddr len,
                                 bool is_write)
{
    hwaddr done = 0;
    hwaddr xlat;
    MemoryRegion *this_mr;

    for (;;) {
        target_len -= len;
        addr += len;
        done += len;
        if (target_len == 0) {
            return done;
        }

        len = target_len;
        this_mr = address_space_translate(as, addr, &xlat, &len, is_write);
        if (this_mr != mr || xlat != base + done) {
            return done;
        }
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr l, xlat;
    MemoryRegion *mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    memory_region_ref(mr);
    *plen = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write);
    ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
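/*
 * Illustrative sketch (not part of the original file, hence #if 0): the
 * usual map -> access -> unmap pattern.  If the target is not directly
 * accessible RAM the map may fall back to the single bounce buffer and
 * can return NULL; a real caller would register a QEMUBH with
 * cpu_register_map_client() and retry when it runs.  The names below
 * are made up for the example.
 */
#if 0
static void example_zero_guest_buffer(AddressSpace *as, hwaddr addr,
                                      hwaddr want)
{
    hwaddr len = want;
    void *host = address_space_map(as, addr, &len, true);

    if (!host) {
        /* Resources exhausted (e.g. bounce buffer in use): retry later. */
        return;
    }
    memset(host, 0, len);               /* may be shorter than 'want' */
    address_space_unmap(as, host, len, true, len);
}
#endif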
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
#define ARG1_DECL                AddressSpace *as
#define ARG1                     as
#define SUFFIX
#define TRANSLATE(...)           address_space_translate(as, __VA_ARGS__)
#define IS_DIRECT(mr, is_write)  memory_access_is_direct(mr, is_write)
#define MAP_RAM(mr, ofs)         qemu_map_ram_ptr((mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK(...)       rcu_read_lock()
#define RCU_READ_UNLOCK(...)     rcu_read_unlock()
#include "memory_ldst.inc.c"
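/*
 * Illustrative sketch (not part of the original file, hence #if 0): the
 * include above instantiates word-sized accessors such as
 * address_space_ldl_le() and address_space_stl_le().  The helper name
 * and signature are assumed from memory_ldst.inc.c; the address is made
 * up for the example.
 */
#if 0
static uint32_t example_read_le32(AddressSpace *as, hwaddr addr)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED,
                                        &res);

    return res == MEMTX_OK ? val : 0;
}
#endif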
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write)
{
    hwaddr l, xlat;
    MemoryRegion *mr;
    void *ptr;

    assert(len > 0);

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        return -EINVAL;
    }

    l = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write);
    ptr = qemu_ram_ptr_length(mr->ram_block, xlat, &l);

    cache->xlat = xlat;
    cache->is_write = is_write;
    cache->mr = mr;
    cache->ptr = ptr;
    cache->len = l;
    memory_region_ref(cache->mr);

    return l;
}
void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len)
{
    assert(cache->is_write);
    invalidate_and_set_dirty(cache->mr, addr + cache->xlat, access_len);
}
void address_space_cache_destroy(MemoryRegionCache *cache)
{
    if (!cache->mr) {
        return;
    }

    if (xen_enabled()) {
        xen_invalidate_map_cache_entry(cache->ptr);
    }
    memory_region_unref(cache->mr);
    cache->mr = NULL;
}
/* Called from RCU critical section.  This function has the same
 * semantics as address_space_translate, but it only works on a
 * predefined range of a MemoryRegion that was mapped with
 * address_space_cache_init.
 */
static inline MemoryRegion *address_space_translate_cached(
    MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat,
    hwaddr *plen, bool is_write)
{
    assert(addr < cache->len && *plen <= cache->len - addr);
    *xlat = addr + cache->xlat;
    return cache->mr;
}

#define ARG1_DECL                MemoryRegionCache *cache
#define ARG1                     cache
#define SUFFIX                   _cached
#define TRANSLATE(...)           address_space_translate_cached(cache, __VA_ARGS__)
#define IS_DIRECT(mr, is_write)  true
#define MAP_RAM(mr, ofs)         (cache->ptr + (ofs - cache->xlat))
#define INVALIDATE(mr, ofs, len) ((void)0)
#define RCU_READ_LOCK()          ((void)0)
#define RCU_READ_UNLOCK()        ((void)0)
#include "memory_ldst.inc.c"
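/*
 * Illustrative sketch (not part of the original file, hence #if 0): a
 * MemoryRegionCache lets a device re-read a small guest structure
 * without re-translating on every access.  The _cached accessor name,
 * the 64-byte window and the ring layout are assumptions made for the
 * example.
 */
#if 0
static uint32_t example_read_ring_head(AddressSpace *as, hwaddr ring_pa)
{
    MemoryRegionCache cache;
    uint32_t head = 0;

    if (address_space_cache_init(&cache, as, ring_pa, 64, false) >= 0) {
        head = address_space_ldl_le_cached(&cache, 0,
                                           MEMTXATTRS_UNSPECIFIED, NULL);
        address_space_cache_destroy(&cache);
    }
    return head;
}
#endif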
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
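/*
 * Illustrative sketch (not part of the original file, hence #if 0): a
 * callback that sums the used size of every RAM block.  The argument
 * order follows the call made by qemu_ram_foreach_block() above; the
 * RAMBlockIterFunc typedef itself lives elsewhere.
 */
#if 0
static int example_count_ram(const char *block_name, void *host_addr,
                             ram_addr_t offset, ram_addr_t length,
                             void *opaque)
{
    uint64_t *total = opaque;

    *total += length;           /* returning non-zero would stop the walk */
    return 0;
}
#endif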
/*
 * Unmap pages of memory from start to start+length such that
 * they a) read as 0, b) trigger whatever fault mechanism
 * the OS provides for postcopy.
 * The pages must be unmapped by the end of the function.
 * Returns: 0 on success, non-0 on failure
 */
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
{
    int ret = -1;

    uint8_t *host_startaddr = rb->host + start;

    if ((uintptr_t)host_startaddr & (rb->page_size - 1)) {
        error_report("ram_block_discard_range: Unaligned start address: %p",
                     host_startaddr);
        goto err;
    }

    if ((start + length) <= rb->used_length) {
        uint8_t *host_endaddr = host_startaddr + length;
        if ((uintptr_t)host_endaddr & (rb->page_size - 1)) {
            error_report("ram_block_discard_range: Unaligned end address: %p",
                         host_endaddr);
            goto err;
        }

        errno = ENOTSUP; /* If we are missing MADVISE etc */

#if defined(CONFIG_MADVISE)
        /* Note: We need the madvise MADV_DONTNEED behaviour of definitely
         * freeing the page.
         */
        ret = madvise(host_startaddr, length, MADV_DONTNEED);
#endif
        if (ret) {
            ret = -errno;
            error_report("ram_block_discard_range: Failed to discard range "
                         "%s:%" PRIx64 " +%zx (%d)",
                         rb->idstr, start, length, ret);
        }
    } else {
        error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
                     "/%zx/" RAM_ADDR_FMT")",
                     rb->idstr, start, length, rb->used_length);
    }

err:
    return ret;
}