/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
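/* For example, with 64-bit physical addresses, 12-bit target pages and
 * 9-bit (512-entry) levels, P_L2_LEVELS works out to
 * ((64 - 12 - 1) / 9) + 1 = 6 radix-tree levels.  (TARGET_PAGE_BITS and
 * P_L2_BITS are target/build dependent; these numbers are only an
 * illustration.)
 */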
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb_alloc;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}
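/* Allocate one node from the map's node array (the caller is expected to
 * have grown the array via phys_map_node_reserve() first) and initialise
 * every entry either as an unassigned leaf or as a pointer to a
 * not-yet-allocated lower level.
 */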
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];

    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    }
    return &sections[PHYS_SECTION_UNASSIGNED];
}
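/* Returns true unless @mr is one of the special regions that the softmmu
 * path treats specially (ROM, not-dirty, watchpoint) or a ROM device.
 */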
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;

    if (!(section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
          section_covers_addr(section, addr))) {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    atomic_set(&d->mru_section, section);
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
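/* Translate @addr in @as into a MemoryRegion plus offset, walking through
 * any IOMMUs on the way: each iteration resolves one AddressSpaceDispatch
 * and, if the region has iommu_ops, applies the IOTLB entry and continues
 * in the target address space.  The length is clamped to the IOTLB page
 * and, under Xen, to the current target page.
 */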
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
469 #if !defined(CONFIG_USER_ONLY)
471 static int cpu_common_post_load(void *opaque
, int version_id
)
473 CPUState
*cpu
= opaque
;
475 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
476 version_id is increased. */
477 cpu
->interrupt_request
&= ~0x01;
483 static int cpu_common_pre_load(void *opaque
)
485 CPUState
*cpu
= opaque
;
487 cpu
->exception_index
= -1;
492 static bool cpu_common_exception_index_needed(void *opaque
)
494 CPUState
*cpu
= opaque
;
496 return tcg_enabled() && cpu
->exception_index
!= -1;
499 static const VMStateDescription vmstate_cpu_common_exception_index
= {
500 .name
= "cpu_common/exception_index",
502 .minimum_version_id
= 1,
503 .needed
= cpu_common_exception_index_needed
,
504 .fields
= (VMStateField
[]) {
505 VMSTATE_INT32(exception_index
, CPUState
),
506 VMSTATE_END_OF_LIST()
510 static bool cpu_common_crash_occurred_needed(void *opaque
)
512 CPUState
*cpu
= opaque
;
514 return cpu
->crash_occurred
;
517 static const VMStateDescription vmstate_cpu_common_crash_occurred
= {
518 .name
= "cpu_common/crash_occurred",
520 .minimum_version_id
= 1,
521 .needed
= cpu_common_crash_occurred_needed
,
522 .fields
= (VMStateField
[]) {
523 VMSTATE_BOOL(crash_occurred
, CPUState
),
524 VMSTATE_END_OF_LIST()
528 const VMStateDescription vmstate_cpu_common
= {
529 .name
= "cpu_common",
531 .minimum_version_id
= 1,
532 .pre_load
= cpu_common_pre_load
,
533 .post_load
= cpu_common_post_load
,
534 .fields
= (VMStateField
[]) {
535 VMSTATE_UINT32(halted
, CPUState
),
536 VMSTATE_UINT32(interrupt_request
, CPUState
),
537 VMSTATE_END_OF_LIST()
539 .subsections
= (const VMStateDescription
*[]) {
540 &vmstate_cpu_common_exception_index
,
541 &vmstate_cpu_common_crash_occurred
,
548 CPUState
*qemu_get_cpu(int index
)
553 if (cpu
->cpu_index
== index
) {
561 #if !defined(CONFIG_USER_ONLY)
562 void cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
, int asidx
)
564 CPUAddressSpace
*newas
;
566 /* Target code should have set num_ases before calling us */
567 assert(asidx
< cpu
->num_ases
);
570 /* address space 0 gets the convenience alias */
574 /* KVM cannot currently support multiple address spaces. */
575 assert(asidx
== 0 || !kvm_enabled());
577 if (!cpu
->cpu_ases
) {
578 cpu
->cpu_ases
= g_new0(CPUAddressSpace
, cpu
->num_ases
);
581 newas
= &cpu
->cpu_ases
[asidx
];
585 newas
->tcg_as_listener
.commit
= tcg_commit
;
586 memory_listener_register(&newas
->tcg_as_listener
, as
);
590 AddressSpace
*cpu_get_address_space(CPUState
*cpu
, int asidx
)
592 /* Return the AddressSpace corresponding to the specified index */
593 return cpu
->cpu_ases
[asidx
].as
;
597 #ifndef CONFIG_USER_ONLY
598 static DECLARE_BITMAP(cpu_index_map
, MAX_CPUMASK_BITS
);
600 static int cpu_get_free_index(Error
**errp
)
602 int cpu
= find_first_zero_bit(cpu_index_map
, MAX_CPUMASK_BITS
);
604 if (cpu
>= MAX_CPUMASK_BITS
) {
605 error_setg(errp
, "Trying to use more CPUs than max of %d",
610 bitmap_set(cpu_index_map
, cpu
, 1);
614 void cpu_exec_exit(CPUState
*cpu
)
616 if (cpu
->cpu_index
== -1) {
617 /* cpu_index was never allocated by this @cpu or was already freed. */
621 bitmap_clear(cpu_index_map
, cpu
->cpu_index
, 1);
626 static int cpu_get_free_index(Error
**errp
)
631 CPU_FOREACH(some_cpu
) {
637 void cpu_exec_exit(CPUState
*cpu
)
642 void cpu_exec_init(CPUState
*cpu
, Error
**errp
)
644 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
645 Error
*local_err
= NULL
;
650 #ifndef CONFIG_USER_ONLY
651 cpu
->thread_id
= qemu_get_thread_id();
653 /* This is a softmmu CPU object, so create a property for it
654 * so users can wire up its memory. (This can't go in qom/cpu.c
655 * because that file is compiled only once for both user-mode
656 * and system builds.) The default if no link is set up is to use
657 * the system address space.
659 object_property_add_link(OBJECT(cpu
), "memory", TYPE_MEMORY_REGION
,
660 (Object
**)&cpu
->memory
,
661 qdev_prop_allow_set_link_before_realize
,
662 OBJ_PROP_LINK_UNREF_ON_RELEASE
,
664 cpu
->memory
= system_memory
;
665 object_ref(OBJECT(cpu
->memory
));
668 #if defined(CONFIG_USER_ONLY)
671 cpu
->cpu_index
= cpu_get_free_index(&local_err
);
673 error_propagate(errp
, local_err
);
674 #if defined(CONFIG_USER_ONLY)
679 QTAILQ_INSERT_TAIL(&cpus
, cpu
, node
);
680 #if defined(CONFIG_USER_ONLY)
684 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
685 vmstate_register(NULL
, cpu
->cpu_index
, &vmstate_cpu_common
, cpu
);
687 if (cc
->vmsd
!= NULL
) {
688 vmstate_register(NULL
, cpu
->cpu_index
, cc
->vmsd
, cpu
);
693 #if defined(CONFIG_USER_ONLY)
694 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
696 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
699 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
702 hwaddr phys
= cpu_get_phys_page_attrs_debug(cpu
, pc
, &attrs
);
703 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
705 tb_invalidate_phys_addr(cpu
->cpu_ases
[asidx
].as
,
706 phys
| (pc
& ~TARGET_PAGE_MASK
));
711 #if defined(CONFIG_USER_ONLY)
712 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
717 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
723 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
727 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
728 int flags
, CPUWatchpoint
**watchpoint
)
733 /* Add a watchpoint. */
734 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
735 int flags
, CPUWatchpoint
**watchpoint
)
739 /* forbid ranges which are empty or run off the end of the address space */
740 if (len
== 0 || (addr
+ len
- 1) < addr
) {
741 error_report("tried to set invalid watchpoint at %"
742 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
745 wp
= g_malloc(sizeof(*wp
));
751 /* keep all GDB-injected watchpoints in front */
752 if (flags
& BP_GDB
) {
753 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
755 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
758 tlb_flush_page(cpu
, addr
);
765 /* Remove a specific watchpoint. */
766 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
771 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
772 if (addr
== wp
->vaddr
&& len
== wp
->len
773 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
774 cpu_watchpoint_remove_by_ref(cpu
, wp
);
781 /* Remove a specific watchpoint by reference. */
782 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
784 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
786 tlb_flush_page(cpu
, watchpoint
->vaddr
);
791 /* Remove all matching watchpoints. */
792 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
794 CPUWatchpoint
*wp
, *next
;
796 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
797 if (wp
->flags
& mask
) {
798 cpu_watchpoint_remove_by_ref(cpu
, wp
);
803 /* Return true if this watchpoint address matches the specified
804 * access (ie the address range covered by the watchpoint overlaps
805 * partially or completely with the address range covered by the
808 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint
*wp
,
812 /* We know the lengths are non-zero, but a little caution is
813 * required to avoid errors in the case where the range ends
814 * exactly at the top of the address space and so addr + len
815 * wraps round to zero.
817 vaddr wpend
= wp
->vaddr
+ wp
->len
- 1;
818 vaddr addrend
= addr
+ len
- 1;
820 return !(addr
> wpend
|| wp
->vaddr
> addrend
);
825 /* Add a breakpoint. */
826 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
827 CPUBreakpoint
**breakpoint
)
831 bp
= g_malloc(sizeof(*bp
));
836 /* keep all GDB-injected breakpoints in front */
837 if (flags
& BP_GDB
) {
838 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
840 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
843 breakpoint_invalidate(cpu
, pc
);
851 /* Remove a specific breakpoint. */
852 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
856 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
857 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
858 cpu_breakpoint_remove_by_ref(cpu
, bp
);
865 /* Remove a specific breakpoint by reference. */
866 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
868 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
870 breakpoint_invalidate(cpu
, breakpoint
->pc
);
875 /* Remove all matching breakpoints. */
876 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
878 CPUBreakpoint
*bp
, *next
;
880 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
881 if (bp
->flags
& mask
) {
882 cpu_breakpoint_remove_by_ref(cpu
, bp
);
887 /* enable or disable single step mode. EXCP_DEBUG is returned by the
888 CPU loop after each instruction */
889 void cpu_single_step(CPUState
*cpu
, int enabled
)
891 if (cpu
->singlestep_enabled
!= enabled
) {
892 cpu
->singlestep_enabled
= enabled
;
894 kvm_update_guest_debug(cpu
, 0);
896 /* must flush all the translated code to avoid inconsistencies */
897 /* XXX: only flush what is necessary */
903 void cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
910 fprintf(stderr
, "qemu: fatal: ");
911 vfprintf(stderr
, fmt
, ap
);
912 fprintf(stderr
, "\n");
913 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
914 if (qemu_log_separate()) {
915 qemu_log("qemu: fatal: ");
916 qemu_log_vprintf(fmt
, ap2
);
918 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
925 #if defined(CONFIG_USER_ONLY)
927 struct sigaction act
;
928 sigfillset(&act
.sa_mask
);
929 act
.sa_handler
= SIG_DFL
;
930 sigaction(SIGABRT
, &act
, NULL
);
936 #if !defined(CONFIG_USER_ONLY)
937 /* Called from RCU critical section */
938 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
942 block
= atomic_rcu_read(&ram_list
.mru_block
);
943 if (block
&& addr
- block
->offset
< block
->max_length
) {
946 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
947 if (addr
- block
->offset
< block
->max_length
) {
952 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
956 /* It is safe to write mru_block outside the iothread lock. This
961 * xxx removed from list
965 * call_rcu(reclaim_ramblock, xxx);
968 * atomic_rcu_set is not needed here. The block was already published
969 * when it was placed into the list. Here we're just making an extra
970 * copy of the pointer.
972 ram_list
.mru_block
= block
;
976 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
983 end
= TARGET_PAGE_ALIGN(start
+ length
);
984 start
&= TARGET_PAGE_MASK
;
987 block
= qemu_get_ram_block(start
);
988 assert(block
== qemu_get_ram_block(end
- 1));
989 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
991 tlb_reset_dirty(cpu
, start1
, length
);
996 /* Note: start and end must be within the same ram block. */
997 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
1001 DirtyMemoryBlocks
*blocks
;
1002 unsigned long end
, page
;
1009 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
1010 page
= start
>> TARGET_PAGE_BITS
;
1014 blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[client
]);
1016 while (page
< end
) {
1017 unsigned long idx
= page
/ DIRTY_MEMORY_BLOCK_SIZE
;
1018 unsigned long offset
= page
% DIRTY_MEMORY_BLOCK_SIZE
;
1019 unsigned long num
= MIN(end
- page
, DIRTY_MEMORY_BLOCK_SIZE
- offset
);
1021 dirty
|= bitmap_test_and_clear_atomic(blocks
->blocks
[idx
],
1028 if (dirty
&& tcg_enabled()) {
1029 tlb_reset_dirty_range_all(start
, length
);
1035 /* Called from RCU critical section */
1036 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
1037 MemoryRegionSection
*section
,
1039 hwaddr paddr
, hwaddr xlat
,
1041 target_ulong
*address
)
1046 if (memory_region_is_ram(section
->mr
)) {
1048 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1050 if (!section
->readonly
) {
1051 iotlb
|= PHYS_SECTION_NOTDIRTY
;
1053 iotlb
|= PHYS_SECTION_ROM
;
1056 AddressSpaceDispatch
*d
;
1058 d
= atomic_rcu_read(§ion
->address_space
->dispatch
);
1059 iotlb
= section
- d
->map
.sections
;
1063 /* Make accesses to pages with watchpoints go via the
1064 watchpoint trap routines. */
1065 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1066 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
1067 /* Avoid trapping reads of pages with a write breakpoint. */
1068 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1069 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
1070 *address
|= TLB_MMIO
;
1078 #endif /* defined(CONFIG_USER_ONLY) */
1080 #if !defined(CONFIG_USER_ONLY)
1082 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1084 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
1086 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
1087 qemu_anon_ram_alloc
;
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
1094 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1096 phys_mem_alloc
= alloc
;
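/* A hypothetical accelerator would install its allocator early, before any
 * RAM block is created, e.g.:
 *
 *     phys_mem_set_alloc(my_accel_ram_alloc);
 *
 * (my_accel_ram_alloc is only an illustration; it must match the
 * (size_t size, uint64_t *align) signature of qemu_anon_ram_alloc.)
 */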
1099 static uint16_t phys_section_add(PhysPageMap
*map
,
1100 MemoryRegionSection
*section
)
1102 /* The physical section number is ORed with a page-aligned
1103 * pointer to produce the iotlb entries. Thus it should
1104 * never overflow into the page-aligned value.
1106 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1108 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1109 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1110 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1111 map
->sections_nb_alloc
);
1113 map
->sections
[map
->sections_nb
] = *section
;
1114 memory_region_ref(section
->mr
);
1115 return map
->sections_nb
++;
1118 static void phys_section_destroy(MemoryRegion
*mr
)
1120 bool have_sub_page
= mr
->subpage
;
1122 memory_region_unref(mr
);
1124 if (have_sub_page
) {
1125 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1126 object_unref(OBJECT(&subpage
->iomem
));
1131 static void phys_sections_free(PhysPageMap
*map
)
1133 while (map
->sections_nb
> 0) {
1134 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1135 phys_section_destroy(section
->mr
);
1137 g_free(map
->sections
);
1141 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1144 hwaddr base
= section
->offset_within_address_space
1146 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1147 d
->map
.nodes
, d
->map
.sections
);
1148 MemoryRegionSection subsection
= {
1149 .offset_within_address_space
= base
,
1150 .size
= int128_make64(TARGET_PAGE_SIZE
),
1154 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1156 if (!(existing
->mr
->subpage
)) {
1157 subpage
= subpage_init(d
->as
, base
);
1158 subsection
.address_space
= d
->as
;
1159 subsection
.mr
= &subpage
->iomem
;
1160 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1161 phys_section_add(&d
->map
, &subsection
));
1163 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1165 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1166 end
= start
+ int128_get64(section
->size
) - 1;
1167 subpage_register(subpage
, start
, end
,
1168 phys_section_add(&d
->map
, section
));
1172 static void register_multipage(AddressSpaceDispatch
*d
,
1173 MemoryRegionSection
*section
)
1175 hwaddr start_addr
= section
->offset_within_address_space
;
1176 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1177 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1181 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1184 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1186 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1187 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1188 MemoryRegionSection now
= *section
, remain
= *section
;
1189 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1191 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1192 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1193 - now
.offset_within_address_space
;
1195 now
.size
= int128_min(int128_make64(left
), now
.size
);
1196 register_subpage(d
, &now
);
1198 now
.size
= int128_zero();
1200 while (int128_ne(remain
.size
, now
.size
)) {
1201 remain
.size
= int128_sub(remain
.size
, now
.size
);
1202 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1203 remain
.offset_within_region
+= int128_get64(now
.size
);
1205 if (int128_lt(remain
.size
, page_size
)) {
1206 register_subpage(d
, &now
);
1207 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1208 now
.size
= page_size
;
1209 register_subpage(d
, &now
);
1211 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1212 register_multipage(d
, &now
);
1217 void qemu_flush_coalesced_mmio_buffer(void)
1220 kvm_flush_coalesced_mmio_buffer();
1223 void qemu_mutex_lock_ramlist(void)
1225 qemu_mutex_lock(&ram_list
.mutex
);
1228 void qemu_mutex_unlock_ramlist(void)
1230 qemu_mutex_unlock(&ram_list
.mutex
);
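/* Map the backing store for a RAM block from a file (or a directory, in
 * which case a temporary file is created there), typically used with
 * hugetlbfs via -mem-path.  Returns the mapped area, or NULL after
 * setting @errp on failure.
 */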
1234 static void *file_ram_alloc(RAMBlock
*block
,
1239 bool unlink_on_error
= false;
1241 char *sanitized_name
;
1247 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1249 "host lacks kvm mmu notifiers, -mem-path unsupported");
1254 fd
= open(path
, O_RDWR
);
1256 /* @path names an existing file, use it */
1259 if (errno
== ENOENT
) {
1260 /* @path names a file that doesn't exist, create it */
1261 fd
= open(path
, O_RDWR
| O_CREAT
| O_EXCL
, 0644);
1263 unlink_on_error
= true;
1266 } else if (errno
== EISDIR
) {
1267 /* @path names a directory, create a file there */
1268 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1269 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1270 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1276 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1278 g_free(sanitized_name
);
1280 fd
= mkstemp(filename
);
1288 if (errno
!= EEXIST
&& errno
!= EINTR
) {
1289 error_setg_errno(errp
, errno
,
1290 "can't open backing store %s for guest RAM",
1295 * Try again on EINTR and EEXIST. The latter happens when
1296 * something else creates the file between our two open().
1300 page_size
= qemu_fd_getpagesize(fd
);
1301 block
->mr
->align
= page_size
;
1303 if (memory
< page_size
) {
1304 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1305 "or larger than page size 0x%" PRIx64
,
1310 memory
= ROUND_UP(memory
, page_size
);
1313 * ftruncate is not supported by hugetlbfs in older
1314 * hosts, so don't bother bailing out on errors.
1315 * If anything goes wrong with it under other filesystems,
1318 if (ftruncate(fd
, memory
)) {
1319 perror("ftruncate");
1322 area
= qemu_ram_mmap(fd
, memory
, page_size
, block
->flags
& RAM_SHARED
);
1323 if (area
== MAP_FAILED
) {
1324 error_setg_errno(errp
, errno
,
1325 "unable to map backing store for guest RAM");
1330 os_mem_prealloc(fd
, area
, memory
);
1337 if (unlink_on_error
) {
1347 /* Called with the ramlist lock held. */
1348 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1350 RAMBlock
*block
, *next_block
;
1351 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1353 assert(size
!= 0); /* it would hand out same offset multiple times */
1355 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1359 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1360 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1362 end
= block
->offset
+ block
->max_length
;
1364 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1365 if (next_block
->offset
>= end
) {
1366 next
= MIN(next
, next_block
->offset
);
1369 if (next
- end
>= size
&& next
- end
< mingap
) {
1371 mingap
= next
- end
;
1375 if (offset
== RAM_ADDR_MAX
) {
1376 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1384 ram_addr_t
last_ram_offset(void)
1387 ram_addr_t last
= 0;
1390 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1391 last
= MAX(last
, block
->offset
+ block
->max_length
);
1397 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1401 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1402 if (!machine_dump_guest_core(current_machine
)) {
1403 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1405 perror("qemu_madvise");
1406 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1407 "but dump_guest_core=off specified\n");
1412 /* Called within an RCU critical section, or while the ramlist lock
1415 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1419 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1420 if (block
->offset
== addr
) {
1428 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1433 /* Called with iothread lock held. */
1434 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1436 RAMBlock
*new_block
, *block
;
1439 new_block
= find_ram_block(addr
);
1441 assert(!new_block
->idstr
[0]);
1444 char *id
= qdev_get_dev_path(dev
);
1446 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1450 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1452 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1453 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1454 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1462 /* Called with iothread lock held. */
1463 void qemu_ram_unset_idstr(ram_addr_t addr
)
1467 /* FIXME: arch_init.c assumes that this is not called throughout
1468 * migration. Ignore the problem since hot-unplug during migration
1469 * does not work anyway.
1473 block
= find_ram_block(addr
);
1475 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1480 static int memory_try_enable_merging(void *addr
, size_t len
)
1482 if (!machine_mem_merge(current_machine
)) {
1483 /* disabled by the user */
1487 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1490 /* Only legal before guest might have detected the memory size: e.g. on
1491 * incoming migration, or right after reset.
1493 * As memory core doesn't know how is memory accessed, it is up to
1494 * resize callback to update device state and/or add assertions to detect
1495 * misuse, if necessary.
1497 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1499 RAMBlock
*block
= find_ram_block(base
);
1503 newsize
= HOST_PAGE_ALIGN(newsize
);
1505 if (block
->used_length
== newsize
) {
1509 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1510 error_setg_errno(errp
, EINVAL
,
1511 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1512 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1513 newsize
, block
->used_length
);
1517 if (block
->max_length
< newsize
) {
1518 error_setg_errno(errp
, EINVAL
,
1519 "Length too large: %s: 0x" RAM_ADDR_FMT
1520 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1521 newsize
, block
->max_length
);
1525 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1526 block
->used_length
= newsize
;
1527 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1529 memory_region_set_size(block
->mr
, newsize
);
1530 if (block
->resized
) {
1531 block
->resized(block
->idstr
, newsize
, block
->host
);
1536 /* Called with ram_list.mutex held */
1537 static void dirty_memory_extend(ram_addr_t old_ram_size
,
1538 ram_addr_t new_ram_size
)
1540 ram_addr_t old_num_blocks
= DIV_ROUND_UP(old_ram_size
,
1541 DIRTY_MEMORY_BLOCK_SIZE
);
1542 ram_addr_t new_num_blocks
= DIV_ROUND_UP(new_ram_size
,
1543 DIRTY_MEMORY_BLOCK_SIZE
);
1546 /* Only need to extend if block count increased */
1547 if (new_num_blocks
<= old_num_blocks
) {
1551 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1552 DirtyMemoryBlocks
*old_blocks
;
1553 DirtyMemoryBlocks
*new_blocks
;
1556 old_blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[i
]);
1557 new_blocks
= g_malloc(sizeof(*new_blocks
) +
1558 sizeof(new_blocks
->blocks
[0]) * new_num_blocks
);
1560 if (old_num_blocks
) {
1561 memcpy(new_blocks
->blocks
, old_blocks
->blocks
,
1562 old_num_blocks
* sizeof(old_blocks
->blocks
[0]));
1565 for (j
= old_num_blocks
; j
< new_num_blocks
; j
++) {
1566 new_blocks
->blocks
[j
] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE
);
1569 atomic_rcu_set(&ram_list
.dirty_memory
[i
], new_blocks
);
1572 g_free_rcu(old_blocks
, rcu
);
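/* Insert a freshly allocated RAMBlock into ram_list: find a free offset,
 * allocate or map the host memory if needed, grow the dirty memory
 * bitmaps, and keep the list sorted from biggest to smallest block so
 * common lookups hit early.
 */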
1577 static void ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1580 RAMBlock
*last_block
= NULL
;
1581 ram_addr_t old_ram_size
, new_ram_size
;
1584 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1586 qemu_mutex_lock_ramlist();
1587 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1589 if (!new_block
->host
) {
1590 if (xen_enabled()) {
1591 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1592 new_block
->mr
, &err
);
1594 error_propagate(errp
, err
);
1595 qemu_mutex_unlock_ramlist();
1599 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1600 &new_block
->mr
->align
);
1601 if (!new_block
->host
) {
1602 error_setg_errno(errp
, errno
,
1603 "cannot set up guest memory '%s'",
1604 memory_region_name(new_block
->mr
));
1605 qemu_mutex_unlock_ramlist();
1608 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1612 new_ram_size
= MAX(old_ram_size
,
1613 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1614 if (new_ram_size
> old_ram_size
) {
1615 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1616 dirty_memory_extend(old_ram_size
, new_ram_size
);
1618 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1619 * QLIST (which has an RCU-friendly variant) does not have insertion at
1620 * tail, so save the last element in last_block.
1622 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1624 if (block
->max_length
< new_block
->max_length
) {
1629 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1630 } else if (last_block
) {
1631 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1632 } else { /* list is empty */
1633 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1635 ram_list
.mru_block
= NULL
;
1637 /* Write list before version */
1640 qemu_mutex_unlock_ramlist();
1642 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1643 new_block
->used_length
,
1646 if (new_block
->host
) {
1647 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1648 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1649 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1650 if (kvm_enabled()) {
1651 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1657 RAMBlock
*qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1658 bool share
, const char *mem_path
,
1661 RAMBlock
*new_block
;
1662 Error
*local_err
= NULL
;
1664 if (xen_enabled()) {
1665 error_setg(errp
, "-mem-path not supported with Xen");
1669 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1671 * file_ram_alloc() needs to allocate just like
1672 * phys_mem_alloc, but we haven't bothered to provide
1676 "-mem-path not supported with this accelerator");
1680 size
= HOST_PAGE_ALIGN(size
);
1681 new_block
= g_malloc0(sizeof(*new_block
));
1683 new_block
->used_length
= size
;
1684 new_block
->max_length
= size
;
1685 new_block
->flags
= share
? RAM_SHARED
: 0;
1686 new_block
->host
= file_ram_alloc(new_block
, size
,
1688 if (!new_block
->host
) {
1693 ram_block_add(new_block
, &local_err
);
1696 error_propagate(errp
, local_err
);
1704 RAMBlock
*qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1705 void (*resized
)(const char*,
1708 void *host
, bool resizeable
,
1709 MemoryRegion
*mr
, Error
**errp
)
1711 RAMBlock
*new_block
;
1712 Error
*local_err
= NULL
;
1714 size
= HOST_PAGE_ALIGN(size
);
1715 max_size
= HOST_PAGE_ALIGN(max_size
);
1716 new_block
= g_malloc0(sizeof(*new_block
));
1718 new_block
->resized
= resized
;
1719 new_block
->used_length
= size
;
1720 new_block
->max_length
= max_size
;
1721 assert(max_size
>= size
);
1723 new_block
->host
= host
;
1725 new_block
->flags
|= RAM_PREALLOC
;
1728 new_block
->flags
|= RAM_RESIZEABLE
;
1730 ram_block_add(new_block
, &local_err
);
1733 error_propagate(errp
, local_err
);
1739 RAMBlock
*qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1740 MemoryRegion
*mr
, Error
**errp
)
1742 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1745 RAMBlock
*qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1747 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1750 RAMBlock
*qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1751 void (*resized
)(const char*,
1754 MemoryRegion
*mr
, Error
**errp
)
1756 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1759 static void reclaim_ramblock(RAMBlock
*block
)
1761 if (block
->flags
& RAM_PREALLOC
) {
1763 } else if (xen_enabled()) {
1764 xen_invalidate_map_cache_entry(block
->host
);
1766 } else if (block
->fd
>= 0) {
1767 qemu_ram_munmap(block
->host
, block
->max_length
);
1771 qemu_anon_ram_free(block
->host
, block
->max_length
);
1776 void qemu_ram_free(RAMBlock
*block
)
1782 qemu_mutex_lock_ramlist();
1783 QLIST_REMOVE_RCU(block
, next
);
1784 ram_list
.mru_block
= NULL
;
1785 /* Write list before version */
1788 call_rcu(block
, reclaim_ramblock
, rcu
);
1789 qemu_mutex_unlock_ramlist();
1793 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1800 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1801 offset
= addr
- block
->offset
;
1802 if (offset
< block
->max_length
) {
1803 vaddr
= ramblock_ptr(block
, offset
);
1804 if (block
->flags
& RAM_PREALLOC
) {
1806 } else if (xen_enabled()) {
1810 if (block
->fd
>= 0) {
1811 flags
|= (block
->flags
& RAM_SHARED
?
1812 MAP_SHARED
: MAP_PRIVATE
);
1813 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1814 flags
, block
->fd
, offset
);
1817 * Remap needs to match alloc. Accelerators that
1818 * set phys_mem_alloc never remap. If they did,
1819 * we'd need a remap hook here.
1821 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1823 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1824 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1827 if (area
!= vaddr
) {
1828 fprintf(stderr
, "Could not remap addr: "
1829 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1833 memory_try_enable_merging(vaddr
, length
);
1834 qemu_ram_setup_dump(vaddr
, length
);
1839 #endif /* !_WIN32 */
1841 int qemu_get_ram_fd(ram_addr_t addr
)
1847 block
= qemu_get_ram_block(addr
);
1853 void qemu_set_ram_fd(ram_addr_t addr
, int fd
)
1858 block
= qemu_get_ram_block(addr
);
1863 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1869 block
= qemu_get_ram_block(addr
);
1870 ptr
= ramblock_ptr(block
, 0);
1875 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1876 * This should not be used for general purpose DMA. Use address_space_map
1877 * or address_space_rw instead. For local memory (e.g. video ram) that the
1878 * device owns, use memory_region_get_ram_ptr.
1880 * Called within RCU critical section.
1882 void *qemu_get_ram_ptr(RAMBlock
*ram_block
, ram_addr_t addr
)
1884 RAMBlock
*block
= ram_block
;
1886 if (block
== NULL
) {
1887 block
= qemu_get_ram_block(addr
);
1890 if (xen_enabled() && block
->host
== NULL
) {
1891 /* We need to check if the requested address is in the RAM
1892 * because we don't want to map the entire memory in QEMU.
1893 * In that case just map until the end of the page.
1895 if (block
->offset
== 0) {
1896 return xen_map_cache(addr
, 0, 0);
1899 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1901 return ramblock_ptr(block
, addr
- block
->offset
);
1904 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1905 * but takes a size argument.
1907 * Called within RCU critical section.
1909 static void *qemu_ram_ptr_length(RAMBlock
*ram_block
, ram_addr_t addr
,
1912 RAMBlock
*block
= ram_block
;
1913 ram_addr_t offset_inside_block
;
1918 if (block
== NULL
) {
1919 block
= qemu_get_ram_block(addr
);
1921 offset_inside_block
= addr
- block
->offset
;
1922 *size
= MIN(*size
, block
->max_length
- offset_inside_block
);
1924 if (xen_enabled() && block
->host
== NULL
) {
1925 /* We need to check if the requested address is in the RAM
1926 * because we don't want to map the entire memory in QEMU.
1927 * In that case just map the requested area.
1929 if (block
->offset
== 0) {
1930 return xen_map_cache(addr
, *size
, 1);
1933 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1936 return ramblock_ptr(block
, offset_inside_block
);
1940 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1943 * ptr: Host pointer to look up
1944 * round_offset: If true round the result offset down to a page boundary
1945 * *ram_addr: set to result ram_addr
1946 * *offset: set to result offset within the RAMBlock
1948 * Returns: RAMBlock (or NULL if not found)
1950 * By the time this function returns, the returned pointer is not protected
1951 * by RCU anymore. If the caller is not within an RCU critical section and
1952 * does not hold the iothread lock, it must have other means of protecting the
1953 * pointer, such as a reference to the region that includes the incoming
1956 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1957 ram_addr_t
*ram_addr
,
1961 uint8_t *host
= ptr
;
1963 if (xen_enabled()) {
1965 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1966 block
= qemu_get_ram_block(*ram_addr
);
1968 *offset
= (host
- block
->host
);
1975 block
= atomic_rcu_read(&ram_list
.mru_block
);
1976 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1980 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
        /* This case appears when the block is not mapped. */
1982 if (block
->host
== NULL
) {
1985 if (host
- block
->host
< block
->max_length
) {
1994 *offset
= (host
- block
->host
);
1996 *offset
&= TARGET_PAGE_MASK
;
1998 *ram_addr
= block
->offset
+ *offset
;
2004 * Finds the named RAMBlock
2006 * name: The name of RAMBlock to find
2008 * Returns: RAMBlock (or NULL if not found)
2010 RAMBlock
*qemu_ram_block_by_name(const char *name
)
2014 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
2015 if (!strcmp(name
, block
->idstr
)) {
2023 /* Some of the softmmu routines need to translate from a host pointer
2024 (typically a TLB entry) back to a ram offset. */
2025 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
2028 ram_addr_t offset
; /* Not used */
2030 block
= qemu_ram_block_from_host(ptr
, false, ram_addr
, &offset
);
2039 /* Called within RCU critical section. */
2040 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
2041 uint64_t val
, unsigned size
)
2043 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
2044 tb_invalidate_phys_page_fast(ram_addr
, size
);
2048 stb_p(qemu_get_ram_ptr(NULL
, ram_addr
), val
);
2051 stw_p(qemu_get_ram_ptr(NULL
, ram_addr
), val
);
2054 stl_p(qemu_get_ram_ptr(NULL
, ram_addr
), val
);
2059 /* Set both VGA and migration bits for simplicity and to remove
2060 * the notdirty callback faster.
2062 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
2063 DIRTY_CLIENTS_NOCODE
);
2064 /* we remove the notdirty callback only if the code has been
2066 if (!cpu_physical_memory_is_clean(ram_addr
)) {
2067 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
2071 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
2072 unsigned size
, bool is_write
)
2077 static const MemoryRegionOps notdirty_mem_ops
= {
2078 .write
= notdirty_mem_write
,
2079 .valid
.accepts
= notdirty_mem_accepts
,
2080 .endianness
= DEVICE_NATIVE_ENDIAN
,
2083 /* Generate a debug exception if a watchpoint has been hit. */
2084 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2086 CPUState
*cpu
= current_cpu
;
2087 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
2088 CPUArchState
*env
= cpu
->env_ptr
;
2089 target_ulong pc
, cs_base
;
2094 if (cpu
->watchpoint_hit
) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
2098 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2101 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2102 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2103 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2104 && (wp
->flags
& flags
)) {
2105 if (flags
== BP_MEM_READ
) {
2106 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2108 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2110 wp
->hitaddr
= vaddr
;
2111 wp
->hitattrs
= attrs
;
2112 if (!cpu
->watchpoint_hit
) {
2113 if (wp
->flags
& BP_CPU
&&
2114 !cc
->debug_check_watchpoint(cpu
, wp
)) {
2115 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2118 cpu
->watchpoint_hit
= wp
;
2119 tb_check_watchpoint(cpu
);
2120 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2121 cpu
->exception_index
= EXCP_DEBUG
;
2124 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2125 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2126 cpu_resume_from_signal(cpu
, NULL
);
2130 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   handlers. */
2138 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2139 unsigned size
, MemTxAttrs attrs
)
2143 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2144 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2146 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2149 data
= address_space_ldub(as
, addr
, attrs
, &res
);
2152 data
= address_space_lduw(as
, addr
, attrs
, &res
);
2155 data
= address_space_ldl(as
, addr
, attrs
, &res
);
2163 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2164 uint64_t val
, unsigned size
,
2168 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2169 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2171 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2174 address_space_stb(as
, addr
, val
, attrs
, &res
);
2177 address_space_stw(as
, addr
, val
, attrs
, &res
);
2180 address_space_stl(as
, addr
, val
, attrs
, &res
);
2187 static const MemoryRegionOps watch_mem_ops
= {
2188 .read_with_attrs
= watch_mem_read
,
2189 .write_with_attrs
= watch_mem_write
,
2190 .endianness
= DEVICE_NATIVE_ENDIAN
,
2193 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2194 unsigned len
, MemTxAttrs attrs
)
2196 subpage_t
*subpage
= opaque
;
2200 #if defined(DEBUG_SUBPAGE)
2201 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2202 subpage
, len
, addr
);
2204 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2211 *data
= ldub_p(buf
);
2214 *data
= lduw_p(buf
);
2227 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2228 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2230 subpage_t
*subpage
= opaque
;
2233 #if defined(DEBUG_SUBPAGE)
2234 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2235 " value %"PRIx64
"\n",
2236 __func__
, subpage
, len
, addr
, value
);
2254 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2258 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2259 unsigned len
, bool is_write
)
2261 subpage_t
*subpage
= opaque
;
2262 #if defined(DEBUG_SUBPAGE)
2263 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2264 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2267 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2271 static const MemoryRegionOps subpage_ops
= {
2272 .read_with_attrs
= subpage_read
,
2273 .write_with_attrs
= subpage_write
,
2274 .impl
.min_access_size
= 1,
2275 .impl
.max_access_size
= 8,
2276 .valid
.min_access_size
= 1,
2277 .valid
.max_access_size
= 8,
2278 .valid
.accepts
= subpage_accepts
,
2279 .endianness
= DEVICE_NATIVE_ENDIAN
,
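/* Point the sub-page entries covering [start, end] within this page-sized
 * subpage region at @section (an index into the dispatch section table).
 */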
2282 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2287 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2289 idx
= SUBPAGE_IDX(start
);
2290 eidx
= SUBPAGE_IDX(end
);
2291 #if defined(DEBUG_SUBPAGE)
2292 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2293 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2295 for (; idx
<= eidx
; idx
++) {
2296 mmio
->sub_section
[idx
] = section
;
2302 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2306 mmio
= g_malloc0(sizeof(subpage_t
));
2310 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2311 NULL
, TARGET_PAGE_SIZE
);
2312 mmio
->iomem
.subpage
= true;
2313 #if defined(DEBUG_SUBPAGE)
2314 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2315 mmio
, base
, TARGET_PAGE_SIZE
);
2317 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2322 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2326 MemoryRegionSection section
= {
2327 .address_space
= as
,
2329 .offset_within_address_space
= 0,
2330 .offset_within_region
= 0,
2331 .size
= int128_2_64(),
2334 return phys_section_add(map
, §ion
);
2337 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
, MemTxAttrs attrs
)
2339 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
2340 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[asidx
];
2341 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2342 MemoryRegionSection
*sections
= d
->map
.sections
;
2344 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2347 static void io_mem_init(void)
2349 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2350 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2352 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2354 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2358 static void mem_begin(MemoryListener
*listener
)
2360 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2361 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2364 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2365 assert(n
== PHYS_SECTION_UNASSIGNED
);
2366 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2367 assert(n
== PHYS_SECTION_NOTDIRTY
);
2368 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2369 assert(n
== PHYS_SECTION_ROM
);
2370 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2371 assert(n
== PHYS_SECTION_WATCH
);
2373 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2375 as
->next_dispatch
= d
;
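/* mem_begin() built a fresh AddressSpaceDispatch in as->next_dispatch;
 * mem_commit() compacts it, publishes it with atomic_rcu_set() and
 * reclaims the previous one after a grace period via call_rcu().
 */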
2378 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2380 phys_sections_free(&d
->map
);
2384 static void mem_commit(MemoryListener
*listener
)
2386 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2387 AddressSpaceDispatch
*cur
= as
->dispatch
;
2388 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2390 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2392 atomic_rcu_set(&as
->dispatch
, next
);
2394 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2398 static void tcg_commit(MemoryListener
*listener
)
2400 CPUAddressSpace
*cpuas
;
2401 AddressSpaceDispatch
*d
;
2403 /* since each CPU stores ram addresses in its TLB cache, we must
2404 reset the modified entries */
2405 cpuas
= container_of(listener
, CPUAddressSpace
, tcg_as_listener
);
2406 cpu_reloading_memory_map();
2407 /* The CPU and TLB are protected by the iothread lock.
2408 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2409 * may have split the RCU critical section.
2411 d
= atomic_rcu_read(&cpuas
->as
->dispatch
);
2412 cpuas
->memory_dispatch
= d
;
2413 tlb_flush(cpuas
->cpu
, 1);
2416 void address_space_init_dispatch(AddressSpace
*as
)
2418 as
->dispatch
= NULL
;
2419 as
->dispatch_listener
= (MemoryListener
) {
2421 .commit
= mem_commit
,
2422 .region_add
= mem_add
,
2423 .region_nop
= mem_add
,
2426 memory_listener_register(&as
->dispatch_listener
, as
);
2429 void address_space_unregister(AddressSpace
*as
)
2431 memory_listener_unregister(&as
->dispatch_listener
);
2434 void address_space_destroy_dispatch(AddressSpace
*as
)
2436 AddressSpaceDispatch
*d
= as
->dispatch
;
2438 atomic_rcu_set(&as
->dispatch
, NULL
);
2440 call_rcu(d
, address_space_dispatch_free
, rcu
);
2444 static void memory_map_init(void)
2446 system_memory
= g_malloc(sizeof(*system_memory
));
2448 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2449 address_space_init(&address_space_memory
, system_memory
, "memory");
2451 system_io
= g_malloc(sizeof(*system_io
));
2452 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2454 address_space_init(&address_space_io
, system_io
, "I/O");
2457 MemoryRegion
*get_system_memory(void)
2459 return system_memory
;
2462 MemoryRegion
*get_system_io(void)
2467 #endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
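/*
 * Worked example for the alignment bound above: for addr = 0x1006,
 * addr & -addr == 0x2, so a region whose implementation does not accept
 * unaligned accesses is limited to 2-byte accesses at that address even
 * if it advertises a larger max_access_size.
 */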
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
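/*
 * Callers use the return value to pair the lock/unlock, typically:
 *
 *     release_lock |= prepare_mmio_access(mr);
 *     ...dispatch the MMIO access...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *         release_lock = false;
 *     }
 *
 * so the iothread lock is only dropped by the code path that took it.
 */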
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block,
                                   memory_region_get_ram_addr(mr) + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
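/*
 * Convenience wrapper for code that only deals with the system address
 * space.  An illustrative read of a 4-byte guest physical value into a
 * host buffer (gpa is a caller-chosen guest physical address):
 *
 *     uint8_t data[4];
 *     cpu_physical_memory_rw(gpa, data, sizeof(data), 0);
 *
 * cpu_physical_memory_read()/cpu_physical_memory_write() are thin
 * wrappers around this function.
 */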
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
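/*
 * Illustrative use from board or firmware-loading code, assuming a
 * firmware image already read into "blob" and a board-defined flash base
 * address:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, flash_base,
 *                                   blob, blob_size);
 *
 * Unlike address_space_write(), this also lands in ROM and ROM-device
 * regions, which is exactly what ROM loaders need.
 */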
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}
static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
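/*
 * Sketch of the retry protocol around the single bounce buffer, assuming
 * a caller-provided bottom half "retry_bh" that simply restarts the
 * mapping attempt:
 *
 *     p = address_space_map(as, addr, &plen, is_write);
 *     if (!p) {
 *         cpu_register_map_client(retry_bh);   // run again when bounce frees up
 *         return;
 *     }
 *
 * The bottom half is scheduled (and the client unregistered) by
 * cpu_notify_map_clients_locked() once the bounce buffer is released.
 */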
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
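/*
 * Typical map/unmap pairing for a direct-RAM fast path, with a fallback
 * when only part of the range (or the bounce buffer) was mapped.  A
 * minimal sketch, assuming "gpa", "data" and "size" come from the caller:
 *
 *     hwaddr plen = size;
 *     void *p = cpu_physical_memory_map(gpa, &plen, 1);
 *     if (p && plen == size) {
 *         memcpy(p, data, size);                       // direct host access
 *         cpu_physical_memory_unmap(p, plen, 1, size);
 *     } else {
 *         if (p) {
 *             cpu_physical_memory_unmap(p, plen, 1, 0);
 *         }
 *         cpu_physical_memory_write(gpa, data, size);  // slow path
 *     }
 */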
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
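/*
 * Illustrative device-model use of the fixed-endian loads, assuming a
 * guest physical address "desc_gpa" that holds a little-endian descriptor
 * word:
 *
 *     uint32_t flags = ldl_le_phys(&address_space_memory, desc_gpa);
 *
 * The _le/_be variants avoid depending on TARGET_WORDS_BIGENDIAN at the
 * call site.
 */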
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
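/*
 * Used by the gdbstub and the monitor.  An illustrative read of guest
 * virtual memory, assuming "cpu" is the CPU whose MMU state should be
 * used for the translation and "vaddr" is a guest virtual address:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(cpu, vaddr, insn, sizeof(insn), 0) < 0) {
 *         ...address not mapped...
 *     }
 */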
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
#endif
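/*
 * Example iterator, assuming a caller that just wants to log every RAM
 * block (the callback signature is RAMBlockIterFunc):
 *
 *     static int dump_block(const char *idstr, void *host_addr,
 *                           ram_addr_t offset, ram_addr_t length, void *opaque)
 *     {
 *         fprintf(stderr, "%s: host %p offset " RAM_ADDR_FMT
 *                 " len " RAM_ADDR_FMT "\n",
 *                 idstr, host_addr, offset, length);
 *         return 0;   // a non-zero return stops the walk
 *     }
 *
 *     qemu_ram_foreach_block(dump_block, NULL);
 */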