4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <sys/types.h>
25 #include "qemu-common.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
33 #include "qemu/osdep.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "hw/xen/xen.h"
37 #include "qemu/timer.h"
38 #include "qemu/config-file.h"
39 #include "qemu/error-report.h"
40 #include "exec/memory.h"
41 #include "sysemu/dma.h"
42 #include "exec/address-spaces.h"
43 #if defined(CONFIG_USER_ONLY)
45 #else /* !CONFIG_USER_ONLY */
46 #include "sysemu/xen-mapcache.h"
49 #include "exec/cpu-all.h"
50 #include "qemu/rcu_queue.h"
51 #include "qemu/main-loop.h"
52 #include "translate-all.h"
53 #include "sysemu/replay.h"
55 #include "exec/memory-internal.h"
56 #include "exec/ram_addr.h"
58 #include "qemu/range.h"
60 #include "qemu/mmap-alloc.h"
63 //#define DEBUG_SUBPAGE
65 #if !defined(CONFIG_USER_ONLY)
66 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
67 * are protected by the ramlist lock.
69 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
71 static MemoryRegion
*system_memory
;
72 static MemoryRegion
*system_io
;
74 AddressSpace address_space_io
;
75 AddressSpace address_space_memory
;
77 MemoryRegion io_mem_rom
, io_mem_notdirty
;
78 static MemoryRegion io_mem_unassigned
;
80 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
81 #define RAM_PREALLOC (1 << 0)
83 /* RAM is mmap-ed with MAP_SHARED */
84 #define RAM_SHARED (1 << 1)
86 /* Only a portion of RAM (used_length) is actually used, and migrated.
87 * This used_length size can change across reboots.
89 #define RAM_RESIZEABLE (1 << 2)
93 struct CPUTailQ cpus
= QTAILQ_HEAD_INITIALIZER(cpus
);
94 /* current CPU in the current thread. It is only valid inside
96 __thread CPUState
*current_cpu
;
97 /* 0 = Do not count executed instructions.
98 1 = Precise instruction counting.
99 2 = Adaptive rate instruction counting. */
102 #if !defined(CONFIG_USER_ONLY)
104 typedef struct PhysPageEntry PhysPageEntry
;
106 struct PhysPageEntry
{
107 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
109 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
113 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
115 /* Size of the L2 (and L3, etc) page tables. */
116 #define ADDR_SPACE_BITS 64
119 #define P_L2_SIZE (1 << P_L2_BITS)
121 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
123 typedef PhysPageEntry Node
[P_L2_SIZE
];
125 typedef struct PhysPageMap
{
128 unsigned sections_nb
;
129 unsigned sections_nb_alloc
;
131 unsigned nodes_nb_alloc
;
133 MemoryRegionSection
*sections
;
136 struct AddressSpaceDispatch
{
139 /* This is a multi-level map on the physical address space.
140 * The bottom level has pointers to MemoryRegionSections.
142 PhysPageEntry phys_map
;
147 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
148 typedef struct subpage_t
{
152 uint16_t sub_section
[TARGET_PAGE_SIZE
];
155 #define PHYS_SECTION_UNASSIGNED 0
156 #define PHYS_SECTION_NOTDIRTY 1
157 #define PHYS_SECTION_ROM 2
158 #define PHYS_SECTION_WATCH 3
160 static void io_mem_init(void);
161 static void memory_map_init(void);
162 static void tcg_commit(MemoryListener
*listener
);
164 static MemoryRegion io_mem_watch
;
167 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
168 * @cpu: the CPU whose AddressSpace this is
169 * @as: the AddressSpace itself
170 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
171 * @tcg_as_listener: listener for tracking changes to the AddressSpace
173 struct CPUAddressSpace
{
176 struct AddressSpaceDispatch
*memory_dispatch
;
177 MemoryListener tcg_as_listener
;
182 #if !defined(CONFIG_USER_ONLY)
184 static void phys_map_node_reserve(PhysPageMap
*map
, unsigned nodes
)
186 if (map
->nodes_nb
+ nodes
> map
->nodes_nb_alloc
) {
187 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
* 2, 16);
188 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
, map
->nodes_nb
+ nodes
);
189 map
->nodes
= g_renew(Node
, map
->nodes
, map
->nodes_nb_alloc
);
193 static uint32_t phys_map_node_alloc(PhysPageMap
*map
, bool leaf
)
200 ret
= map
->nodes_nb
++;
202 assert(ret
!= PHYS_MAP_NODE_NIL
);
203 assert(ret
!= map
->nodes_nb_alloc
);
205 e
.skip
= leaf
? 0 : 1;
206 e
.ptr
= leaf
? PHYS_SECTION_UNASSIGNED
: PHYS_MAP_NODE_NIL
;
207 for (i
= 0; i
< P_L2_SIZE
; ++i
) {
208 memcpy(&p
[i
], &e
, sizeof(e
));
213 static void phys_page_set_level(PhysPageMap
*map
, PhysPageEntry
*lp
,
214 hwaddr
*index
, hwaddr
*nb
, uint16_t leaf
,
218 hwaddr step
= (hwaddr
)1 << (level
* P_L2_BITS
);
220 if (lp
->skip
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
221 lp
->ptr
= phys_map_node_alloc(map
, level
== 0);
223 p
= map
->nodes
[lp
->ptr
];
224 lp
= &p
[(*index
>> (level
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
226 while (*nb
&& lp
< &p
[P_L2_SIZE
]) {
227 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
233 phys_page_set_level(map
, lp
, index
, nb
, leaf
, level
- 1);
239 static void phys_page_set(AddressSpaceDispatch
*d
,
240 hwaddr index
, hwaddr nb
,
243 /* Wildly overreserve - it doesn't matter much. */
244 phys_map_node_reserve(&d
->map
, 3 * P_L2_LEVELS
);
246 phys_page_set_level(&d
->map
, &d
->phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
249 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
250 * and update our entry so we can skip it and go directly to the destination.
252 static void phys_page_compact(PhysPageEntry
*lp
, Node
*nodes
, unsigned long *compacted
)
254 unsigned valid_ptr
= P_L2_SIZE
;
259 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
264 for (i
= 0; i
< P_L2_SIZE
; i
++) {
265 if (p
[i
].ptr
== PHYS_MAP_NODE_NIL
) {
272 phys_page_compact(&p
[i
], nodes
, compacted
);
276 /* We can only compress if there's only one child. */
281 assert(valid_ptr
< P_L2_SIZE
);
283 /* Don't compress if it won't fit in the # of bits we have. */
284 if (lp
->skip
+ p
[valid_ptr
].skip
>= (1 << 3)) {
288 lp
->ptr
= p
[valid_ptr
].ptr
;
289 if (!p
[valid_ptr
].skip
) {
290 /* If our only child is a leaf, make this a leaf. */
291 /* By design, we should have made this node a leaf to begin with so we
292 * should never reach here.
293 * But since it's so simple to handle this, let's do it just in case we
298 lp
->skip
+= p
[valid_ptr
].skip
;
302 static void phys_page_compact_all(AddressSpaceDispatch
*d
, int nodes_nb
)
304 DECLARE_BITMAP(compacted
, nodes_nb
);
306 if (d
->phys_map
.skip
) {
307 phys_page_compact(&d
->phys_map
, d
->map
.nodes
, compacted
);
311 static MemoryRegionSection
*phys_page_find(PhysPageEntry lp
, hwaddr addr
,
312 Node
*nodes
, MemoryRegionSection
*sections
)
315 hwaddr index
= addr
>> TARGET_PAGE_BITS
;
318 for (i
= P_L2_LEVELS
; lp
.skip
&& (i
-= lp
.skip
) >= 0;) {
319 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
320 return §ions
[PHYS_SECTION_UNASSIGNED
];
323 lp
= p
[(index
>> (i
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
326 if (sections
[lp
.ptr
].size
.hi
||
327 range_covers_byte(sections
[lp
.ptr
].offset_within_address_space
,
328 sections
[lp
.ptr
].size
.lo
, addr
)) {
329 return §ions
[lp
.ptr
];
331 return §ions
[PHYS_SECTION_UNASSIGNED
];
335 bool memory_region_is_unassigned(MemoryRegion
*mr
)
337 return mr
!= &io_mem_rom
&& mr
!= &io_mem_notdirty
&& !mr
->rom_device
338 && mr
!= &io_mem_watch
;
341 /* Called from RCU critical section */
342 static MemoryRegionSection
*address_space_lookup_region(AddressSpaceDispatch
*d
,
344 bool resolve_subpage
)
346 MemoryRegionSection
*section
;
349 section
= phys_page_find(d
->phys_map
, addr
, d
->map
.nodes
, d
->map
.sections
);
350 if (resolve_subpage
&& section
->mr
->subpage
) {
351 subpage
= container_of(section
->mr
, subpage_t
, iomem
);
352 section
= &d
->map
.sections
[subpage
->sub_section
[SUBPAGE_IDX(addr
)]];
357 /* Called from RCU critical section */
358 static MemoryRegionSection
*
359 address_space_translate_internal(AddressSpaceDispatch
*d
, hwaddr addr
, hwaddr
*xlat
,
360 hwaddr
*plen
, bool resolve_subpage
)
362 MemoryRegionSection
*section
;
366 section
= address_space_lookup_region(d
, addr
, resolve_subpage
);
367 /* Compute offset within MemoryRegionSection */
368 addr
-= section
->offset_within_address_space
;
370 /* Compute offset within MemoryRegion */
371 *xlat
= addr
+ section
->offset_within_region
;
375 /* MMIO registers can be expected to perform full-width accesses based only
376 * on their address, without considering adjacent registers that could
377 * decode to completely different MemoryRegions. When such registers
378 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
379 * regions overlap wildly. For this reason we cannot clamp the accesses
382 * If the length is small (as is the case for address_space_ldl/stl),
383 * everything works fine. If the incoming length is large, however,
384 * the caller really has to do the clamping through memory_access_size.
386 if (memory_region_is_ram(mr
)) {
387 diff
= int128_sub(section
->size
, int128_make64(addr
));
388 *plen
= int128_get64(int128_min(diff
, int128_make64(*plen
)));
393 static inline bool memory_access_is_direct(MemoryRegion
*mr
, bool is_write
)
396 return memory_region_is_ram(mr
) && !mr
->readonly
;
398 return memory_region_is_ram(mr
) || memory_region_is_romd(mr
);
404 /* Called from RCU critical section */
405 MemoryRegion
*address_space_translate(AddressSpace
*as
, hwaddr addr
,
406 hwaddr
*xlat
, hwaddr
*plen
,
410 MemoryRegionSection
*section
;
414 AddressSpaceDispatch
*d
= atomic_rcu_read(&as
->dispatch
);
415 section
= address_space_translate_internal(d
, addr
, &addr
, plen
, true);
418 if (!mr
->iommu_ops
) {
422 iotlb
= mr
->iommu_ops
->translate(mr
, addr
, is_write
);
423 addr
= ((iotlb
.translated_addr
& ~iotlb
.addr_mask
)
424 | (addr
& iotlb
.addr_mask
));
425 *plen
= MIN(*plen
, (addr
| iotlb
.addr_mask
) - addr
+ 1);
426 if (!(iotlb
.perm
& (1 << is_write
))) {
427 mr
= &io_mem_unassigned
;
431 as
= iotlb
.target_as
;
434 if (xen_enabled() && memory_access_is_direct(mr
, is_write
)) {
435 hwaddr page
= ((addr
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
) - addr
;
436 *plen
= MIN(page
, *plen
);
443 /* Called from RCU critical section */
444 MemoryRegionSection
*
445 address_space_translate_for_iotlb(CPUState
*cpu
, hwaddr addr
,
446 hwaddr
*xlat
, hwaddr
*plen
)
448 MemoryRegionSection
*section
;
449 section
= address_space_translate_internal(cpu
->cpu_ases
[0].memory_dispatch
,
450 addr
, xlat
, plen
, false);
452 assert(!section
->mr
->iommu_ops
);
457 #if !defined(CONFIG_USER_ONLY)
459 static int cpu_common_post_load(void *opaque
, int version_id
)
461 CPUState
*cpu
= opaque
;
463 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
464 version_id is increased. */
465 cpu
->interrupt_request
&= ~0x01;
471 static int cpu_common_pre_load(void *opaque
)
473 CPUState
*cpu
= opaque
;
475 cpu
->exception_index
= -1;
480 static bool cpu_common_exception_index_needed(void *opaque
)
482 CPUState
*cpu
= opaque
;
484 return tcg_enabled() && cpu
->exception_index
!= -1;
487 static const VMStateDescription vmstate_cpu_common_exception_index
= {
488 .name
= "cpu_common/exception_index",
490 .minimum_version_id
= 1,
491 .needed
= cpu_common_exception_index_needed
,
492 .fields
= (VMStateField
[]) {
493 VMSTATE_INT32(exception_index
, CPUState
),
494 VMSTATE_END_OF_LIST()
498 static bool cpu_common_crash_occurred_needed(void *opaque
)
500 CPUState
*cpu
= opaque
;
502 return cpu
->crash_occurred
;
505 static const VMStateDescription vmstate_cpu_common_crash_occurred
= {
506 .name
= "cpu_common/crash_occurred",
508 .minimum_version_id
= 1,
509 .needed
= cpu_common_crash_occurred_needed
,
510 .fields
= (VMStateField
[]) {
511 VMSTATE_BOOL(crash_occurred
, CPUState
),
512 VMSTATE_END_OF_LIST()
516 const VMStateDescription vmstate_cpu_common
= {
517 .name
= "cpu_common",
519 .minimum_version_id
= 1,
520 .pre_load
= cpu_common_pre_load
,
521 .post_load
= cpu_common_post_load
,
522 .fields
= (VMStateField
[]) {
523 VMSTATE_UINT32(halted
, CPUState
),
524 VMSTATE_UINT32(interrupt_request
, CPUState
),
525 VMSTATE_END_OF_LIST()
527 .subsections
= (const VMStateDescription
*[]) {
528 &vmstate_cpu_common_exception_index
,
529 &vmstate_cpu_common_crash_occurred
,
536 CPUState
*qemu_get_cpu(int index
)
541 if (cpu
->cpu_index
== index
) {
549 #if !defined(CONFIG_USER_ONLY)
550 void tcg_cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
)
552 /* We only support one address space per cpu at the moment. */
553 assert(cpu
->as
== as
);
556 /* We've already registered the listener for our only AS */
560 cpu
->cpu_ases
= g_new0(CPUAddressSpace
, 1);
561 cpu
->cpu_ases
[0].cpu
= cpu
;
562 cpu
->cpu_ases
[0].as
= as
;
563 cpu
->cpu_ases
[0].tcg_as_listener
.commit
= tcg_commit
;
564 memory_listener_register(&cpu
->cpu_ases
[0].tcg_as_listener
, as
);
568 #ifndef CONFIG_USER_ONLY
/* Bitmap of cpu_index values currently in use (softmmu only). */
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
571 static int cpu_get_free_index(Error
**errp
)
573 int cpu
= find_first_zero_bit(cpu_index_map
, MAX_CPUMASK_BITS
);
575 if (cpu
>= MAX_CPUMASK_BITS
) {
576 error_setg(errp
, "Trying to use more CPUs than max of %d",
581 bitmap_set(cpu_index_map
, cpu
, 1);
585 void cpu_exec_exit(CPUState
*cpu
)
587 if (cpu
->cpu_index
== -1) {
588 /* cpu_index was never allocated by this @cpu or was already freed. */
592 bitmap_clear(cpu_index_map
, cpu
->cpu_index
, 1);
597 static int cpu_get_free_index(Error
**errp
)
602 CPU_FOREACH(some_cpu
) {
608 void cpu_exec_exit(CPUState
*cpu
)
613 void cpu_exec_init(CPUState
*cpu
, Error
**errp
)
615 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
617 Error
*local_err
= NULL
;
619 #ifndef CONFIG_USER_ONLY
620 cpu
->as
= &address_space_memory
;
621 cpu
->thread_id
= qemu_get_thread_id();
624 #if defined(CONFIG_USER_ONLY)
627 cpu_index
= cpu
->cpu_index
= cpu_get_free_index(&local_err
);
629 error_propagate(errp
, local_err
);
630 #if defined(CONFIG_USER_ONLY)
635 QTAILQ_INSERT_TAIL(&cpus
, cpu
, node
);
636 #if defined(CONFIG_USER_ONLY)
639 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
640 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, cpu
);
642 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
643 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
644 cpu_save
, cpu_load
, cpu
->env_ptr
);
645 assert(cc
->vmsd
== NULL
);
646 assert(qdev_get_vmsd(DEVICE(cpu
)) == NULL
);
648 if (cc
->vmsd
!= NULL
) {
649 vmstate_register(NULL
, cpu_index
, cc
->vmsd
, cpu
);
653 #if defined(CONFIG_USER_ONLY)
654 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
656 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
659 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
661 hwaddr phys
= cpu_get_phys_page_debug(cpu
, pc
);
663 tb_invalidate_phys_addr(cpu
->as
,
664 phys
| (pc
& ~TARGET_PAGE_MASK
));
669 #if defined(CONFIG_USER_ONLY)
670 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
675 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
681 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
685 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
686 int flags
, CPUWatchpoint
**watchpoint
)
691 /* Add a watchpoint. */
692 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
693 int flags
, CPUWatchpoint
**watchpoint
)
697 /* forbid ranges which are empty or run off the end of the address space */
698 if (len
== 0 || (addr
+ len
- 1) < addr
) {
699 error_report("tried to set invalid watchpoint at %"
700 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
703 wp
= g_malloc(sizeof(*wp
));
709 /* keep all GDB-injected watchpoints in front */
710 if (flags
& BP_GDB
) {
711 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
713 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
716 tlb_flush_page(cpu
, addr
);
723 /* Remove a specific watchpoint. */
724 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
729 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
730 if (addr
== wp
->vaddr
&& len
== wp
->len
731 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
732 cpu_watchpoint_remove_by_ref(cpu
, wp
);
739 /* Remove a specific watchpoint by reference. */
740 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
742 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
744 tlb_flush_page(cpu
, watchpoint
->vaddr
);
749 /* Remove all matching watchpoints. */
750 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
752 CPUWatchpoint
*wp
, *next
;
754 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
755 if (wp
->flags
& mask
) {
756 cpu_watchpoint_remove_by_ref(cpu
, wp
);
761 /* Return true if this watchpoint address matches the specified
762 * access (ie the address range covered by the watchpoint overlaps
763 * partially or completely with the address range covered by the
766 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint
*wp
,
770 /* We know the lengths are non-zero, but a little caution is
771 * required to avoid errors in the case where the range ends
772 * exactly at the top of the address space and so addr + len
773 * wraps round to zero.
775 vaddr wpend
= wp
->vaddr
+ wp
->len
- 1;
776 vaddr addrend
= addr
+ len
- 1;
778 return !(addr
> wpend
|| wp
->vaddr
> addrend
);
783 /* Add a breakpoint. */
784 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
785 CPUBreakpoint
**breakpoint
)
789 bp
= g_malloc(sizeof(*bp
));
794 /* keep all GDB-injected breakpoints in front */
795 if (flags
& BP_GDB
) {
796 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
798 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
801 breakpoint_invalidate(cpu
, pc
);
809 /* Remove a specific breakpoint. */
810 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
814 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
815 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
816 cpu_breakpoint_remove_by_ref(cpu
, bp
);
823 /* Remove a specific breakpoint by reference. */
824 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
826 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
828 breakpoint_invalidate(cpu
, breakpoint
->pc
);
833 /* Remove all matching breakpoints. */
834 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
836 CPUBreakpoint
*bp
, *next
;
838 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
839 if (bp
->flags
& mask
) {
840 cpu_breakpoint_remove_by_ref(cpu
, bp
);
845 /* enable or disable single step mode. EXCP_DEBUG is returned by the
846 CPU loop after each instruction */
847 void cpu_single_step(CPUState
*cpu
, int enabled
)
849 if (cpu
->singlestep_enabled
!= enabled
) {
850 cpu
->singlestep_enabled
= enabled
;
852 kvm_update_guest_debug(cpu
, 0);
854 /* must flush all the translated code to avoid inconsistencies */
855 /* XXX: only flush what is necessary */
861 void cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
868 fprintf(stderr
, "qemu: fatal: ");
869 vfprintf(stderr
, fmt
, ap
);
870 fprintf(stderr
, "\n");
871 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
872 if (qemu_log_separate()) {
873 qemu_log("qemu: fatal: ");
874 qemu_log_vprintf(fmt
, ap2
);
876 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
883 #if defined(CONFIG_USER_ONLY)
885 struct sigaction act
;
886 sigfillset(&act
.sa_mask
);
887 act
.sa_handler
= SIG_DFL
;
888 sigaction(SIGABRT
, &act
, NULL
);
894 #if !defined(CONFIG_USER_ONLY)
895 /* Called from RCU critical section */
896 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
900 block
= atomic_rcu_read(&ram_list
.mru_block
);
901 if (block
&& addr
- block
->offset
< block
->max_length
) {
904 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
905 if (addr
- block
->offset
< block
->max_length
) {
910 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
914 /* It is safe to write mru_block outside the iothread lock. This
919 * xxx removed from list
923 * call_rcu(reclaim_ramblock, xxx);
926 * atomic_rcu_set is not needed here. The block was already published
927 * when it was placed into the list. Here we're just making an extra
928 * copy of the pointer.
930 ram_list
.mru_block
= block
;
934 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
941 end
= TARGET_PAGE_ALIGN(start
+ length
);
942 start
&= TARGET_PAGE_MASK
;
945 block
= qemu_get_ram_block(start
);
946 assert(block
== qemu_get_ram_block(end
- 1));
947 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
949 tlb_reset_dirty(cpu
, start1
, length
);
954 /* Note: start and end must be within the same ram block. */
955 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
959 unsigned long end
, page
;
966 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
967 page
= start
>> TARGET_PAGE_BITS
;
968 dirty
= bitmap_test_and_clear_atomic(ram_list
.dirty_memory
[client
],
971 if (dirty
&& tcg_enabled()) {
972 tlb_reset_dirty_range_all(start
, length
);
978 /* Called from RCU critical section */
979 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
980 MemoryRegionSection
*section
,
982 hwaddr paddr
, hwaddr xlat
,
984 target_ulong
*address
)
989 if (memory_region_is_ram(section
->mr
)) {
991 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
993 if (!section
->readonly
) {
994 iotlb
|= PHYS_SECTION_NOTDIRTY
;
996 iotlb
|= PHYS_SECTION_ROM
;
999 AddressSpaceDispatch
*d
;
1001 d
= atomic_rcu_read(§ion
->address_space
->dispatch
);
1002 iotlb
= section
- d
->map
.sections
;
1006 /* Make accesses to pages with watchpoints go via the
1007 watchpoint trap routines. */
1008 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1009 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
1010 /* Avoid trapping reads of pages with a write breakpoint. */
1011 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1012 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
1013 *address
|= TLB_MMIO
;
1021 #endif /* defined(CONFIG_USER_ONLY) */
1023 #if !defined(CONFIG_USER_ONLY)
1025 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1027 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
1029 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
1030 qemu_anon_ram_alloc
;
1033 * Set a custom physical guest memory alloator.
1034 * Accelerators with unusual needs may need this. Hopefully, we can
1035 * get rid of it eventually.
1037 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1039 phys_mem_alloc
= alloc
;
1042 static uint16_t phys_section_add(PhysPageMap
*map
,
1043 MemoryRegionSection
*section
)
1045 /* The physical section number is ORed with a page-aligned
1046 * pointer to produce the iotlb entries. Thus it should
1047 * never overflow into the page-aligned value.
1049 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1051 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1052 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1053 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1054 map
->sections_nb_alloc
);
1056 map
->sections
[map
->sections_nb
] = *section
;
1057 memory_region_ref(section
->mr
);
1058 return map
->sections_nb
++;
1061 static void phys_section_destroy(MemoryRegion
*mr
)
1063 bool have_sub_page
= mr
->subpage
;
1065 memory_region_unref(mr
);
1067 if (have_sub_page
) {
1068 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1069 object_unref(OBJECT(&subpage
->iomem
));
1074 static void phys_sections_free(PhysPageMap
*map
)
1076 while (map
->sections_nb
> 0) {
1077 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1078 phys_section_destroy(section
->mr
);
1080 g_free(map
->sections
);
1084 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1087 hwaddr base
= section
->offset_within_address_space
1089 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1090 d
->map
.nodes
, d
->map
.sections
);
1091 MemoryRegionSection subsection
= {
1092 .offset_within_address_space
= base
,
1093 .size
= int128_make64(TARGET_PAGE_SIZE
),
1097 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1099 if (!(existing
->mr
->subpage
)) {
1100 subpage
= subpage_init(d
->as
, base
);
1101 subsection
.address_space
= d
->as
;
1102 subsection
.mr
= &subpage
->iomem
;
1103 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1104 phys_section_add(&d
->map
, &subsection
));
1106 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1108 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1109 end
= start
+ int128_get64(section
->size
) - 1;
1110 subpage_register(subpage
, start
, end
,
1111 phys_section_add(&d
->map
, section
));
1115 static void register_multipage(AddressSpaceDispatch
*d
,
1116 MemoryRegionSection
*section
)
1118 hwaddr start_addr
= section
->offset_within_address_space
;
1119 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1120 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1124 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1127 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1129 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1130 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1131 MemoryRegionSection now
= *section
, remain
= *section
;
1132 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1134 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1135 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1136 - now
.offset_within_address_space
;
1138 now
.size
= int128_min(int128_make64(left
), now
.size
);
1139 register_subpage(d
, &now
);
1141 now
.size
= int128_zero();
1143 while (int128_ne(remain
.size
, now
.size
)) {
1144 remain
.size
= int128_sub(remain
.size
, now
.size
);
1145 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1146 remain
.offset_within_region
+= int128_get64(now
.size
);
1148 if (int128_lt(remain
.size
, page_size
)) {
1149 register_subpage(d
, &now
);
1150 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1151 now
.size
= page_size
;
1152 register_subpage(d
, &now
);
1154 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1155 register_multipage(d
, &now
);
/* Flush any MMIO writes the KVM coalescing buffer is holding. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
1166 void qemu_mutex_lock_ramlist(void)
1168 qemu_mutex_lock(&ram_list
.mutex
);
1171 void qemu_mutex_unlock_ramlist(void)
1173 qemu_mutex_unlock(&ram_list
.mutex
);
1178 #include <sys/vfs.h>
1180 #define HUGETLBFS_MAGIC 0x958458f6
1182 static long gethugepagesize(const char *path
, Error
**errp
)
1188 ret
= statfs(path
, &fs
);
1189 } while (ret
!= 0 && errno
== EINTR
);
1192 error_setg_errno(errp
, errno
, "failed to get page size of file %s",
1200 static void *file_ram_alloc(RAMBlock
*block
,
1207 char *sanitized_name
;
1212 Error
*local_err
= NULL
;
1214 hpagesize
= gethugepagesize(path
, &local_err
);
1216 error_propagate(errp
, local_err
);
1219 block
->mr
->align
= hpagesize
;
1221 if (memory
< hpagesize
) {
1222 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1223 "or larger than huge page size 0x%" PRIx64
,
1228 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1230 "host lacks kvm mmu notifiers, -mem-path unsupported");
1234 if (!stat(path
, &st
) && S_ISDIR(st
.st_mode
)) {
1235 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1236 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1237 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1243 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1245 g_free(sanitized_name
);
1247 fd
= mkstemp(filename
);
1253 fd
= open(path
, O_RDWR
| O_CREAT
, 0644);
1257 error_setg_errno(errp
, errno
,
1258 "unable to create backing store for hugepages");
1262 memory
= ROUND_UP(memory
, hpagesize
);
1265 * ftruncate is not supported by hugetlbfs in older
1266 * hosts, so don't bother bailing out on errors.
1267 * If anything goes wrong with it under other filesystems,
1270 if (ftruncate(fd
, memory
)) {
1271 perror("ftruncate");
1274 area
= qemu_ram_mmap(fd
, memory
, hpagesize
, block
->flags
& RAM_SHARED
);
1275 if (area
== MAP_FAILED
) {
1276 error_setg_errno(errp
, errno
,
1277 "unable to map backing store for hugepages");
1283 os_mem_prealloc(fd
, area
, memory
);
1294 /* Called with the ramlist lock held. */
1295 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1297 RAMBlock
*block
, *next_block
;
1298 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1300 assert(size
!= 0); /* it would hand out same offset multiple times */
1302 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1306 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1307 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1309 end
= block
->offset
+ block
->max_length
;
1311 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1312 if (next_block
->offset
>= end
) {
1313 next
= MIN(next
, next_block
->offset
);
1316 if (next
- end
>= size
&& next
- end
< mingap
) {
1318 mingap
= next
- end
;
1322 if (offset
== RAM_ADDR_MAX
) {
1323 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1331 ram_addr_t
last_ram_offset(void)
1334 ram_addr_t last
= 0;
1337 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1338 last
= MAX(last
, block
->offset
+ block
->max_length
);
1344 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1348 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1349 if (!machine_dump_guest_core(current_machine
)) {
1350 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1352 perror("qemu_madvise");
1353 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1354 "but dump_guest_core=off specified\n");
1359 /* Called within an RCU critical section, or while the ramlist lock
1362 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1366 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1367 if (block
->offset
== addr
) {
1375 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1380 /* Called with iothread lock held. */
1381 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1383 RAMBlock
*new_block
, *block
;
1386 new_block
= find_ram_block(addr
);
1388 assert(!new_block
->idstr
[0]);
1391 char *id
= qdev_get_dev_path(dev
);
1393 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1397 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1399 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1400 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1401 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1409 /* Called with iothread lock held. */
1410 void qemu_ram_unset_idstr(ram_addr_t addr
)
1414 /* FIXME: arch_init.c assumes that this is not called throughout
1415 * migration. Ignore the problem since hot-unplug during migration
1416 * does not work anyway.
1420 block
= find_ram_block(addr
);
1422 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1427 static int memory_try_enable_merging(void *addr
, size_t len
)
1429 if (!machine_mem_merge(current_machine
)) {
1430 /* disabled by the user */
1434 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1437 /* Only legal before guest might have detected the memory size: e.g. on
1438 * incoming migration, or right after reset.
1440 * As memory core doesn't know how is memory accessed, it is up to
1441 * resize callback to update device state and/or add assertions to detect
1442 * misuse, if necessary.
1444 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1446 RAMBlock
*block
= find_ram_block(base
);
1450 newsize
= HOST_PAGE_ALIGN(newsize
);
1452 if (block
->used_length
== newsize
) {
1456 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1457 error_setg_errno(errp
, EINVAL
,
1458 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1459 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1460 newsize
, block
->used_length
);
1464 if (block
->max_length
< newsize
) {
1465 error_setg_errno(errp
, EINVAL
,
1466 "Length too large: %s: 0x" RAM_ADDR_FMT
1467 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1468 newsize
, block
->max_length
);
1472 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1473 block
->used_length
= newsize
;
1474 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1476 memory_region_set_size(block
->mr
, newsize
);
1477 if (block
->resized
) {
1478 block
->resized(block
->idstr
, newsize
, block
->host
);
1483 static ram_addr_t
ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1486 RAMBlock
*last_block
= NULL
;
1487 ram_addr_t old_ram_size
, new_ram_size
;
1489 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1491 qemu_mutex_lock_ramlist();
1492 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1494 if (!new_block
->host
) {
1495 if (xen_enabled()) {
1496 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1499 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1500 &new_block
->mr
->align
);
1501 if (!new_block
->host
) {
1502 error_setg_errno(errp
, errno
,
1503 "cannot set up guest memory '%s'",
1504 memory_region_name(new_block
->mr
));
1505 qemu_mutex_unlock_ramlist();
1508 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1512 new_ram_size
= MAX(old_ram_size
,
1513 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1514 if (new_ram_size
> old_ram_size
) {
1515 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1517 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1518 * QLIST (which has an RCU-friendly variant) does not have insertion at
1519 * tail, so save the last element in last_block.
1521 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1523 if (block
->max_length
< new_block
->max_length
) {
1528 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1529 } else if (last_block
) {
1530 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1531 } else { /* list is empty */
1532 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1534 ram_list
.mru_block
= NULL
;
1536 /* Write list before version */
1539 qemu_mutex_unlock_ramlist();
1541 new_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1543 if (new_ram_size
> old_ram_size
) {
1546 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1547 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1548 ram_list
.dirty_memory
[i
] =
1549 bitmap_zero_extend(ram_list
.dirty_memory
[i
],
1550 old_ram_size
, new_ram_size
);
1553 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1554 new_block
->used_length
,
1557 if (new_block
->host
) {
1558 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1559 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1560 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1561 if (kvm_enabled()) {
1562 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1566 return new_block
->offset
;
1570 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1571 bool share
, const char *mem_path
,
1574 RAMBlock
*new_block
;
1576 Error
*local_err
= NULL
;
1578 if (xen_enabled()) {
1579 error_setg(errp
, "-mem-path not supported with Xen");
1583 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1585 * file_ram_alloc() needs to allocate just like
1586 * phys_mem_alloc, but we haven't bothered to provide
1590 "-mem-path not supported with this accelerator");
1594 size
= HOST_PAGE_ALIGN(size
);
1595 new_block
= g_malloc0(sizeof(*new_block
));
1597 new_block
->used_length
= size
;
1598 new_block
->max_length
= size
;
1599 new_block
->flags
= share
? RAM_SHARED
: 0;
1600 new_block
->host
= file_ram_alloc(new_block
, size
,
1602 if (!new_block
->host
) {
1607 addr
= ram_block_add(new_block
, &local_err
);
1610 error_propagate(errp
, local_err
);
1618 ram_addr_t
qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1619 void (*resized
)(const char*,
1622 void *host
, bool resizeable
,
1623 MemoryRegion
*mr
, Error
**errp
)
1625 RAMBlock
*new_block
;
1627 Error
*local_err
= NULL
;
1629 size
= HOST_PAGE_ALIGN(size
);
1630 max_size
= HOST_PAGE_ALIGN(max_size
);
1631 new_block
= g_malloc0(sizeof(*new_block
));
1633 new_block
->resized
= resized
;
1634 new_block
->used_length
= size
;
1635 new_block
->max_length
= max_size
;
1636 assert(max_size
>= size
);
1638 new_block
->host
= host
;
1640 new_block
->flags
|= RAM_PREALLOC
;
1643 new_block
->flags
|= RAM_RESIZEABLE
;
1645 addr
= ram_block_add(new_block
, &local_err
);
1648 error_propagate(errp
, local_err
);
1654 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1655 MemoryRegion
*mr
, Error
**errp
)
1657 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1660 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1662 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1665 ram_addr_t
qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1666 void (*resized
)(const char*,
1669 MemoryRegion
*mr
, Error
**errp
)
1671 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1674 static void reclaim_ramblock(RAMBlock
*block
)
1676 if (block
->flags
& RAM_PREALLOC
) {
1678 } else if (xen_enabled()) {
1679 xen_invalidate_map_cache_entry(block
->host
);
1681 } else if (block
->fd
>= 0) {
1682 qemu_ram_munmap(block
->host
, block
->max_length
);
1686 qemu_anon_ram_free(block
->host
, block
->max_length
);
1691 void qemu_ram_free(ram_addr_t addr
)
1695 qemu_mutex_lock_ramlist();
1696 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1697 if (addr
== block
->offset
) {
1698 QLIST_REMOVE_RCU(block
, next
);
1699 ram_list
.mru_block
= NULL
;
1700 /* Write list before version */
1703 call_rcu(block
, reclaim_ramblock
, rcu
);
1707 qemu_mutex_unlock_ramlist();
1711 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1718 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1719 offset
= addr
- block
->offset
;
1720 if (offset
< block
->max_length
) {
1721 vaddr
= ramblock_ptr(block
, offset
);
1722 if (block
->flags
& RAM_PREALLOC
) {
1724 } else if (xen_enabled()) {
1728 if (block
->fd
>= 0) {
1729 flags
|= (block
->flags
& RAM_SHARED
?
1730 MAP_SHARED
: MAP_PRIVATE
);
1731 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1732 flags
, block
->fd
, offset
);
1735 * Remap needs to match alloc. Accelerators that
1736 * set phys_mem_alloc never remap. If they did,
1737 * we'd need a remap hook here.
1739 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1741 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1742 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1745 if (area
!= vaddr
) {
1746 fprintf(stderr
, "Could not remap addr: "
1747 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1751 memory_try_enable_merging(vaddr
, length
);
1752 qemu_ram_setup_dump(vaddr
, length
);
1757 #endif /* !_WIN32 */
1759 int qemu_get_ram_fd(ram_addr_t addr
)
1765 block
= qemu_get_ram_block(addr
);
1771 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1777 block
= qemu_get_ram_block(addr
);
1778 ptr
= ramblock_ptr(block
, 0);
1783 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1784 * This should not be used for general purpose DMA. Use address_space_map
1785 * or address_space_rw instead. For local memory (e.g. video ram) that the
1786 * device owns, use memory_region_get_ram_ptr.
1788 * Called within RCU critical section.
1790 void *qemu_get_ram_ptr(ram_addr_t addr
)
1792 RAMBlock
*block
= qemu_get_ram_block(addr
);
1794 if (xen_enabled() && block
->host
== NULL
) {
1795 /* We need to check if the requested address is in the RAM
1796 * because we don't want to map the entire memory in QEMU.
1797 * In that case just map until the end of the page.
1799 if (block
->offset
== 0) {
1800 return xen_map_cache(addr
, 0, 0);
1803 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1805 return ramblock_ptr(block
, addr
- block
->offset
);
1808 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1809 * but takes a size argument.
1811 * Called within RCU critical section.
1813 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1816 ram_addr_t offset_inside_block
;
1821 block
= qemu_get_ram_block(addr
);
1822 offset_inside_block
= addr
- block
->offset
;
1823 *size
= MIN(*size
, block
->max_length
- offset_inside_block
);
1825 if (xen_enabled() && block
->host
== NULL
) {
1826 /* We need to check if the requested address is in the RAM
1827 * because we don't want to map the entire memory in QEMU.
1828 * In that case just map the requested area.
1830 if (block
->offset
== 0) {
1831 return xen_map_cache(addr
, *size
, 1);
1834 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1837 return ramblock_ptr(block
, offset_inside_block
);
1841 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1844 * ptr: Host pointer to look up
1845 * round_offset: If true round the result offset down to a page boundary
1846 * *ram_addr: set to result ram_addr
1847 * *offset: set to result offset within the RAMBlock
1849 * Returns: RAMBlock (or NULL if not found)
1851 * By the time this function returns, the returned pointer is not protected
1852 * by RCU anymore. If the caller is not within an RCU critical section and
1853 * does not hold the iothread lock, it must have other means of protecting the
1854 * pointer, such as a reference to the region that includes the incoming
1857 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1858 ram_addr_t
*ram_addr
,
1862 uint8_t *host
= ptr
;
1864 if (xen_enabled()) {
1866 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1867 block
= qemu_get_ram_block(*ram_addr
);
1869 *offset
= (host
- block
->host
);
1876 block
= atomic_rcu_read(&ram_list
.mru_block
);
1877 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1881 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1882 /* This case append when the block is not mapped. */
1883 if (block
->host
== NULL
) {
1886 if (host
- block
->host
< block
->max_length
) {
1895 *offset
= (host
- block
->host
);
1897 *offset
&= TARGET_PAGE_MASK
;
1899 *ram_addr
= block
->offset
+ *offset
;
1905 * Finds the named RAMBlock
1907 * name: The name of RAMBlock to find
1909 * Returns: RAMBlock (or NULL if not found)
1911 RAMBlock
*qemu_ram_block_by_name(const char *name
)
1915 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1916 if (!strcmp(name
, block
->idstr
)) {
1924 /* Some of the softmmu routines need to translate from a host pointer
1925 (typically a TLB entry) back to a ram offset. */
1926 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1929 ram_addr_t offset
; /* Not used */
1931 block
= qemu_ram_block_from_host(ptr
, false, ram_addr
, &offset
);
1940 /* Called within RCU critical section. */
1941 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1942 uint64_t val
, unsigned size
)
1944 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1945 tb_invalidate_phys_page_fast(ram_addr
, size
);
1949 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1952 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1955 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
1960 /* Set both VGA and migration bits for simplicity and to remove
1961 * the notdirty callback faster.
1963 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
1964 DIRTY_CLIENTS_NOCODE
);
1965 /* we remove the notdirty callback only if the code has been
1967 if (!cpu_physical_memory_is_clean(ram_addr
)) {
1968 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
1972 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
1973 unsigned size
, bool is_write
)
1978 static const MemoryRegionOps notdirty_mem_ops
= {
1979 .write
= notdirty_mem_write
,
1980 .valid
.accepts
= notdirty_mem_accepts
,
1981 .endianness
= DEVICE_NATIVE_ENDIAN
,
1984 /* Generate a debug exception if a watchpoint has been hit. */
1985 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
1987 CPUState
*cpu
= current_cpu
;
1988 CPUArchState
*env
= cpu
->env_ptr
;
1989 target_ulong pc
, cs_base
;
1994 if (cpu
->watchpoint_hit
) {
1995 /* We re-entered the check after replacing the TB. Now raise
1996 * the debug interrupt so that is will trigger after the
1997 * current instruction. */
1998 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2001 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2002 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2003 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2004 && (wp
->flags
& flags
)) {
2005 if (flags
== BP_MEM_READ
) {
2006 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2008 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2010 wp
->hitaddr
= vaddr
;
2011 wp
->hitattrs
= attrs
;
2012 if (!cpu
->watchpoint_hit
) {
2013 cpu
->watchpoint_hit
= wp
;
2014 tb_check_watchpoint(cpu
);
2015 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2016 cpu
->exception_index
= EXCP_DEBUG
;
2019 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2020 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2021 cpu_resume_from_signal(cpu
, NULL
);
2025 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2030 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2031 so these check for a hit then pass through to the normal out-of-line
2033 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2034 unsigned size
, MemTxAttrs attrs
)
2039 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2042 data
= address_space_ldub(&address_space_memory
, addr
, attrs
, &res
);
2045 data
= address_space_lduw(&address_space_memory
, addr
, attrs
, &res
);
2048 data
= address_space_ldl(&address_space_memory
, addr
, attrs
, &res
);
2056 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2057 uint64_t val
, unsigned size
,
2062 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2065 address_space_stb(&address_space_memory
, addr
, val
, attrs
, &res
);
2068 address_space_stw(&address_space_memory
, addr
, val
, attrs
, &res
);
2071 address_space_stl(&address_space_memory
, addr
, val
, attrs
, &res
);
2078 static const MemoryRegionOps watch_mem_ops
= {
2079 .read_with_attrs
= watch_mem_read
,
2080 .write_with_attrs
= watch_mem_write
,
2081 .endianness
= DEVICE_NATIVE_ENDIAN
,
2084 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2085 unsigned len
, MemTxAttrs attrs
)
2087 subpage_t
*subpage
= opaque
;
2091 #if defined(DEBUG_SUBPAGE)
2092 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2093 subpage
, len
, addr
);
2095 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2102 *data
= ldub_p(buf
);
2105 *data
= lduw_p(buf
);
2118 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2119 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2121 subpage_t
*subpage
= opaque
;
2124 #if defined(DEBUG_SUBPAGE)
2125 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2126 " value %"PRIx64
"\n",
2127 __func__
, subpage
, len
, addr
, value
);
2145 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2149 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2150 unsigned len
, bool is_write
)
2152 subpage_t
*subpage
= opaque
;
2153 #if defined(DEBUG_SUBPAGE)
2154 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2155 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2158 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2162 static const MemoryRegionOps subpage_ops
= {
2163 .read_with_attrs
= subpage_read
,
2164 .write_with_attrs
= subpage_write
,
2165 .impl
.min_access_size
= 1,
2166 .impl
.max_access_size
= 8,
2167 .valid
.min_access_size
= 1,
2168 .valid
.max_access_size
= 8,
2169 .valid
.accepts
= subpage_accepts
,
2170 .endianness
= DEVICE_NATIVE_ENDIAN
,
2173 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2178 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2180 idx
= SUBPAGE_IDX(start
);
2181 eidx
= SUBPAGE_IDX(end
);
2182 #if defined(DEBUG_SUBPAGE)
2183 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2184 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2186 for (; idx
<= eidx
; idx
++) {
2187 mmio
->sub_section
[idx
] = section
;
2193 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2197 mmio
= g_malloc0(sizeof(subpage_t
));
2201 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2202 NULL
, TARGET_PAGE_SIZE
);
2203 mmio
->iomem
.subpage
= true;
2204 #if defined(DEBUG_SUBPAGE)
2205 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2206 mmio
, base
, TARGET_PAGE_SIZE
);
2208 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2213 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2217 MemoryRegionSection section
= {
2218 .address_space
= as
,
2220 .offset_within_address_space
= 0,
2221 .offset_within_region
= 0,
2222 .size
= int128_2_64(),
2225 return phys_section_add(map
, §ion
);
2228 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
)
2230 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[0];
2231 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2232 MemoryRegionSection
*sections
= d
->map
.sections
;
2234 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2237 static void io_mem_init(void)
2239 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2240 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2242 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2244 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2248 static void mem_begin(MemoryListener
*listener
)
2250 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2251 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2254 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2255 assert(n
== PHYS_SECTION_UNASSIGNED
);
2256 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2257 assert(n
== PHYS_SECTION_NOTDIRTY
);
2258 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2259 assert(n
== PHYS_SECTION_ROM
);
2260 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2261 assert(n
== PHYS_SECTION_WATCH
);
2263 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2265 as
->next_dispatch
= d
;
2268 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2270 phys_sections_free(&d
->map
);
2274 static void mem_commit(MemoryListener
*listener
)
2276 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2277 AddressSpaceDispatch
*cur
= as
->dispatch
;
2278 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2280 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2282 atomic_rcu_set(&as
->dispatch
, next
);
2284 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2288 static void tcg_commit(MemoryListener
*listener
)
2290 CPUAddressSpace
*cpuas
;
2291 AddressSpaceDispatch
*d
;
2293 /* since each CPU stores ram addresses in its TLB cache, we must
2294 reset the modified entries */
2295 cpuas
= container_of(listener
, CPUAddressSpace
, tcg_as_listener
);
2296 cpu_reloading_memory_map();
2297 /* The CPU and TLB are protected by the iothread lock.
2298 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2299 * may have split the RCU critical section.
2301 d
= atomic_rcu_read(&cpuas
->as
->dispatch
);
2302 cpuas
->memory_dispatch
= d
;
2303 tlb_flush(cpuas
->cpu
, 1);
2306 void address_space_init_dispatch(AddressSpace
*as
)
2308 as
->dispatch
= NULL
;
2309 as
->dispatch_listener
= (MemoryListener
) {
2311 .commit
= mem_commit
,
2312 .region_add
= mem_add
,
2313 .region_nop
= mem_add
,
2316 memory_listener_register(&as
->dispatch_listener
, as
);
2319 void address_space_unregister(AddressSpace
*as
)
2321 memory_listener_unregister(&as
->dispatch_listener
);
2324 void address_space_destroy_dispatch(AddressSpace
*as
)
2326 AddressSpaceDispatch
*d
= as
->dispatch
;
2328 atomic_rcu_set(&as
->dispatch
, NULL
);
2330 call_rcu(d
, address_space_dispatch_free
, rcu
);
2334 static void memory_map_init(void)
2336 system_memory
= g_malloc(sizeof(*system_memory
));
2338 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2339 address_space_init(&address_space_memory
, system_memory
, "memory");
2341 system_io
= g_malloc(sizeof(*system_io
));
2342 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2344 address_space_init(&address_space_io
, system_io
, "I/O");
2347 MemoryRegion
*get_system_memory(void)
2349 return system_memory
;
2352 MemoryRegion
*get_system_io(void)
2357 #endif /* !defined(CONFIG_USER_ONLY) */
2359 /* physical memory access (slow version, mainly for debug) */
2360 #if defined(CONFIG_USER_ONLY)
2361 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2362 uint8_t *buf
, int len
, int is_write
)
2369 page
= addr
& TARGET_PAGE_MASK
;
2370 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2373 flags
= page_get_flags(page
);
2374 if (!(flags
& PAGE_VALID
))
2377 if (!(flags
& PAGE_WRITE
))
2379 /* XXX: this code should not depend on lock_user */
2380 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2383 unlock_user(p
, addr
, l
);
2385 if (!(flags
& PAGE_READ
))
2387 /* XXX: this code should not depend on lock_user */
2388 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2391 unlock_user(p
, addr
, 0);
2402 static void invalidate_and_set_dirty(MemoryRegion
*mr
, hwaddr addr
,
2405 uint8_t dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
2406 /* No early return if dirty_log_mask is or becomes 0, because
2407 * cpu_physical_memory_set_dirty_range will still call
2408 * xen_modified_memory.
2410 if (dirty_log_mask
) {
2412 cpu_physical_memory_range_includes_clean(addr
, length
, dirty_log_mask
);
2414 if (dirty_log_mask
& (1 << DIRTY_MEMORY_CODE
)) {
2415 tb_invalidate_phys_range(addr
, addr
+ length
);
2416 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
2418 cpu_physical_memory_set_dirty_range(addr
, length
, dirty_log_mask
);
2421 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
2423 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
2425 /* Regions are assumed to support 1-4 byte accesses unless
2426 otherwise specified. */
2427 if (access_size_max
== 0) {
2428 access_size_max
= 4;
2431 /* Bound the maximum access by the alignment of the address. */
2432 if (!mr
->ops
->impl
.unaligned
) {
2433 unsigned align_size_max
= addr
& -addr
;
2434 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
2435 access_size_max
= align_size_max
;
2439 /* Don't attempt accesses larger than the maximum. */
2440 if (l
> access_size_max
) {
2441 l
= access_size_max
;
2448 static bool prepare_mmio_access(MemoryRegion
*mr
)
2450 bool unlocked
= !qemu_mutex_iothread_locked();
2451 bool release_lock
= false;
2453 if (unlocked
&& mr
->global_locking
) {
2454 qemu_mutex_lock_iothread();
2456 release_lock
= true;
2458 if (mr
->flush_coalesced_mmio
) {
2460 qemu_mutex_lock_iothread();
2462 qemu_flush_coalesced_mmio_buffer();
2464 qemu_mutex_unlock_iothread();
2468 return release_lock
;
2471 /* Called within RCU critical section. */
2472 static MemTxResult
address_space_write_continue(AddressSpace
*as
, hwaddr addr
,
2475 int len
, hwaddr addr1
,
2476 hwaddr l
, MemoryRegion
*mr
)
2480 MemTxResult result
= MEMTX_OK
;
2481 bool release_lock
= false;
2484 if (!memory_access_is_direct(mr
, true)) {
2485 release_lock
|= prepare_mmio_access(mr
);
2486 l
= memory_access_size(mr
, l
, addr1
);
2487 /* XXX: could force current_cpu to NULL to avoid
2491 /* 64 bit write access */
2493 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 8,
2497 /* 32 bit write access */
2499 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 4,
2503 /* 16 bit write access */
2505 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 2,
2509 /* 8 bit write access */
2511 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 1,
2518 addr1
+= memory_region_get_ram_addr(mr
);
2520 ptr
= qemu_get_ram_ptr(addr1
);
2521 memcpy(ptr
, buf
, l
);
2522 invalidate_and_set_dirty(mr
, addr1
, l
);
2526 qemu_mutex_unlock_iothread();
2527 release_lock
= false;
2539 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2545 MemTxResult
address_space_write(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2546 const uint8_t *buf
, int len
)
2551 MemTxResult result
= MEMTX_OK
;
2556 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2557 result
= address_space_write_continue(as
, addr
, attrs
, buf
, len
,
2565 /* Called within RCU critical section. */
2566 MemTxResult
address_space_read_continue(AddressSpace
*as
, hwaddr addr
,
2567 MemTxAttrs attrs
, uint8_t *buf
,
2568 int len
, hwaddr addr1
, hwaddr l
,
2573 MemTxResult result
= MEMTX_OK
;
2574 bool release_lock
= false;
2577 if (!memory_access_is_direct(mr
, false)) {
2579 release_lock
|= prepare_mmio_access(mr
);
2580 l
= memory_access_size(mr
, l
, addr1
);
2583 /* 64 bit read access */
2584 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 8,
2589 /* 32 bit read access */
2590 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 4,
2595 /* 16 bit read access */
2596 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 2,
2601 /* 8 bit read access */
2602 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 1,
2611 ptr
= qemu_get_ram_ptr(mr
->ram_addr
+ addr1
);
2612 memcpy(buf
, ptr
, l
);
2616 qemu_mutex_unlock_iothread();
2617 release_lock
= false;
2629 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2635 MemTxResult
address_space_read(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2636 uint8_t *buf
, int len
)
2641 MemTxResult result
= MEMTX_OK
;
2646 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2647 result
= address_space_read_continue(as
, addr
, attrs
, buf
, len
,
2655 MemTxResult
address_space_rw(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2656 uint8_t *buf
, int len
, bool is_write
)
2659 return address_space_write(as
, addr
, attrs
, (uint8_t *)buf
, len
);
2661 return address_space_read(as
, addr
, attrs
, (uint8_t *)buf
, len
);
2665 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
2666 int len
, int is_write
)
2668 address_space_rw(&address_space_memory
, addr
, MEMTXATTRS_UNSPECIFIED
,
2669 buf
, len
, is_write
);
2672 enum write_rom_type
{
2677 static inline void cpu_physical_memory_write_rom_internal(AddressSpace
*as
,
2678 hwaddr addr
, const uint8_t *buf
, int len
, enum write_rom_type type
)
2688 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2690 if (!(memory_region_is_ram(mr
) ||
2691 memory_region_is_romd(mr
))) {
2692 l
= memory_access_size(mr
, l
, addr1
);
2694 addr1
+= memory_region_get_ram_addr(mr
);
2696 ptr
= qemu_get_ram_ptr(addr1
);
2699 memcpy(ptr
, buf
, l
);
2700 invalidate_and_set_dirty(mr
, addr1
, l
);
2703 flush_icache_range((uintptr_t)ptr
, (uintptr_t)ptr
+ l
);
2714 /* used for ROM loading : can write in RAM and ROM */
2715 void cpu_physical_memory_write_rom(AddressSpace
*as
, hwaddr addr
,
2716 const uint8_t *buf
, int len
)
2718 cpu_physical_memory_write_rom_internal(as
, addr
, buf
, len
, WRITE_DATA
);
2721 void cpu_flush_icache_range(hwaddr start
, int len
)
2724 * This function should do the same thing as an icache flush that was
2725 * triggered from within the guest. For TCG we are always cache coherent,
2726 * so there is no need to flush anything. For KVM / Xen we need to flush
2727 * the host's instruction cache at least.
2729 if (tcg_enabled()) {
2733 cpu_physical_memory_write_rom_internal(&address_space_memory
,
2734 start
, NULL
, len
, FLUSH_CACHE
);
2745 static BounceBuffer bounce
;
2747 typedef struct MapClient
{
2749 QLIST_ENTRY(MapClient
) link
;
2752 QemuMutex map_client_list_lock
;
2753 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2754 = QLIST_HEAD_INITIALIZER(map_client_list
);
2756 static void cpu_unregister_map_client_do(MapClient
*client
)
2758 QLIST_REMOVE(client
, link
);
2762 static void cpu_notify_map_clients_locked(void)
2766 while (!QLIST_EMPTY(&map_client_list
)) {
2767 client
= QLIST_FIRST(&map_client_list
);
2768 qemu_bh_schedule(client
->bh
);
2769 cpu_unregister_map_client_do(client
);
2773 void cpu_register_map_client(QEMUBH
*bh
)
2775 MapClient
*client
= g_malloc(sizeof(*client
));
2777 qemu_mutex_lock(&map_client_list_lock
);
2779 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2780 if (!atomic_read(&bounce
.in_use
)) {
2781 cpu_notify_map_clients_locked();
2783 qemu_mutex_unlock(&map_client_list_lock
);
2786 void cpu_exec_init_all(void)
2788 qemu_mutex_init(&ram_list
.mutex
);
2791 qemu_mutex_init(&map_client_list_lock
);
2794 void cpu_unregister_map_client(QEMUBH
*bh
)
2798 qemu_mutex_lock(&map_client_list_lock
);
2799 QLIST_FOREACH(client
, &map_client_list
, link
) {
2800 if (client
->bh
== bh
) {
2801 cpu_unregister_map_client_do(client
);
2805 qemu_mutex_unlock(&map_client_list_lock
);
2808 static void cpu_notify_map_clients(void)
2810 qemu_mutex_lock(&map_client_list_lock
);
2811 cpu_notify_map_clients_locked();
2812 qemu_mutex_unlock(&map_client_list_lock
);
2815 bool address_space_access_valid(AddressSpace
*as
, hwaddr addr
, int len
, bool is_write
)
2823 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2824 if (!memory_access_is_direct(mr
, is_write
)) {
2825 l
= memory_access_size(mr
, l
, addr
);
2826 if (!memory_region_access_valid(mr
, xlat
, l
, is_write
)) {
2838 /* Map a physical memory region into a host virtual address.
2839 * May map a subset of the requested range, given by and returned in *plen.
2840 * May return NULL if resources needed to perform the mapping are exhausted.
2841 * Use only for reads OR writes - not for read-modify-write operations.
2842 * Use cpu_register_map_client() to know when retrying the map operation is
2843 * likely to succeed.
2845 void *address_space_map(AddressSpace
*as
,
2852 hwaddr l
, xlat
, base
;
2853 MemoryRegion
*mr
, *this_mr
;
2863 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2865 if (!memory_access_is_direct(mr
, is_write
)) {
2866 if (atomic_xchg(&bounce
.in_use
, true)) {
2870 /* Avoid unbounded allocations */
2871 l
= MIN(l
, TARGET_PAGE_SIZE
);
2872 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, l
);
2876 memory_region_ref(mr
);
2879 address_space_read(as
, addr
, MEMTXATTRS_UNSPECIFIED
,
2885 return bounce
.buffer
;
2889 raddr
= memory_region_get_ram_addr(mr
);
2900 this_mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2901 if (this_mr
!= mr
|| xlat
!= base
+ done
) {
2906 memory_region_ref(mr
);
2908 ptr
= qemu_ram_ptr_length(raddr
+ base
, plen
);
2914 /* Unmaps a memory region previously mapped by address_space_map().
2915 * Will also mark the memory as dirty if is_write == 1. access_len gives
2916 * the amount of memory that was actually read or written by the caller.
2918 void address_space_unmap(AddressSpace
*as
, void *buffer
, hwaddr len
,
2919 int is_write
, hwaddr access_len
)
2921 if (buffer
!= bounce
.buffer
) {
2925 mr
= qemu_ram_addr_from_host(buffer
, &addr1
);
2928 invalidate_and_set_dirty(mr
, addr1
, access_len
);
2930 if (xen_enabled()) {
2931 xen_invalidate_map_cache_entry(buffer
);
2933 memory_region_unref(mr
);
2937 address_space_write(as
, bounce
.addr
, MEMTXATTRS_UNSPECIFIED
,
2938 bounce
.buffer
, access_len
);
2940 qemu_vfree(bounce
.buffer
);
2941 bounce
.buffer
= NULL
;
2942 memory_region_unref(bounce
.mr
);
2943 atomic_mb_set(&bounce
.in_use
, false);
2944 cpu_notify_map_clients();
2947 void *cpu_physical_memory_map(hwaddr addr
,
2951 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
2954 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
2955 int is_write
, hwaddr access_len
)
2957 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
2960 /* warning: addr must be aligned */
2961 static inline uint32_t address_space_ldl_internal(AddressSpace
*as
, hwaddr addr
,
2963 MemTxResult
*result
,
2964 enum device_endian endian
)
2972 bool release_lock
= false;
2975 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2976 if (l
< 4 || !memory_access_is_direct(mr
, false)) {
2977 release_lock
|= prepare_mmio_access(mr
);
2980 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 4, attrs
);
2981 #if defined(TARGET_WORDS_BIGENDIAN)
2982 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2986 if (endian
== DEVICE_BIG_ENDIAN
) {
2992 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
2996 case DEVICE_LITTLE_ENDIAN
:
2997 val
= ldl_le_p(ptr
);
2999 case DEVICE_BIG_ENDIAN
:
3000 val
= ldl_be_p(ptr
);
3012 qemu_mutex_unlock_iothread();
3018 uint32_t address_space_ldl(AddressSpace
*as
, hwaddr addr
,
3019 MemTxAttrs attrs
, MemTxResult
*result
)
3021 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3022 DEVICE_NATIVE_ENDIAN
);
3025 uint32_t address_space_ldl_le(AddressSpace
*as
, hwaddr addr
,
3026 MemTxAttrs attrs
, MemTxResult
*result
)
3028 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3029 DEVICE_LITTLE_ENDIAN
);
3032 uint32_t address_space_ldl_be(AddressSpace
*as
, hwaddr addr
,
3033 MemTxAttrs attrs
, MemTxResult
*result
)
3035 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3039 uint32_t ldl_phys(AddressSpace
*as
, hwaddr addr
)
3041 return address_space_ldl(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3044 uint32_t ldl_le_phys(AddressSpace
*as
, hwaddr addr
)
3046 return address_space_ldl_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3049 uint32_t ldl_be_phys(AddressSpace
*as
, hwaddr addr
)
3051 return address_space_ldl_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3054 /* warning: addr must be aligned */
3055 static inline uint64_t address_space_ldq_internal(AddressSpace
*as
, hwaddr addr
,
3057 MemTxResult
*result
,
3058 enum device_endian endian
)
3066 bool release_lock
= false;
3069 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3071 if (l
< 8 || !memory_access_is_direct(mr
, false)) {
3072 release_lock
|= prepare_mmio_access(mr
);
3075 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 8, attrs
);
3076 #if defined(TARGET_WORDS_BIGENDIAN)
3077 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3081 if (endian
== DEVICE_BIG_ENDIAN
) {
3087 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
3091 case DEVICE_LITTLE_ENDIAN
:
3092 val
= ldq_le_p(ptr
);
3094 case DEVICE_BIG_ENDIAN
:
3095 val
= ldq_be_p(ptr
);
3107 qemu_mutex_unlock_iothread();
3113 uint64_t address_space_ldq(AddressSpace
*as
, hwaddr addr
,
3114 MemTxAttrs attrs
, MemTxResult
*result
)
3116 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3117 DEVICE_NATIVE_ENDIAN
);
3120 uint64_t address_space_ldq_le(AddressSpace
*as
, hwaddr addr
,
3121 MemTxAttrs attrs
, MemTxResult
*result
)
3123 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3124 DEVICE_LITTLE_ENDIAN
);
3127 uint64_t address_space_ldq_be(AddressSpace
*as
, hwaddr addr
,
3128 MemTxAttrs attrs
, MemTxResult
*result
)
3130 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3134 uint64_t ldq_phys(AddressSpace
*as
, hwaddr addr
)
3136 return address_space_ldq(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3139 uint64_t ldq_le_phys(AddressSpace
*as
, hwaddr addr
)
3141 return address_space_ldq_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3144 uint64_t ldq_be_phys(AddressSpace
*as
, hwaddr addr
)
3146 return address_space_ldq_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3150 uint32_t address_space_ldub(AddressSpace
*as
, hwaddr addr
,
3151 MemTxAttrs attrs
, MemTxResult
*result
)
3156 r
= address_space_rw(as
, addr
, attrs
, &val
, 1, 0);
3163 uint32_t ldub_phys(AddressSpace
*as
, hwaddr addr
)
3165 return address_space_ldub(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3168 /* warning: addr must be aligned */
3169 static inline uint32_t address_space_lduw_internal(AddressSpace
*as
,
3172 MemTxResult
*result
,
3173 enum device_endian endian
)
3181 bool release_lock
= false;
3184 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3186 if (l
< 2 || !memory_access_is_direct(mr
, false)) {
3187 release_lock
|= prepare_mmio_access(mr
);
3190 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 2, attrs
);
3191 #if defined(TARGET_WORDS_BIGENDIAN)
3192 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3196 if (endian
== DEVICE_BIG_ENDIAN
) {
3202 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
3206 case DEVICE_LITTLE_ENDIAN
:
3207 val
= lduw_le_p(ptr
);
3209 case DEVICE_BIG_ENDIAN
:
3210 val
= lduw_be_p(ptr
);
3222 qemu_mutex_unlock_iothread();
3228 uint32_t address_space_lduw(AddressSpace
*as
, hwaddr addr
,
3229 MemTxAttrs attrs
, MemTxResult
*result
)
3231 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3232 DEVICE_NATIVE_ENDIAN
);
3235 uint32_t address_space_lduw_le(AddressSpace
*as
, hwaddr addr
,
3236 MemTxAttrs attrs
, MemTxResult
*result
)
3238 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3239 DEVICE_LITTLE_ENDIAN
);
3242 uint32_t address_space_lduw_be(AddressSpace
*as
, hwaddr addr
,
3243 MemTxAttrs attrs
, MemTxResult
*result
)
3245 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3249 uint32_t lduw_phys(AddressSpace
*as
, hwaddr addr
)
3251 return address_space_lduw(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3254 uint32_t lduw_le_phys(AddressSpace
*as
, hwaddr addr
)
3256 return address_space_lduw_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3259 uint32_t lduw_be_phys(AddressSpace
*as
, hwaddr addr
)
3261 return address_space_lduw_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3264 /* warning: addr must be aligned. The ram page is not masked as dirty
3265 and the code inside is not invalidated. It is useful if the dirty
3266 bits are used to track modified PTEs */
3267 void address_space_stl_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3268 MemTxAttrs attrs
, MemTxResult
*result
)
3275 uint8_t dirty_log_mask
;
3276 bool release_lock
= false;
3279 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3281 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3282 release_lock
|= prepare_mmio_access(mr
);
3284 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3286 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3287 ptr
= qemu_get_ram_ptr(addr1
);
3290 dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
3291 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
3292 cpu_physical_memory_set_dirty_range(addr1
, 4, dirty_log_mask
);
3299 qemu_mutex_unlock_iothread();
3304 void stl_phys_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3306 address_space_stl_notdirty(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3309 /* warning: addr must be aligned */
3310 static inline void address_space_stl_internal(AddressSpace
*as
,
3311 hwaddr addr
, uint32_t val
,
3313 MemTxResult
*result
,
3314 enum device_endian endian
)
3321 bool release_lock
= false;
3324 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3326 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3327 release_lock
|= prepare_mmio_access(mr
);
3329 #if defined(TARGET_WORDS_BIGENDIAN)
3330 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3334 if (endian
== DEVICE_BIG_ENDIAN
) {
3338 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3341 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3342 ptr
= qemu_get_ram_ptr(addr1
);
3344 case DEVICE_LITTLE_ENDIAN
:
3347 case DEVICE_BIG_ENDIAN
:
3354 invalidate_and_set_dirty(mr
, addr1
, 4);
3361 qemu_mutex_unlock_iothread();
3366 void address_space_stl(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3367 MemTxAttrs attrs
, MemTxResult
*result
)
3369 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3370 DEVICE_NATIVE_ENDIAN
);
3373 void address_space_stl_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3374 MemTxAttrs attrs
, MemTxResult
*result
)
3376 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3377 DEVICE_LITTLE_ENDIAN
);
3380 void address_space_stl_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3381 MemTxAttrs attrs
, MemTxResult
*result
)
3383 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3387 void stl_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3389 address_space_stl(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3392 void stl_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3394 address_space_stl_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3397 void stl_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3399 address_space_stl_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3403 void address_space_stb(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3404 MemTxAttrs attrs
, MemTxResult
*result
)
3409 r
= address_space_rw(as
, addr
, attrs
, &v
, 1, 1);
3415 void stb_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3417 address_space_stb(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3420 /* warning: addr must be aligned */
3421 static inline void address_space_stw_internal(AddressSpace
*as
,
3422 hwaddr addr
, uint32_t val
,
3424 MemTxResult
*result
,
3425 enum device_endian endian
)
3432 bool release_lock
= false;
3435 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
3436 if (l
< 2 || !memory_access_is_direct(mr
, true)) {
3437 release_lock
|= prepare_mmio_access(mr
);
3439 #if defined(TARGET_WORDS_BIGENDIAN)
3440 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3444 if (endian
== DEVICE_BIG_ENDIAN
) {
3448 r
= memory_region_dispatch_write(mr
, addr1
, val
, 2, attrs
);
3451 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3452 ptr
= qemu_get_ram_ptr(addr1
);
3454 case DEVICE_LITTLE_ENDIAN
:
3457 case DEVICE_BIG_ENDIAN
:
3464 invalidate_and_set_dirty(mr
, addr1
, 2);
3471 qemu_mutex_unlock_iothread();
3476 void address_space_stw(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3477 MemTxAttrs attrs
, MemTxResult
*result
)
3479 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3480 DEVICE_NATIVE_ENDIAN
);
3483 void address_space_stw_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3484 MemTxAttrs attrs
, MemTxResult
*result
)
3486 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3487 DEVICE_LITTLE_ENDIAN
);
3490 void address_space_stw_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3491 MemTxAttrs attrs
, MemTxResult
*result
)
3493 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3497 void stw_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3499 address_space_stw(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3502 void stw_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3504 address_space_stw_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3507 void stw_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3509 address_space_stw_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3513 void address_space_stq(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3514 MemTxAttrs attrs
, MemTxResult
*result
)
3518 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3524 void address_space_stq_le(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3525 MemTxAttrs attrs
, MemTxResult
*result
)
3528 val
= cpu_to_le64(val
);
3529 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3534 void address_space_stq_be(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3535 MemTxAttrs attrs
, MemTxResult
*result
)
3538 val
= cpu_to_be64(val
);
3539 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3545 void stq_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3547 address_space_stq(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3550 void stq_le_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3552 address_space_stq_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3555 void stq_be_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3557 address_space_stq_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3560 /* virtual memory access for debug (includes writing to ROM) */
3561 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
3562 uint8_t *buf
, int len
, int is_write
)
3569 page
= addr
& TARGET_PAGE_MASK
;
3570 phys_addr
= cpu_get_phys_page_debug(cpu
, page
);
3571 /* if no physical page mapped, return an error */
3572 if (phys_addr
== -1)
3574 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3577 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
3579 cpu_physical_memory_write_rom(cpu
->as
, phys_addr
, buf
, l
);
3581 address_space_rw(cpu
->as
, phys_addr
, MEMTXATTRS_UNSPECIFIED
,
3592 * Allows code that needs to deal with migration bitmaps etc to still be built
3593 * target independent.
3595 size_t qemu_target_page_bits(void)
3597 return TARGET_PAGE_BITS
;
3603 * A helper function for the _utterly broken_ virtio device model to find out if
3604 * it's running on a big endian machine. Don't do this at home kids!
3606 bool target_words_bigendian(void);
3607 bool target_words_bigendian(void)
3609 #if defined(TARGET_WORDS_BIGENDIAN)
3616 #ifndef CONFIG_USER_ONLY
3617 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
3624 mr
= address_space_translate(&address_space_memory
,
3625 phys_addr
, &phys_addr
, &l
, false);
3627 res
= !(memory_region_is_ram(mr
) || memory_region_is_romd(mr
));
3632 int qemu_ram_foreach_block(RAMBlockIterFunc func
, void *opaque
)
3638 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
3639 ret
= func(block
->idstr
, block
->host
, block
->offset
,
3640 block
->used_length
, opaque
);