/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#include "qemu/mmap-alloc.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
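/* Worked example (illustrative comment, not in the original source): with
 * ADDR_SPACE_BITS = 64, a 4 KiB target page (TARGET_PAGE_BITS = 12) and
 * P_L2_BITS = 9, the tree needs ceil((64 - 12) / 9) levels:
 *
 *   P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = (51 / 9) + 1 = 5 + 1 = 6
 *
 * i.e. six radix-tree levels of 512 entries each cover the 52 address bits
 * above the page offset, and the 6-bit "skip" field is wide enough to record
 * any number of levels short-circuited by phys_page_compact().
 */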
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;
struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
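/* Usage sketch (illustrative comment, not in the original source): mapping a
 * 16-page region starting at guest-physical 0x100000 to section index 5 is a
 * single call; phys_page_set_level() then walks down from the root, writing
 * whole aligned subtrees at once whenever the remaining range covers a full
 * step at the current level:
 *
 *   phys_page_set(d, 0x100000 >> TARGET_PAGE_BITS, 16, 5);
 */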
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
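/* Usage sketch (illustrative comment, not in the original source): a caller
 * that wants the host-side view of a guest-physical range, clamped to a
 * single MemoryRegion, would do something like:
 *
 *   hwaddr xlat, len = size;
 *   rcu_read_lock();
 *   MemoryRegion *mr = address_space_translate(&address_space_memory,
 *                                              addr, &xlat, &len, false);
 *   // ... access at most "len" bytes at offset "xlat" inside "mr" ...
 *   rcu_read_unlock();
 */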
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}
static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}
static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}
static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
#endif
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}
void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif
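/* Worked example (illustrative comment, not in the original source): take a
 * watchpoint on the last byte of a 32-bit space, wp->vaddr = 0xffffffff,
 * wp->len = 1.  A naive "wp->vaddr + wp->len" would wrap to 0 and break the
 * comparison.  Using inclusive range ends instead:
 *
 *   wpend   = 0xffffffff + 1 - 1 = 0xffffffff   (no wrap)
 *   addrend = addr + len - 1
 *
 * keeps the overlap test !(addr > wpend || wp->vaddr > addrend) correct even
 * at the very top of the address space.
 */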
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}
static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->nodes);
    g_free(map->sections);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
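/* Worked example (illustrative comment, not in the original source): with
 * 0x1000-byte target pages, adding a section spanning 0x0800..0x27ff is
 * split into three registrations:
 *
 *   0x0800..0x0fff  unaligned head      -> register_subpage()
 *   0x1000..0x1fff  whole aligned page  -> register_multipage()
 *   0x2000..0x27ff  unaligned tail      -> register_subpage()
 */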
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    struct stat st;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
        /* Make name safe to use with mkstemp by replacing '/' with '_'. */
        sanitized_name = g_strdup(memory_region_name(block->mr));
        for (c = sanitized_name; *c != '\0'; c++) {
            if (*c == '/') {
                *c = '_';
            }
        }

        filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                   sanitized_name);
        g_free(sanitized_name);

        fd = mkstemp(filename);
        if (fd >= 0) {
            unlink(filename);
        }
        g_free(filename);
    } else {
        fd = open(path, O_RDWR | O_CREAT, 0644);
    }

    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        goto error;
    }

    memory = ROUND_UP(memory, hpagesize);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    return NULL;
}
#endif
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}
/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}
/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}
static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}
#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif
static ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                          void (*resized)(const char*,
                                                          uint64_t length,
                                                          void *host),
                                          void *host, bool resizeable,
                                          MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
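/* Usage sketch (illustrative comment, not in the original source): device
 * models normally reach these helpers through the memory API, e.g.
 * memory_region_init_ram() ends up in qemu_ram_alloc().  A backend that
 * wants to grow later would instead use
 *
 *   qemu_ram_alloc_resizeable(size, maxsz, resized_cb, mr, &err);
 *
 * and later call qemu_ram_resize() with any new size up to maxsz; resized_cb
 * (a hypothetical caller-provided callback) is invoked with the new length.
 */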
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void qemu_set_ram_fd(ram_addr_t addr, int fd)
{
    RAMBlock *block;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    block->fd = fd;
    rcu_read_unlock();
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr - block->offset);
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    RAMBlock *block;
    ram_addr_t offset_inside_block;
    if (*size == 0) {
        return NULL;
    }

    block = qemu_get_ram_block(addr);
    offset_inside_block = addr - block->offset;
    *size = MIN(*size, block->max_length - offset_inside_block);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, offset_inside_block);
}
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
        if (block) {
            *offset = (host - block->host);
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case occurs when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    *ram_addr = block->offset + *offset;
    rcu_read_unlock();
    return block;
}
/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    ram_addr_t offset; /* Not used */

    block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);

    if (!block) {
        return NULL;
    }

    return block->mr;
}
/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
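/* Flow sketch (illustrative comment, not in the original source): a guest
 * store to a page that still contains translated code is routed here via the
 * PHYS_SECTION_NOTDIRTY iotlb entry.  notdirty_mem_write() invalidates any
 * TBs on the page, performs the store, marks the page dirty for the
 * VGA/migration clients, and, once no clean bits remain, flips the TLB entry
 * back to a plain RAM mapping so later stores take the fast path.
 */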
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   memory routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(&address_space_memory, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(&address_space_memory, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(&address_space_memory, addr, attrs, &res);
        break;
    default:
        abort();
    }
    *pdata = data;
    return res;
}
static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(&address_space_memory, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(&address_space_memory, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(&address_space_memory, addr, val, attrs, &res);
        break;
    default:
        abort();
    }
    return res;
}
static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}
static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}
static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
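/* Usage sketch (illustrative comment, not in the original source): if a
 * 0x100-byte device is mapped at 0x1000 on a page shared with RAM,
 * register_subpage() creates one subpage_t for that page; sub_section[0x000
 * .. 0x0ff] then holds the device's section index and the remaining indexes
 * the RAM section's, so a single page-sized TLB entry still dispatches each
 * access byte-accurately through subpage_read()/subpage_write().
 */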
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
    CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}
static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}
void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}
void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
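/* Worked example (illustrative comment, not in the original source): for
 * addr = 0x1006, "addr & -addr" isolates the lowest set bit, which is 0x2.
 * So even on a region advertising 8-byte accesses, at most a 2-byte access
 * is dispatched at that address, keeping each access naturally aligned;
 * pow2floor() then rounds any remaining odd length down to a power of two.
 */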
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
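
/* Illustrative sketch (not part of the original source): how a device
 * model might copy a guest-physical buffer into host memory.  The
 * function name is hypothetical.
 */
static G_GNUC_UNUSED void example_read_guest_buffer(hwaddr guest_pa,
                                                    uint8_t *host_buf,
                                                    int size)
{
    /* is_write == 0: copy from guest memory into host_buf */
    cpu_physical_memory_rw(guest_pa, host_buf, size, 0);
}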
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
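
/* Illustrative sketch (not part of the original source): a DMA client
 * retrying address_space_map() from a bottom half once the bounce
 * buffer frees up.  "ExampleDMAState" and "example_retry_bh" are
 * hypothetical names.
 */
typedef struct ExampleDMAState {
    QEMUBH *bh;
    hwaddr addr;
    hwaddr len;
} ExampleDMAState;

static G_GNUC_UNUSED void example_retry_bh(void *opaque)
{
    ExampleDMAState *s = opaque;
    hwaddr len = s->len;
    void *ptr = address_space_map(&address_space_memory, s->addr, &len, true);

    if (!ptr) {
        /* Resources still exhausted: wait for the next notification. */
        cpu_register_map_client(s->bh);
        return;
    }
    /* ... fill ptr with up to len bytes ... */
    address_space_unmap(&address_space_memory, ptr, len, true, len);
}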
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(raddr + base, plen);
    rcu_read_unlock();

    return ptr;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
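
/* Illustrative sketch (not part of the original source): the canonical
 * map/use/unmap pattern.  A real caller must cope with *plen coming
 * back smaller than requested and with NULL returns; the function name
 * is hypothetical.
 */
static G_GNUC_UNUSED bool example_fill_guest_region(hwaddr addr, hwaddr len)
{
    hwaddr mapped = len;
    void *ptr = address_space_map(&address_space_memory, addr, &mapped, true);

    if (!ptr) {
        return false;           /* resources exhausted, try again later */
    }
    memset(ptr, 0, mapped);     /* only 'mapped' bytes are valid */
    address_space_unmap(&address_space_memory, ptr, mapped, true, mapped);
    return mapped == len;
}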
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}
uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}
uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}
uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
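
/* Illustrative sketch (not part of the original source): updating a
 * hypothetical 32-bit guest PTE with an accessed bit, without marking
 * the page dirty for migration or invalidating translated code on it.
 * The function name and the 0x20 bit are hypothetical.
 */
static G_GNUC_UNUSED void example_set_pte_accessed(AddressSpace *as,
                                                   hwaddr pte_pa,
                                                   uint32_t pte)
{
    stl_phys_notdirty(as, pte_pa, pte | 0x20);  /* 0x20: hypothetical A bit */
}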
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}
void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}
void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
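
/* Illustrative sketch (not part of the original source): a
 * gdbstub-style read of guest-virtual memory via the debug path.  The
 * wrapper name is hypothetical.
 */
static G_GNUC_UNUSED int example_debug_read(CPUState *cpu, target_ulong vaddr,
                                            uint8_t *buf, int len)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(cpu, vaddr, buf, len, 0);
}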
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion*mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
#endif