4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include <sys/types.h>
25 #include "qemu-common.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
33 #include "qemu/osdep.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "hw/xen/xen.h"
37 #include "qemu/timer.h"
38 #include "qemu/config-file.h"
39 #include "qemu/error-report.h"
40 #include "exec/memory.h"
41 #include "sysemu/dma.h"
42 #include "exec/address-spaces.h"
43 #if defined(CONFIG_USER_ONLY)
45 #else /* !CONFIG_USER_ONLY */
46 #include "sysemu/xen-mapcache.h"
49 #include "exec/cpu-all.h"
50 #include "qemu/rcu_queue.h"
51 #include "qemu/main-loop.h"
52 #include "translate-all.h"
53 #include "sysemu/replay.h"
55 #include "exec/memory-internal.h"
56 #include "exec/ram_addr.h"
58 #include "qemu/range.h"
60 #include "qemu/mmap-alloc.h"
63 //#define DEBUG_SUBPAGE
65 #if !defined(CONFIG_USER_ONLY)
66 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
67 * are protected by the ramlist lock.
69 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
71 static MemoryRegion
*system_memory
;
72 static MemoryRegion
*system_io
;
74 AddressSpace address_space_io
;
75 AddressSpace address_space_memory
;
77 MemoryRegion io_mem_rom
, io_mem_notdirty
;
78 static MemoryRegion io_mem_unassigned
;
80 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
81 #define RAM_PREALLOC (1 << 0)
83 /* RAM is mmap-ed with MAP_SHARED */
84 #define RAM_SHARED (1 << 1)
86 /* Only a portion of RAM (used_length) is actually used, and migrated.
87 * This used_length size can change across reboots.
89 #define RAM_RESIZEABLE (1 << 2)
93 struct CPUTailQ cpus
= QTAILQ_HEAD_INITIALIZER(cpus
);
94 /* current CPU in the current thread. It is only valid inside
96 __thread CPUState
*current_cpu
;
97 /* 0 = Do not count executed instructions.
98 1 = Precise instruction counting.
99 2 = Adaptive rate instruction counting. */
102 #if !defined(CONFIG_USER_ONLY)
104 typedef struct PhysPageEntry PhysPageEntry
;
106 struct PhysPageEntry
{
107 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
109 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
113 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
115 /* Size of the L2 (and L3, etc) page tables. */
116 #define ADDR_SPACE_BITS 64
119 #define P_L2_SIZE (1 << P_L2_BITS)
121 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
123 typedef PhysPageEntry Node
[P_L2_SIZE
];
125 typedef struct PhysPageMap
{
128 unsigned sections_nb
;
129 unsigned sections_nb_alloc
;
131 unsigned nodes_nb_alloc
;
133 MemoryRegionSection
*sections
;
136 struct AddressSpaceDispatch
{
139 /* This is a multi-level map on the physical address space.
140 * The bottom level has pointers to MemoryRegionSections.
142 PhysPageEntry phys_map
;
147 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
148 typedef struct subpage_t
{
152 uint16_t sub_section
[TARGET_PAGE_SIZE
];
155 #define PHYS_SECTION_UNASSIGNED 0
156 #define PHYS_SECTION_NOTDIRTY 1
157 #define PHYS_SECTION_ROM 2
158 #define PHYS_SECTION_WATCH 3
160 static void io_mem_init(void);
161 static void memory_map_init(void);
162 static void tcg_commit(MemoryListener
*listener
);
164 static MemoryRegion io_mem_watch
;
167 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
168 * @cpu: the CPU whose AddressSpace this is
169 * @as: the AddressSpace itself
170 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
171 * @tcg_as_listener: listener for tracking changes to the AddressSpace
173 struct CPUAddressSpace
{
176 struct AddressSpaceDispatch
*memory_dispatch
;
177 MemoryListener tcg_as_listener
;
182 #if !defined(CONFIG_USER_ONLY)
184 static void phys_map_node_reserve(PhysPageMap
*map
, unsigned nodes
)
186 if (map
->nodes_nb
+ nodes
> map
->nodes_nb_alloc
) {
187 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
* 2, 16);
188 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
, map
->nodes_nb
+ nodes
);
189 map
->nodes
= g_renew(Node
, map
->nodes
, map
->nodes_nb_alloc
);
193 static uint32_t phys_map_node_alloc(PhysPageMap
*map
, bool leaf
)
200 ret
= map
->nodes_nb
++;
202 assert(ret
!= PHYS_MAP_NODE_NIL
);
203 assert(ret
!= map
->nodes_nb_alloc
);
205 e
.skip
= leaf
? 0 : 1;
206 e
.ptr
= leaf
? PHYS_SECTION_UNASSIGNED
: PHYS_MAP_NODE_NIL
;
207 for (i
= 0; i
< P_L2_SIZE
; ++i
) {
208 memcpy(&p
[i
], &e
, sizeof(e
));
213 static void phys_page_set_level(PhysPageMap
*map
, PhysPageEntry
*lp
,
214 hwaddr
*index
, hwaddr
*nb
, uint16_t leaf
,
218 hwaddr step
= (hwaddr
)1 << (level
* P_L2_BITS
);
220 if (lp
->skip
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
221 lp
->ptr
= phys_map_node_alloc(map
, level
== 0);
223 p
= map
->nodes
[lp
->ptr
];
224 lp
= &p
[(*index
>> (level
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
226 while (*nb
&& lp
< &p
[P_L2_SIZE
]) {
227 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
233 phys_page_set_level(map
, lp
, index
, nb
, leaf
, level
- 1);
239 static void phys_page_set(AddressSpaceDispatch
*d
,
240 hwaddr index
, hwaddr nb
,
243 /* Wildly overreserve - it doesn't matter much. */
244 phys_map_node_reserve(&d
->map
, 3 * P_L2_LEVELS
);
246 phys_page_set_level(&d
->map
, &d
->phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
249 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
250 * and update our entry so we can skip it and go directly to the destination.
252 static void phys_page_compact(PhysPageEntry
*lp
, Node
*nodes
, unsigned long *compacted
)
254 unsigned valid_ptr
= P_L2_SIZE
;
259 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
264 for (i
= 0; i
< P_L2_SIZE
; i
++) {
265 if (p
[i
].ptr
== PHYS_MAP_NODE_NIL
) {
272 phys_page_compact(&p
[i
], nodes
, compacted
);
276 /* We can only compress if there's only one child. */
281 assert(valid_ptr
< P_L2_SIZE
);
283 /* Don't compress if it won't fit in the # of bits we have. */
284 if (lp
->skip
+ p
[valid_ptr
].skip
>= (1 << 3)) {
288 lp
->ptr
= p
[valid_ptr
].ptr
;
289 if (!p
[valid_ptr
].skip
) {
290 /* If our only child is a leaf, make this a leaf. */
291 /* By design, we should have made this node a leaf to begin with so we
292 * should never reach here.
293 * But since it's so simple to handle this, let's do it just in case we
298 lp
->skip
+= p
[valid_ptr
].skip
;
302 static void phys_page_compact_all(AddressSpaceDispatch
*d
, int nodes_nb
)
304 DECLARE_BITMAP(compacted
, nodes_nb
);
306 if (d
->phys_map
.skip
) {
307 phys_page_compact(&d
->phys_map
, d
->map
.nodes
, compacted
);
311 static MemoryRegionSection
*phys_page_find(PhysPageEntry lp
, hwaddr addr
,
312 Node
*nodes
, MemoryRegionSection
*sections
)
315 hwaddr index
= addr
>> TARGET_PAGE_BITS
;
318 for (i
= P_L2_LEVELS
; lp
.skip
&& (i
-= lp
.skip
) >= 0;) {
319 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
320 return §ions
[PHYS_SECTION_UNASSIGNED
];
323 lp
= p
[(index
>> (i
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
326 if (sections
[lp
.ptr
].size
.hi
||
327 range_covers_byte(sections
[lp
.ptr
].offset_within_address_space
,
328 sections
[lp
.ptr
].size
.lo
, addr
)) {
329 return §ions
[lp
.ptr
];
331 return §ions
[PHYS_SECTION_UNASSIGNED
];
335 bool memory_region_is_unassigned(MemoryRegion
*mr
)
337 return mr
!= &io_mem_rom
&& mr
!= &io_mem_notdirty
&& !mr
->rom_device
338 && mr
!= &io_mem_watch
;
341 /* Called from RCU critical section */
342 static MemoryRegionSection
*address_space_lookup_region(AddressSpaceDispatch
*d
,
344 bool resolve_subpage
)
346 MemoryRegionSection
*section
;
349 section
= phys_page_find(d
->phys_map
, addr
, d
->map
.nodes
, d
->map
.sections
);
350 if (resolve_subpage
&& section
->mr
->subpage
) {
351 subpage
= container_of(section
->mr
, subpage_t
, iomem
);
352 section
= &d
->map
.sections
[subpage
->sub_section
[SUBPAGE_IDX(addr
)]];
357 /* Called from RCU critical section */
358 static MemoryRegionSection
*
359 address_space_translate_internal(AddressSpaceDispatch
*d
, hwaddr addr
, hwaddr
*xlat
,
360 hwaddr
*plen
, bool resolve_subpage
)
362 MemoryRegionSection
*section
;
366 section
= address_space_lookup_region(d
, addr
, resolve_subpage
);
367 /* Compute offset within MemoryRegionSection */
368 addr
-= section
->offset_within_address_space
;
370 /* Compute offset within MemoryRegion */
371 *xlat
= addr
+ section
->offset_within_region
;
375 /* MMIO registers can be expected to perform full-width accesses based only
376 * on their address, without considering adjacent registers that could
377 * decode to completely different MemoryRegions. When such registers
378 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
379 * regions overlap wildly. For this reason we cannot clamp the accesses
382 * If the length is small (as is the case for address_space_ldl/stl),
383 * everything works fine. If the incoming length is large, however,
384 * the caller really has to do the clamping through memory_access_size.
386 if (memory_region_is_ram(mr
)) {
387 diff
= int128_sub(section
->size
, int128_make64(addr
));
388 *plen
= int128_get64(int128_min(diff
, int128_make64(*plen
)));
393 /* Called from RCU critical section */
394 MemoryRegion
*address_space_translate(AddressSpace
*as
, hwaddr addr
,
395 hwaddr
*xlat
, hwaddr
*plen
,
399 MemoryRegionSection
*section
;
403 AddressSpaceDispatch
*d
= atomic_rcu_read(&as
->dispatch
);
404 section
= address_space_translate_internal(d
, addr
, &addr
, plen
, true);
407 if (!mr
->iommu_ops
) {
411 iotlb
= mr
->iommu_ops
->translate(mr
, addr
, is_write
);
412 addr
= ((iotlb
.translated_addr
& ~iotlb
.addr_mask
)
413 | (addr
& iotlb
.addr_mask
));
414 *plen
= MIN(*plen
, (addr
| iotlb
.addr_mask
) - addr
+ 1);
415 if (!(iotlb
.perm
& (1 << is_write
))) {
416 mr
= &io_mem_unassigned
;
420 as
= iotlb
.target_as
;
423 if (xen_enabled() && memory_access_is_direct(mr
, is_write
)) {
424 hwaddr page
= ((addr
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
) - addr
;
425 *plen
= MIN(page
, *plen
);
432 /* Called from RCU critical section */
433 MemoryRegionSection
*
434 address_space_translate_for_iotlb(CPUState
*cpu
, int asidx
, hwaddr addr
,
435 hwaddr
*xlat
, hwaddr
*plen
)
437 MemoryRegionSection
*section
;
438 AddressSpaceDispatch
*d
= cpu
->cpu_ases
[asidx
].memory_dispatch
;
440 section
= address_space_translate_internal(d
, addr
, xlat
, plen
, false);
442 assert(!section
->mr
->iommu_ops
);
447 #if !defined(CONFIG_USER_ONLY)
449 static int cpu_common_post_load(void *opaque
, int version_id
)
451 CPUState
*cpu
= opaque
;
453 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
454 version_id is increased. */
455 cpu
->interrupt_request
&= ~0x01;
461 static int cpu_common_pre_load(void *opaque
)
463 CPUState
*cpu
= opaque
;
465 cpu
->exception_index
= -1;
470 static bool cpu_common_exception_index_needed(void *opaque
)
472 CPUState
*cpu
= opaque
;
474 return tcg_enabled() && cpu
->exception_index
!= -1;
477 static const VMStateDescription vmstate_cpu_common_exception_index
= {
478 .name
= "cpu_common/exception_index",
480 .minimum_version_id
= 1,
481 .needed
= cpu_common_exception_index_needed
,
482 .fields
= (VMStateField
[]) {
483 VMSTATE_INT32(exception_index
, CPUState
),
484 VMSTATE_END_OF_LIST()
488 static bool cpu_common_crash_occurred_needed(void *opaque
)
490 CPUState
*cpu
= opaque
;
492 return cpu
->crash_occurred
;
495 static const VMStateDescription vmstate_cpu_common_crash_occurred
= {
496 .name
= "cpu_common/crash_occurred",
498 .minimum_version_id
= 1,
499 .needed
= cpu_common_crash_occurred_needed
,
500 .fields
= (VMStateField
[]) {
501 VMSTATE_BOOL(crash_occurred
, CPUState
),
502 VMSTATE_END_OF_LIST()
506 const VMStateDescription vmstate_cpu_common
= {
507 .name
= "cpu_common",
509 .minimum_version_id
= 1,
510 .pre_load
= cpu_common_pre_load
,
511 .post_load
= cpu_common_post_load
,
512 .fields
= (VMStateField
[]) {
513 VMSTATE_UINT32(halted
, CPUState
),
514 VMSTATE_UINT32(interrupt_request
, CPUState
),
515 VMSTATE_END_OF_LIST()
517 .subsections
= (const VMStateDescription
*[]) {
518 &vmstate_cpu_common_exception_index
,
519 &vmstate_cpu_common_crash_occurred
,
526 CPUState
*qemu_get_cpu(int index
)
531 if (cpu
->cpu_index
== index
) {
539 #if !defined(CONFIG_USER_ONLY)
540 void cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
, int asidx
)
542 CPUAddressSpace
*newas
;
544 /* Target code should have set num_ases before calling us */
545 assert(asidx
< cpu
->num_ases
);
548 /* address space 0 gets the convenience alias */
552 /* KVM cannot currently support multiple address spaces. */
553 assert(asidx
== 0 || !kvm_enabled());
555 if (!cpu
->cpu_ases
) {
556 cpu
->cpu_ases
= g_new0(CPUAddressSpace
, cpu
->num_ases
);
559 newas
= &cpu
->cpu_ases
[asidx
];
563 newas
->tcg_as_listener
.commit
= tcg_commit
;
564 memory_listener_register(&newas
->tcg_as_listener
, as
);
568 AddressSpace
*cpu_get_address_space(CPUState
*cpu
, int asidx
)
570 /* Return the AddressSpace corresponding to the specified index */
571 return cpu
->cpu_ases
[asidx
].as
;
575 #ifndef CONFIG_USER_ONLY
576 static DECLARE_BITMAP(cpu_index_map
, MAX_CPUMASK_BITS
);
578 static int cpu_get_free_index(Error
**errp
)
580 int cpu
= find_first_zero_bit(cpu_index_map
, MAX_CPUMASK_BITS
);
582 if (cpu
>= MAX_CPUMASK_BITS
) {
583 error_setg(errp
, "Trying to use more CPUs than max of %d",
588 bitmap_set(cpu_index_map
, cpu
, 1);
592 void cpu_exec_exit(CPUState
*cpu
)
594 if (cpu
->cpu_index
== -1) {
595 /* cpu_index was never allocated by this @cpu or was already freed. */
599 bitmap_clear(cpu_index_map
, cpu
->cpu_index
, 1);
604 static int cpu_get_free_index(Error
**errp
)
609 CPU_FOREACH(some_cpu
) {
615 void cpu_exec_exit(CPUState
*cpu
)
620 void cpu_exec_init(CPUState
*cpu
, Error
**errp
)
622 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
624 Error
*local_err
= NULL
;
629 #ifndef CONFIG_USER_ONLY
630 cpu
->thread_id
= qemu_get_thread_id();
633 #if defined(CONFIG_USER_ONLY)
636 cpu_index
= cpu
->cpu_index
= cpu_get_free_index(&local_err
);
638 error_propagate(errp
, local_err
);
639 #if defined(CONFIG_USER_ONLY)
644 QTAILQ_INSERT_TAIL(&cpus
, cpu
, node
);
645 #if defined(CONFIG_USER_ONLY)
648 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
649 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, cpu
);
651 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
652 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
653 cpu_save
, cpu_load
, cpu
->env_ptr
);
654 assert(cc
->vmsd
== NULL
);
655 assert(qdev_get_vmsd(DEVICE(cpu
)) == NULL
);
657 if (cc
->vmsd
!= NULL
) {
658 vmstate_register(NULL
, cpu_index
, cc
->vmsd
, cpu
);
662 #if defined(CONFIG_USER_ONLY)
663 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
665 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
668 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
671 hwaddr phys
= cpu_get_phys_page_attrs_debug(cpu
, pc
, &attrs
);
672 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
674 tb_invalidate_phys_addr(cpu
->cpu_ases
[asidx
].as
,
675 phys
| (pc
& ~TARGET_PAGE_MASK
));
680 #if defined(CONFIG_USER_ONLY)
681 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
686 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
692 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
696 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
697 int flags
, CPUWatchpoint
**watchpoint
)
702 /* Add a watchpoint. */
703 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
704 int flags
, CPUWatchpoint
**watchpoint
)
708 /* forbid ranges which are empty or run off the end of the address space */
709 if (len
== 0 || (addr
+ len
- 1) < addr
) {
710 error_report("tried to set invalid watchpoint at %"
711 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
714 wp
= g_malloc(sizeof(*wp
));
720 /* keep all GDB-injected watchpoints in front */
721 if (flags
& BP_GDB
) {
722 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
724 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
727 tlb_flush_page(cpu
, addr
);
734 /* Remove a specific watchpoint. */
735 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
740 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
741 if (addr
== wp
->vaddr
&& len
== wp
->len
742 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
743 cpu_watchpoint_remove_by_ref(cpu
, wp
);
750 /* Remove a specific watchpoint by reference. */
751 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
753 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
755 tlb_flush_page(cpu
, watchpoint
->vaddr
);
760 /* Remove all matching watchpoints. */
761 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
763 CPUWatchpoint
*wp
, *next
;
765 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
766 if (wp
->flags
& mask
) {
767 cpu_watchpoint_remove_by_ref(cpu
, wp
);
772 /* Return true if this watchpoint address matches the specified
773 * access (ie the address range covered by the watchpoint overlaps
774 * partially or completely with the address range covered by the
777 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint
*wp
,
781 /* We know the lengths are non-zero, but a little caution is
782 * required to avoid errors in the case where the range ends
783 * exactly at the top of the address space and so addr + len
784 * wraps round to zero.
786 vaddr wpend
= wp
->vaddr
+ wp
->len
- 1;
787 vaddr addrend
= addr
+ len
- 1;
789 return !(addr
> wpend
|| wp
->vaddr
> addrend
);
794 /* Add a breakpoint. */
795 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
796 CPUBreakpoint
**breakpoint
)
800 bp
= g_malloc(sizeof(*bp
));
805 /* keep all GDB-injected breakpoints in front */
806 if (flags
& BP_GDB
) {
807 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
809 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
812 breakpoint_invalidate(cpu
, pc
);
820 /* Remove a specific breakpoint. */
821 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
825 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
826 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
827 cpu_breakpoint_remove_by_ref(cpu
, bp
);
834 /* Remove a specific breakpoint by reference. */
835 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
837 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
839 breakpoint_invalidate(cpu
, breakpoint
->pc
);
844 /* Remove all matching breakpoints. */
845 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
847 CPUBreakpoint
*bp
, *next
;
849 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
850 if (bp
->flags
& mask
) {
851 cpu_breakpoint_remove_by_ref(cpu
, bp
);
856 /* enable or disable single step mode. EXCP_DEBUG is returned by the
857 CPU loop after each instruction */
858 void cpu_single_step(CPUState
*cpu
, int enabled
)
860 if (cpu
->singlestep_enabled
!= enabled
) {
861 cpu
->singlestep_enabled
= enabled
;
863 kvm_update_guest_debug(cpu
, 0);
865 /* must flush all the translated code to avoid inconsistencies */
866 /* XXX: only flush what is necessary */
872 void cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
879 fprintf(stderr
, "qemu: fatal: ");
880 vfprintf(stderr
, fmt
, ap
);
881 fprintf(stderr
, "\n");
882 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
883 if (qemu_log_separate()) {
884 qemu_log("qemu: fatal: ");
885 qemu_log_vprintf(fmt
, ap2
);
887 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
894 #if defined(CONFIG_USER_ONLY)
896 struct sigaction act
;
897 sigfillset(&act
.sa_mask
);
898 act
.sa_handler
= SIG_DFL
;
899 sigaction(SIGABRT
, &act
, NULL
);
905 #if !defined(CONFIG_USER_ONLY)
906 /* Called from RCU critical section */
907 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
911 block
= atomic_rcu_read(&ram_list
.mru_block
);
912 if (block
&& addr
- block
->offset
< block
->max_length
) {
915 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
916 if (addr
- block
->offset
< block
->max_length
) {
921 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
925 /* It is safe to write mru_block outside the iothread lock. This
930 * xxx removed from list
934 * call_rcu(reclaim_ramblock, xxx);
937 * atomic_rcu_set is not needed here. The block was already published
938 * when it was placed into the list. Here we're just making an extra
939 * copy of the pointer.
941 ram_list
.mru_block
= block
;
945 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
952 end
= TARGET_PAGE_ALIGN(start
+ length
);
953 start
&= TARGET_PAGE_MASK
;
956 block
= qemu_get_ram_block(start
);
957 assert(block
== qemu_get_ram_block(end
- 1));
958 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
960 tlb_reset_dirty(cpu
, start1
, length
);
965 /* Note: start and end must be within the same ram block. */
966 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
970 unsigned long end
, page
;
977 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
978 page
= start
>> TARGET_PAGE_BITS
;
979 dirty
= bitmap_test_and_clear_atomic(ram_list
.dirty_memory
[client
],
982 if (dirty
&& tcg_enabled()) {
983 tlb_reset_dirty_range_all(start
, length
);
989 /* Called from RCU critical section */
990 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
991 MemoryRegionSection
*section
,
993 hwaddr paddr
, hwaddr xlat
,
995 target_ulong
*address
)
1000 if (memory_region_is_ram(section
->mr
)) {
1002 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1004 if (!section
->readonly
) {
1005 iotlb
|= PHYS_SECTION_NOTDIRTY
;
1007 iotlb
|= PHYS_SECTION_ROM
;
1010 AddressSpaceDispatch
*d
;
1012 d
= atomic_rcu_read(§ion
->address_space
->dispatch
);
1013 iotlb
= section
- d
->map
.sections
;
1017 /* Make accesses to pages with watchpoints go via the
1018 watchpoint trap routines. */
1019 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1020 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
1021 /* Avoid trapping reads of pages with a write breakpoint. */
1022 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1023 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
1024 *address
|= TLB_MMIO
;
1032 #endif /* defined(CONFIG_USER_ONLY) */
1034 #if !defined(CONFIG_USER_ONLY)
1036 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1038 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
1040 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
1041 qemu_anon_ram_alloc
;
1044 * Set a custom physical guest memory alloator.
1045 * Accelerators with unusual needs may need this. Hopefully, we can
1046 * get rid of it eventually.
1048 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1050 phys_mem_alloc
= alloc
;
1053 static uint16_t phys_section_add(PhysPageMap
*map
,
1054 MemoryRegionSection
*section
)
1056 /* The physical section number is ORed with a page-aligned
1057 * pointer to produce the iotlb entries. Thus it should
1058 * never overflow into the page-aligned value.
1060 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1062 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1063 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1064 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1065 map
->sections_nb_alloc
);
1067 map
->sections
[map
->sections_nb
] = *section
;
1068 memory_region_ref(section
->mr
);
1069 return map
->sections_nb
++;
1072 static void phys_section_destroy(MemoryRegion
*mr
)
1074 bool have_sub_page
= mr
->subpage
;
1076 memory_region_unref(mr
);
1078 if (have_sub_page
) {
1079 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1080 object_unref(OBJECT(&subpage
->iomem
));
1085 static void phys_sections_free(PhysPageMap
*map
)
1087 while (map
->sections_nb
> 0) {
1088 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1089 phys_section_destroy(section
->mr
);
1091 g_free(map
->sections
);
1095 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1098 hwaddr base
= section
->offset_within_address_space
1100 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1101 d
->map
.nodes
, d
->map
.sections
);
1102 MemoryRegionSection subsection
= {
1103 .offset_within_address_space
= base
,
1104 .size
= int128_make64(TARGET_PAGE_SIZE
),
1108 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1110 if (!(existing
->mr
->subpage
)) {
1111 subpage
= subpage_init(d
->as
, base
);
1112 subsection
.address_space
= d
->as
;
1113 subsection
.mr
= &subpage
->iomem
;
1114 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1115 phys_section_add(&d
->map
, &subsection
));
1117 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1119 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1120 end
= start
+ int128_get64(section
->size
) - 1;
1121 subpage_register(subpage
, start
, end
,
1122 phys_section_add(&d
->map
, section
));
1126 static void register_multipage(AddressSpaceDispatch
*d
,
1127 MemoryRegionSection
*section
)
1129 hwaddr start_addr
= section
->offset_within_address_space
;
1130 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1131 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1135 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1138 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1140 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1141 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1142 MemoryRegionSection now
= *section
, remain
= *section
;
1143 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1145 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1146 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1147 - now
.offset_within_address_space
;
1149 now
.size
= int128_min(int128_make64(left
), now
.size
);
1150 register_subpage(d
, &now
);
1152 now
.size
= int128_zero();
1154 while (int128_ne(remain
.size
, now
.size
)) {
1155 remain
.size
= int128_sub(remain
.size
, now
.size
);
1156 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1157 remain
.offset_within_region
+= int128_get64(now
.size
);
1159 if (int128_lt(remain
.size
, page_size
)) {
1160 register_subpage(d
, &now
);
1161 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1162 now
.size
= page_size
;
1163 register_subpage(d
, &now
);
1165 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1166 register_multipage(d
, &now
);
/* Flush KVM's coalesced-MMIO ring; no-op when KVM is not in use. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
1177 void qemu_mutex_lock_ramlist(void)
1179 qemu_mutex_lock(&ram_list
.mutex
);
1182 void qemu_mutex_unlock_ramlist(void)
1184 qemu_mutex_unlock(&ram_list
.mutex
);
1189 #include <sys/vfs.h>
1191 #define HUGETLBFS_MAGIC 0x958458f6
1193 static long gethugepagesize(const char *path
, Error
**errp
)
1199 ret
= statfs(path
, &fs
);
1200 } while (ret
!= 0 && errno
== EINTR
);
1203 error_setg_errno(errp
, errno
, "failed to get page size of file %s",
1211 static void *file_ram_alloc(RAMBlock
*block
,
1218 char *sanitized_name
;
1223 Error
*local_err
= NULL
;
1225 hpagesize
= gethugepagesize(path
, &local_err
);
1227 error_propagate(errp
, local_err
);
1230 block
->mr
->align
= hpagesize
;
1232 if (memory
< hpagesize
) {
1233 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1234 "or larger than huge page size 0x%" PRIx64
,
1239 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1241 "host lacks kvm mmu notifiers, -mem-path unsupported");
1245 if (!stat(path
, &st
) && S_ISDIR(st
.st_mode
)) {
1246 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1247 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1248 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1254 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1256 g_free(sanitized_name
);
1258 fd
= mkstemp(filename
);
1264 fd
= open(path
, O_RDWR
| O_CREAT
, 0644);
1268 error_setg_errno(errp
, errno
,
1269 "unable to create backing store for hugepages");
1273 memory
= ROUND_UP(memory
, hpagesize
);
1276 * ftruncate is not supported by hugetlbfs in older
1277 * hosts, so don't bother bailing out on errors.
1278 * If anything goes wrong with it under other filesystems,
1281 if (ftruncate(fd
, memory
)) {
1282 perror("ftruncate");
1285 area
= qemu_ram_mmap(fd
, memory
, hpagesize
, block
->flags
& RAM_SHARED
);
1286 if (area
== MAP_FAILED
) {
1287 error_setg_errno(errp
, errno
,
1288 "unable to map backing store for hugepages");
1294 os_mem_prealloc(fd
, area
, memory
);
1305 /* Called with the ramlist lock held. */
1306 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1308 RAMBlock
*block
, *next_block
;
1309 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1311 assert(size
!= 0); /* it would hand out same offset multiple times */
1313 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1317 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1318 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1320 end
= block
->offset
+ block
->max_length
;
1322 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1323 if (next_block
->offset
>= end
) {
1324 next
= MIN(next
, next_block
->offset
);
1327 if (next
- end
>= size
&& next
- end
< mingap
) {
1329 mingap
= next
- end
;
1333 if (offset
== RAM_ADDR_MAX
) {
1334 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1342 ram_addr_t
last_ram_offset(void)
1345 ram_addr_t last
= 0;
1348 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1349 last
= MAX(last
, block
->offset
+ block
->max_length
);
1355 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1359 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1360 if (!machine_dump_guest_core(current_machine
)) {
1361 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1363 perror("qemu_madvise");
1364 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1365 "but dump_guest_core=off specified\n");
1370 /* Called within an RCU critical section, or while the ramlist lock
1373 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1377 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1378 if (block
->offset
== addr
) {
1386 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1391 /* Called with iothread lock held. */
1392 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1394 RAMBlock
*new_block
, *block
;
1397 new_block
= find_ram_block(addr
);
1399 assert(!new_block
->idstr
[0]);
1402 char *id
= qdev_get_dev_path(dev
);
1404 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1408 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1410 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1411 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1412 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1420 /* Called with iothread lock held. */
1421 void qemu_ram_unset_idstr(ram_addr_t addr
)
1425 /* FIXME: arch_init.c assumes that this is not called throughout
1426 * migration. Ignore the problem since hot-unplug during migration
1427 * does not work anyway.
1431 block
= find_ram_block(addr
);
1433 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1438 static int memory_try_enable_merging(void *addr
, size_t len
)
1440 if (!machine_mem_merge(current_machine
)) {
1441 /* disabled by the user */
1445 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1448 /* Only legal before guest might have detected the memory size: e.g. on
1449 * incoming migration, or right after reset.
1451 * As memory core doesn't know how is memory accessed, it is up to
1452 * resize callback to update device state and/or add assertions to detect
1453 * misuse, if necessary.
1455 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1457 RAMBlock
*block
= find_ram_block(base
);
1461 newsize
= HOST_PAGE_ALIGN(newsize
);
1463 if (block
->used_length
== newsize
) {
1467 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1468 error_setg_errno(errp
, EINVAL
,
1469 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1470 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1471 newsize
, block
->used_length
);
1475 if (block
->max_length
< newsize
) {
1476 error_setg_errno(errp
, EINVAL
,
1477 "Length too large: %s: 0x" RAM_ADDR_FMT
1478 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1479 newsize
, block
->max_length
);
1483 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1484 block
->used_length
= newsize
;
1485 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1487 memory_region_set_size(block
->mr
, newsize
);
1488 if (block
->resized
) {
1489 block
->resized(block
->idstr
, newsize
, block
->host
);
1494 static ram_addr_t
ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1497 RAMBlock
*last_block
= NULL
;
1498 ram_addr_t old_ram_size
, new_ram_size
;
1500 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1502 qemu_mutex_lock_ramlist();
1503 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1505 if (!new_block
->host
) {
1506 if (xen_enabled()) {
1507 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1510 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1511 &new_block
->mr
->align
);
1512 if (!new_block
->host
) {
1513 error_setg_errno(errp
, errno
,
1514 "cannot set up guest memory '%s'",
1515 memory_region_name(new_block
->mr
));
1516 qemu_mutex_unlock_ramlist();
1519 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1523 new_ram_size
= MAX(old_ram_size
,
1524 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1525 if (new_ram_size
> old_ram_size
) {
1526 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1528 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1529 * QLIST (which has an RCU-friendly variant) does not have insertion at
1530 * tail, so save the last element in last_block.
1532 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1534 if (block
->max_length
< new_block
->max_length
) {
1539 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1540 } else if (last_block
) {
1541 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1542 } else { /* list is empty */
1543 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1545 ram_list
.mru_block
= NULL
;
1547 /* Write list before version */
1550 qemu_mutex_unlock_ramlist();
1552 new_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1554 if (new_ram_size
> old_ram_size
) {
1557 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1558 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1559 ram_list
.dirty_memory
[i
] =
1560 bitmap_zero_extend(ram_list
.dirty_memory
[i
],
1561 old_ram_size
, new_ram_size
);
1564 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1565 new_block
->used_length
,
1568 if (new_block
->host
) {
1569 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1570 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1571 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1572 if (kvm_enabled()) {
1573 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1577 return new_block
->offset
;
1581 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1582 bool share
, const char *mem_path
,
1585 RAMBlock
*new_block
;
1587 Error
*local_err
= NULL
;
1589 if (xen_enabled()) {
1590 error_setg(errp
, "-mem-path not supported with Xen");
1594 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1596 * file_ram_alloc() needs to allocate just like
1597 * phys_mem_alloc, but we haven't bothered to provide
1601 "-mem-path not supported with this accelerator");
1605 size
= HOST_PAGE_ALIGN(size
);
1606 new_block
= g_malloc0(sizeof(*new_block
));
1608 new_block
->used_length
= size
;
1609 new_block
->max_length
= size
;
1610 new_block
->flags
= share
? RAM_SHARED
: 0;
1611 new_block
->host
= file_ram_alloc(new_block
, size
,
1613 if (!new_block
->host
) {
1618 addr
= ram_block_add(new_block
, &local_err
);
1621 error_propagate(errp
, local_err
);
1629 ram_addr_t
qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1630 void (*resized
)(const char*,
1633 void *host
, bool resizeable
,
1634 MemoryRegion
*mr
, Error
**errp
)
1636 RAMBlock
*new_block
;
1638 Error
*local_err
= NULL
;
1640 size
= HOST_PAGE_ALIGN(size
);
1641 max_size
= HOST_PAGE_ALIGN(max_size
);
1642 new_block
= g_malloc0(sizeof(*new_block
));
1644 new_block
->resized
= resized
;
1645 new_block
->used_length
= size
;
1646 new_block
->max_length
= max_size
;
1647 assert(max_size
>= size
);
1649 new_block
->host
= host
;
1651 new_block
->flags
|= RAM_PREALLOC
;
1654 new_block
->flags
|= RAM_RESIZEABLE
;
1656 addr
= ram_block_add(new_block
, &local_err
);
1659 error_propagate(errp
, local_err
);
1665 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1666 MemoryRegion
*mr
, Error
**errp
)
1668 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1671 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1673 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1676 ram_addr_t
qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1677 void (*resized
)(const char*,
1680 MemoryRegion
*mr
, Error
**errp
)
1682 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1685 static void reclaim_ramblock(RAMBlock
*block
)
1687 if (block
->flags
& RAM_PREALLOC
) {
1689 } else if (xen_enabled()) {
1690 xen_invalidate_map_cache_entry(block
->host
);
1692 } else if (block
->fd
>= 0) {
1693 qemu_ram_munmap(block
->host
, block
->max_length
);
1697 qemu_anon_ram_free(block
->host
, block
->max_length
);
1702 void qemu_ram_free(ram_addr_t addr
)
1706 qemu_mutex_lock_ramlist();
1707 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1708 if (addr
== block
->offset
) {
1709 QLIST_REMOVE_RCU(block
, next
);
1710 ram_list
.mru_block
= NULL
;
1711 /* Write list before version */
1714 call_rcu(block
, reclaim_ramblock
, rcu
);
1718 qemu_mutex_unlock_ramlist();
1722 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1729 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1730 offset
= addr
- block
->offset
;
1731 if (offset
< block
->max_length
) {
1732 vaddr
= ramblock_ptr(block
, offset
);
1733 if (block
->flags
& RAM_PREALLOC
) {
1735 } else if (xen_enabled()) {
1739 if (block
->fd
>= 0) {
1740 flags
|= (block
->flags
& RAM_SHARED
?
1741 MAP_SHARED
: MAP_PRIVATE
);
1742 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1743 flags
, block
->fd
, offset
);
1746 * Remap needs to match alloc. Accelerators that
1747 * set phys_mem_alloc never remap. If they did,
1748 * we'd need a remap hook here.
1750 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1752 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1753 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1756 if (area
!= vaddr
) {
1757 fprintf(stderr
, "Could not remap addr: "
1758 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1762 memory_try_enable_merging(vaddr
, length
);
1763 qemu_ram_setup_dump(vaddr
, length
);
1768 #endif /* !_WIN32 */
1770 int qemu_get_ram_fd(ram_addr_t addr
)
1776 block
= qemu_get_ram_block(addr
);
1782 void qemu_set_ram_fd(ram_addr_t addr
, int fd
)
1787 block
= qemu_get_ram_block(addr
);
1792 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1798 block
= qemu_get_ram_block(addr
);
1799 ptr
= ramblock_ptr(block
, 0);
1804 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1805 * This should not be used for general purpose DMA. Use address_space_map
1806 * or address_space_rw instead. For local memory (e.g. video ram) that the
1807 * device owns, use memory_region_get_ram_ptr.
1809 * Called within RCU critical section.
1811 void *qemu_get_ram_ptr(ram_addr_t addr
)
1813 RAMBlock
*block
= qemu_get_ram_block(addr
);
1815 if (xen_enabled() && block
->host
== NULL
) {
1816 /* We need to check if the requested address is in the RAM
1817 * because we don't want to map the entire memory in QEMU.
1818 * In that case just map until the end of the page.
1820 if (block
->offset
== 0) {
1821 return xen_map_cache(addr
, 0, 0);
1824 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1826 return ramblock_ptr(block
, addr
- block
->offset
);
1829 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1830 * but takes a size argument.
1832 * Called within RCU critical section.
1834 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1837 ram_addr_t offset_inside_block
;
1842 block
= qemu_get_ram_block(addr
);
1843 offset_inside_block
= addr
- block
->offset
;
1844 *size
= MIN(*size
, block
->max_length
- offset_inside_block
);
1846 if (xen_enabled() && block
->host
== NULL
) {
1847 /* We need to check if the requested address is in the RAM
1848 * because we don't want to map the entire memory in QEMU.
1849 * In that case just map the requested area.
1851 if (block
->offset
== 0) {
1852 return xen_map_cache(addr
, *size
, 1);
1855 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1858 return ramblock_ptr(block
, offset_inside_block
);
1862 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1865 * ptr: Host pointer to look up
1866 * round_offset: If true round the result offset down to a page boundary
1867 * *ram_addr: set to result ram_addr
1868 * *offset: set to result offset within the RAMBlock
1870 * Returns: RAMBlock (or NULL if not found)
1872 * By the time this function returns, the returned pointer is not protected
1873 * by RCU anymore. If the caller is not within an RCU critical section and
1874 * does not hold the iothread lock, it must have other means of protecting the
1875 * pointer, such as a reference to the region that includes the incoming
1878 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1879 ram_addr_t
*ram_addr
,
1883 uint8_t *host
= ptr
;
1885 if (xen_enabled()) {
1887 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1888 block
= qemu_get_ram_block(*ram_addr
);
1890 *offset
= (host
- block
->host
);
1897 block
= atomic_rcu_read(&ram_list
.mru_block
);
1898 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1902 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1903 /* This case append when the block is not mapped. */
1904 if (block
->host
== NULL
) {
1907 if (host
- block
->host
< block
->max_length
) {
1916 *offset
= (host
- block
->host
);
1918 *offset
&= TARGET_PAGE_MASK
;
1920 *ram_addr
= block
->offset
+ *offset
;
1926 * Finds the named RAMBlock
1928 * name: The name of RAMBlock to find
1930 * Returns: RAMBlock (or NULL if not found)
1932 RAMBlock
*qemu_ram_block_by_name(const char *name
)
1936 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1937 if (!strcmp(name
, block
->idstr
)) {
1945 /* Some of the softmmu routines need to translate from a host pointer
1946 (typically a TLB entry) back to a ram offset. */
1947 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1950 ram_addr_t offset
; /* Not used */
1952 block
= qemu_ram_block_from_host(ptr
, false, ram_addr
, &offset
);
1961 /* Called within RCU critical section. */
1962 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1963 uint64_t val
, unsigned size
)
1965 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1966 tb_invalidate_phys_page_fast(ram_addr
, size
);
1970 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1973 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1976 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
1981 /* Set both VGA and migration bits for simplicity and to remove
1982 * the notdirty callback faster.
1984 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
1985 DIRTY_CLIENTS_NOCODE
);
1986 /* we remove the notdirty callback only if the code has been
1988 if (!cpu_physical_memory_is_clean(ram_addr
)) {
1989 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
1993 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
1994 unsigned size
, bool is_write
)
1999 static const MemoryRegionOps notdirty_mem_ops
= {
2000 .write
= notdirty_mem_write
,
2001 .valid
.accepts
= notdirty_mem_accepts
,
2002 .endianness
= DEVICE_NATIVE_ENDIAN
,
2005 /* Generate a debug exception if a watchpoint has been hit. */
2006 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2008 CPUState
*cpu
= current_cpu
;
2009 CPUArchState
*env
= cpu
->env_ptr
;
2010 target_ulong pc
, cs_base
;
2015 if (cpu
->watchpoint_hit
) {
2016 /* We re-entered the check after replacing the TB. Now raise
2017 * the debug interrupt so that is will trigger after the
2018 * current instruction. */
2019 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2022 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2023 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2024 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2025 && (wp
->flags
& flags
)) {
2026 if (flags
== BP_MEM_READ
) {
2027 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2029 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2031 wp
->hitaddr
= vaddr
;
2032 wp
->hitattrs
= attrs
;
2033 if (!cpu
->watchpoint_hit
) {
2034 cpu
->watchpoint_hit
= wp
;
2035 tb_check_watchpoint(cpu
);
2036 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2037 cpu
->exception_index
= EXCP_DEBUG
;
2040 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2041 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2042 cpu_resume_from_signal(cpu
, NULL
);
2046 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2051 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2052 so these check for a hit then pass through to the normal out-of-line
2054 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2055 unsigned size
, MemTxAttrs attrs
)
2059 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2060 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2062 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2065 data
= address_space_ldub(as
, addr
, attrs
, &res
);
2068 data
= address_space_lduw(as
, addr
, attrs
, &res
);
2071 data
= address_space_ldl(as
, addr
, attrs
, &res
);
2079 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2080 uint64_t val
, unsigned size
,
2084 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2085 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2087 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2090 address_space_stb(as
, addr
, val
, attrs
, &res
);
2093 address_space_stw(as
, addr
, val
, attrs
, &res
);
2096 address_space_stl(as
, addr
, val
, attrs
, &res
);
2103 static const MemoryRegionOps watch_mem_ops
= {
2104 .read_with_attrs
= watch_mem_read
,
2105 .write_with_attrs
= watch_mem_write
,
2106 .endianness
= DEVICE_NATIVE_ENDIAN
,
2109 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2110 unsigned len
, MemTxAttrs attrs
)
2112 subpage_t
*subpage
= opaque
;
2116 #if defined(DEBUG_SUBPAGE)
2117 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2118 subpage
, len
, addr
);
2120 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2127 *data
= ldub_p(buf
);
2130 *data
= lduw_p(buf
);
2143 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2144 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2146 subpage_t
*subpage
= opaque
;
2149 #if defined(DEBUG_SUBPAGE)
2150 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2151 " value %"PRIx64
"\n",
2152 __func__
, subpage
, len
, addr
, value
);
2170 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2174 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2175 unsigned len
, bool is_write
)
2177 subpage_t
*subpage
= opaque
;
2178 #if defined(DEBUG_SUBPAGE)
2179 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2180 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2183 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2187 static const MemoryRegionOps subpage_ops
= {
2188 .read_with_attrs
= subpage_read
,
2189 .write_with_attrs
= subpage_write
,
2190 .impl
.min_access_size
= 1,
2191 .impl
.max_access_size
= 8,
2192 .valid
.min_access_size
= 1,
2193 .valid
.max_access_size
= 8,
2194 .valid
.accepts
= subpage_accepts
,
2195 .endianness
= DEVICE_NATIVE_ENDIAN
,
2198 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2203 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2205 idx
= SUBPAGE_IDX(start
);
2206 eidx
= SUBPAGE_IDX(end
);
2207 #if defined(DEBUG_SUBPAGE)
2208 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2209 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2211 for (; idx
<= eidx
; idx
++) {
2212 mmio
->sub_section
[idx
] = section
;
2218 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2222 mmio
= g_malloc0(sizeof(subpage_t
));
2226 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2227 NULL
, TARGET_PAGE_SIZE
);
2228 mmio
->iomem
.subpage
= true;
2229 #if defined(DEBUG_SUBPAGE)
2230 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2231 mmio
, base
, TARGET_PAGE_SIZE
);
2233 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2238 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2242 MemoryRegionSection section
= {
2243 .address_space
= as
,
2245 .offset_within_address_space
= 0,
2246 .offset_within_region
= 0,
2247 .size
= int128_2_64(),
2250 return phys_section_add(map
, §ion
);
2253 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
, MemTxAttrs attrs
)
2255 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
2256 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[asidx
];
2257 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2258 MemoryRegionSection
*sections
= d
->map
.sections
;
2260 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2263 static void io_mem_init(void)
2265 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2266 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2268 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2270 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2274 static void mem_begin(MemoryListener
*listener
)
2276 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2277 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2280 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2281 assert(n
== PHYS_SECTION_UNASSIGNED
);
2282 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2283 assert(n
== PHYS_SECTION_NOTDIRTY
);
2284 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2285 assert(n
== PHYS_SECTION_ROM
);
2286 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2287 assert(n
== PHYS_SECTION_WATCH
);
2289 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2291 as
->next_dispatch
= d
;
2294 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2296 phys_sections_free(&d
->map
);
2300 static void mem_commit(MemoryListener
*listener
)
2302 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2303 AddressSpaceDispatch
*cur
= as
->dispatch
;
2304 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2306 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2308 atomic_rcu_set(&as
->dispatch
, next
);
2310 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2314 static void tcg_commit(MemoryListener
*listener
)
2316 CPUAddressSpace
*cpuas
;
2317 AddressSpaceDispatch
*d
;
2319 /* since each CPU stores ram addresses in its TLB cache, we must
2320 reset the modified entries */
2321 cpuas
= container_of(listener
, CPUAddressSpace
, tcg_as_listener
);
2322 cpu_reloading_memory_map();
2323 /* The CPU and TLB are protected by the iothread lock.
2324 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2325 * may have split the RCU critical section.
2327 d
= atomic_rcu_read(&cpuas
->as
->dispatch
);
2328 cpuas
->memory_dispatch
= d
;
2329 tlb_flush(cpuas
->cpu
, 1);
2332 void address_space_init_dispatch(AddressSpace
*as
)
2334 as
->dispatch
= NULL
;
2335 as
->dispatch_listener
= (MemoryListener
) {
2337 .commit
= mem_commit
,
2338 .region_add
= mem_add
,
2339 .region_nop
= mem_add
,
2342 memory_listener_register(&as
->dispatch_listener
, as
);
2345 void address_space_unregister(AddressSpace
*as
)
2347 memory_listener_unregister(&as
->dispatch_listener
);
2350 void address_space_destroy_dispatch(AddressSpace
*as
)
2352 AddressSpaceDispatch
*d
= as
->dispatch
;
2354 atomic_rcu_set(&as
->dispatch
, NULL
);
2356 call_rcu(d
, address_space_dispatch_free
, rcu
);
2360 static void memory_map_init(void)
2362 system_memory
= g_malloc(sizeof(*system_memory
));
2364 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2365 address_space_init(&address_space_memory
, system_memory
, "memory");
2367 system_io
= g_malloc(sizeof(*system_io
));
2368 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2370 address_space_init(&address_space_io
, system_io
, "I/O");
2373 MemoryRegion
*get_system_memory(void)
2375 return system_memory
;
2378 MemoryRegion
*get_system_io(void)
2383 #endif /* !defined(CONFIG_USER_ONLY) */
2385 /* physical memory access (slow version, mainly for debug) */
2386 #if defined(CONFIG_USER_ONLY)
2387 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2388 uint8_t *buf
, int len
, int is_write
)
2395 page
= addr
& TARGET_PAGE_MASK
;
2396 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2399 flags
= page_get_flags(page
);
2400 if (!(flags
& PAGE_VALID
))
2403 if (!(flags
& PAGE_WRITE
))
2405 /* XXX: this code should not depend on lock_user */
2406 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2409 unlock_user(p
, addr
, l
);
2411 if (!(flags
& PAGE_READ
))
2413 /* XXX: this code should not depend on lock_user */
2414 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2417 unlock_user(p
, addr
, 0);
2428 static void invalidate_and_set_dirty(MemoryRegion
*mr
, hwaddr addr
,
2431 uint8_t dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
2432 /* No early return if dirty_log_mask is or becomes 0, because
2433 * cpu_physical_memory_set_dirty_range will still call
2434 * xen_modified_memory.
2436 if (dirty_log_mask
) {
2438 cpu_physical_memory_range_includes_clean(addr
, length
, dirty_log_mask
);
2440 if (dirty_log_mask
& (1 << DIRTY_MEMORY_CODE
)) {
2441 tb_invalidate_phys_range(addr
, addr
+ length
);
2442 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
2444 cpu_physical_memory_set_dirty_range(addr
, length
, dirty_log_mask
);
2447 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
2449 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
2451 /* Regions are assumed to support 1-4 byte accesses unless
2452 otherwise specified. */
2453 if (access_size_max
== 0) {
2454 access_size_max
= 4;
2457 /* Bound the maximum access by the alignment of the address. */
2458 if (!mr
->ops
->impl
.unaligned
) {
2459 unsigned align_size_max
= addr
& -addr
;
2460 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
2461 access_size_max
= align_size_max
;
2465 /* Don't attempt accesses larger than the maximum. */
2466 if (l
> access_size_max
) {
2467 l
= access_size_max
;
2474 static bool prepare_mmio_access(MemoryRegion
*mr
)
2476 bool unlocked
= !qemu_mutex_iothread_locked();
2477 bool release_lock
= false;
2479 if (unlocked
&& mr
->global_locking
) {
2480 qemu_mutex_lock_iothread();
2482 release_lock
= true;
2484 if (mr
->flush_coalesced_mmio
) {
2486 qemu_mutex_lock_iothread();
2488 qemu_flush_coalesced_mmio_buffer();
2490 qemu_mutex_unlock_iothread();
2494 return release_lock
;
2497 /* Called within RCU critical section. */
2498 static MemTxResult
address_space_write_continue(AddressSpace
*as
, hwaddr addr
,
2501 int len
, hwaddr addr1
,
2502 hwaddr l
, MemoryRegion
*mr
)
2506 MemTxResult result
= MEMTX_OK
;
2507 bool release_lock
= false;
2510 if (!memory_access_is_direct(mr
, true)) {
2511 release_lock
|= prepare_mmio_access(mr
);
2512 l
= memory_access_size(mr
, l
, addr1
);
2513 /* XXX: could force current_cpu to NULL to avoid
2517 /* 64 bit write access */
2519 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 8,
2523 /* 32 bit write access */
2525 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 4,
2529 /* 16 bit write access */
2531 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 2,
2535 /* 8 bit write access */
2537 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 1,
2544 addr1
+= memory_region_get_ram_addr(mr
);
2546 ptr
= qemu_get_ram_ptr(addr1
);
2547 memcpy(ptr
, buf
, l
);
2548 invalidate_and_set_dirty(mr
, addr1
, l
);
2552 qemu_mutex_unlock_iothread();
2553 release_lock
= false;
2565 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2571 MemTxResult
address_space_write(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2572 const uint8_t *buf
, int len
)
2577 MemTxResult result
= MEMTX_OK
;
2582 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2583 result
= address_space_write_continue(as
, addr
, attrs
, buf
, len
,
2591 /* Called within RCU critical section. */
2592 MemTxResult
address_space_read_continue(AddressSpace
*as
, hwaddr addr
,
2593 MemTxAttrs attrs
, uint8_t *buf
,
2594 int len
, hwaddr addr1
, hwaddr l
,
2599 MemTxResult result
= MEMTX_OK
;
2600 bool release_lock
= false;
2603 if (!memory_access_is_direct(mr
, false)) {
2605 release_lock
|= prepare_mmio_access(mr
);
2606 l
= memory_access_size(mr
, l
, addr1
);
2609 /* 64 bit read access */
2610 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 8,
2615 /* 32 bit read access */
2616 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 4,
2621 /* 16 bit read access */
2622 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 2,
2627 /* 8 bit read access */
2628 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 1,
2637 ptr
= qemu_get_ram_ptr(mr
->ram_addr
+ addr1
);
2638 memcpy(buf
, ptr
, l
);
2642 qemu_mutex_unlock_iothread();
2643 release_lock
= false;
2655 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2661 MemTxResult
address_space_read_full(AddressSpace
*as
, hwaddr addr
,
2662 MemTxAttrs attrs
, uint8_t *buf
, int len
)
2667 MemTxResult result
= MEMTX_OK
;
2672 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2673 result
= address_space_read_continue(as
, addr
, attrs
, buf
, len
,
2681 MemTxResult
address_space_rw(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2682 uint8_t *buf
, int len
, bool is_write
)
2685 return address_space_write(as
, addr
, attrs
, (uint8_t *)buf
, len
);
2687 return address_space_read(as
, addr
, attrs
, (uint8_t *)buf
, len
);
2691 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
2692 int len
, int is_write
)
2694 address_space_rw(&address_space_memory
, addr
, MEMTXATTRS_UNSPECIFIED
,
2695 buf
, len
, is_write
);
2698 enum write_rom_type
{
2703 static inline void cpu_physical_memory_write_rom_internal(AddressSpace
*as
,
2704 hwaddr addr
, const uint8_t *buf
, int len
, enum write_rom_type type
)
2714 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2716 if (!(memory_region_is_ram(mr
) ||
2717 memory_region_is_romd(mr
))) {
2718 l
= memory_access_size(mr
, l
, addr1
);
2720 addr1
+= memory_region_get_ram_addr(mr
);
2722 ptr
= qemu_get_ram_ptr(addr1
);
2725 memcpy(ptr
, buf
, l
);
2726 invalidate_and_set_dirty(mr
, addr1
, l
);
2729 flush_icache_range((uintptr_t)ptr
, (uintptr_t)ptr
+ l
);
2740 /* used for ROM loading : can write in RAM and ROM */
2741 void cpu_physical_memory_write_rom(AddressSpace
*as
, hwaddr addr
,
2742 const uint8_t *buf
, int len
)
2744 cpu_physical_memory_write_rom_internal(as
, addr
, buf
, len
, WRITE_DATA
);
2747 void cpu_flush_icache_range(hwaddr start
, int len
)
2750 * This function should do the same thing as an icache flush that was
2751 * triggered from within the guest. For TCG we are always cache coherent,
2752 * so there is no need to flush anything. For KVM / Xen we need to flush
2753 * the host's instruction cache at least.
2755 if (tcg_enabled()) {
2759 cpu_physical_memory_write_rom_internal(&address_space_memory
,
2760 start
, NULL
, len
, FLUSH_CACHE
);
2771 static BounceBuffer bounce
;
2773 typedef struct MapClient
{
2775 QLIST_ENTRY(MapClient
) link
;
2778 QemuMutex map_client_list_lock
;
2779 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2780 = QLIST_HEAD_INITIALIZER(map_client_list
);
2782 static void cpu_unregister_map_client_do(MapClient
*client
)
2784 QLIST_REMOVE(client
, link
);
2788 static void cpu_notify_map_clients_locked(void)
2792 while (!QLIST_EMPTY(&map_client_list
)) {
2793 client
= QLIST_FIRST(&map_client_list
);
2794 qemu_bh_schedule(client
->bh
);
2795 cpu_unregister_map_client_do(client
);
2799 void cpu_register_map_client(QEMUBH
*bh
)
2801 MapClient
*client
= g_malloc(sizeof(*client
));
2803 qemu_mutex_lock(&map_client_list_lock
);
2805 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2806 if (!atomic_read(&bounce
.in_use
)) {
2807 cpu_notify_map_clients_locked();
2809 qemu_mutex_unlock(&map_client_list_lock
);
2812 void cpu_exec_init_all(void)
2814 qemu_mutex_init(&ram_list
.mutex
);
2817 qemu_mutex_init(&map_client_list_lock
);
2820 void cpu_unregister_map_client(QEMUBH
*bh
)
2824 qemu_mutex_lock(&map_client_list_lock
);
2825 QLIST_FOREACH(client
, &map_client_list
, link
) {
2826 if (client
->bh
== bh
) {
2827 cpu_unregister_map_client_do(client
);
2831 qemu_mutex_unlock(&map_client_list_lock
);
2834 static void cpu_notify_map_clients(void)
2836 qemu_mutex_lock(&map_client_list_lock
);
2837 cpu_notify_map_clients_locked();
2838 qemu_mutex_unlock(&map_client_list_lock
);
2841 bool address_space_access_valid(AddressSpace
*as
, hwaddr addr
, int len
, bool is_write
)
2849 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2850 if (!memory_access_is_direct(mr
, is_write
)) {
2851 l
= memory_access_size(mr
, l
, addr
);
2852 if (!memory_region_access_valid(mr
, xlat
, l
, is_write
)) {
2864 /* Map a physical memory region into a host virtual address.
2865 * May map a subset of the requested range, given by and returned in *plen.
2866 * May return NULL if resources needed to perform the mapping are exhausted.
2867 * Use only for reads OR writes - not for read-modify-write operations.
2868 * Use cpu_register_map_client() to know when retrying the map operation is
2869 * likely to succeed.
/* NOTE(review): extraction elided large parts of this function (remaining
 * parameters, the direct-RAM fast path loop frame, and several returns).
 * Visible logic: for non-direct regions, grab the single global bounce
 * buffer (atomic_xchg on bounce.in_use), bound the mapping to one page,
 * pre-fill it via address_space_read() when reading, and hand out
 * bounce.buffer; for RAM, extend the mapping while translation stays in
 * the same MemoryRegion and contiguous, then return a host pointer via
 * qemu_ram_ptr_length(). */
2871 void *address_space_map(AddressSpace
*as
,
2878 hwaddr l
, xlat
, base
;
2879 MemoryRegion
*mr
, *this_mr
;
2889 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2891 if (!memory_access_is_direct(mr
, is_write
)) {
2892 if (atomic_xchg(&bounce
.in_use
, true)) {
2896 /* Avoid unbounded allocations */
2897 l
= MIN(l
, TARGET_PAGE_SIZE
);
2898 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, l
);
2902 memory_region_ref(mr
);
2905 address_space_read(as
, addr
, MEMTXATTRS_UNSPECIFIED
,
2911 return bounce
.buffer
;
2915 raddr
= memory_region_get_ram_addr(mr
);
2926 this_mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2927 if (this_mr
!= mr
|| xlat
!= base
+ done
) {
2932 memory_region_ref(mr
);
2934 ptr
= qemu_ram_ptr_length(raddr
+ base
, plen
);
2940 /* Unmaps a memory region previously mapped by address_space_map().
2941 * Will also mark the memory as dirty if is_write == 1. access_len gives
2942 * the amount of memory that was actually read or written by the caller.
/* NOTE(review): extraction elided the local declarations, the is_write
 * guards, and early return (original lines 2946, 2948-2950, 2952-2953,
 * 2958, 2960-2962, 2965).  Visible logic: direct-RAM mappings are
 * resolved back to their MemoryRegion, dirtied, Xen-invalidated, and
 * unreffed; a bounce-buffer mapping is written back (when writing),
 * freed, its region unreffed, in_use cleared with a release barrier,
 * and waiting map clients are notified. */
2944 void address_space_unmap(AddressSpace
*as
, void *buffer
, hwaddr len
,
2945 int is_write
, hwaddr access_len
)
2947 if (buffer
!= bounce
.buffer
) {
2951 mr
= qemu_ram_addr_from_host(buffer
, &addr1
);
2954 invalidate_and_set_dirty(mr
, addr1
, access_len
);
2956 if (xen_enabled()) {
2957 xen_invalidate_map_cache_entry(buffer
);
2959 memory_region_unref(mr
);
2963 address_space_write(as
, bounce
.addr
, MEMTXATTRS_UNSPECIFIED
,
2964 bounce
.buffer
, access_len
);
2966 qemu_vfree(bounce
.buffer
);
2967 bounce
.buffer
= NULL
;
2968 memory_region_unref(bounce
.mr
);
2969 atomic_mb_set(&bounce
.in_use
, false);
2970 cpu_notify_map_clients();
/* Map guest physical memory (system address space) into host memory.
 * Thin wrapper over address_space_map() on &address_space_memory.
 * NOTE(review): the remaining parameter lines (presumably `hwaddr *plen,
 * int is_write`, original lines 2974-2976) were elided by extraction. */
2973 void *cpu_physical_memory_map(hwaddr addr
,
2977 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
2980 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
2981 int is_write
, hwaddr access_len
)
2983 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
2986 /* warning: addr must be aligned */
/* 32-bit load worker shared by the ldl family.  MMIO path dispatches a
 * 4-byte read and byte-swaps when @endian disagrees with the target's
 * compiled-in order; direct-RAM path uses ldl_le_p/ldl_be_p on the host
 * pointer.  NOTE(review): local declarations, the RCU/iothread frame,
 * the native-endian switch default, and the result/return epilogue were
 * elided by extraction. */
2987 static inline uint32_t address_space_ldl_internal(AddressSpace
*as
, hwaddr addr
,
2989 MemTxResult
*result
,
2990 enum device_endian endian
)
2998 bool release_lock
= false;
3001 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
3002 if (l
< 4 || !memory_access_is_direct(mr
, false)) {
3003 release_lock
|= prepare_mmio_access(mr
);
3006 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 4, attrs
);
3007 #if defined(TARGET_WORDS_BIGENDIAN)
3008 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3012 if (endian
== DEVICE_BIG_ENDIAN
) {
3018 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
3022 case DEVICE_LITTLE_ENDIAN
:
3023 val
= ldl_le_p(ptr
);
3025 case DEVICE_BIG_ENDIAN
:
3026 val
= ldl_be_p(ptr
);
3038 qemu_mutex_unlock_iothread();
3044 uint32_t address_space_ldl(AddressSpace
*as
, hwaddr addr
,
3045 MemTxAttrs attrs
, MemTxResult
*result
)
3047 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3048 DEVICE_NATIVE_ENDIAN
);
3051 uint32_t address_space_ldl_le(AddressSpace
*as
, hwaddr addr
,
3052 MemTxAttrs attrs
, MemTxResult
*result
)
3054 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3055 DEVICE_LITTLE_ENDIAN
);
3058 uint32_t address_space_ldl_be(AddressSpace
*as
, hwaddr addr
,
3059 MemTxAttrs attrs
, MemTxResult
*result
)
3061 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3065 uint32_t ldl_phys(AddressSpace
*as
, hwaddr addr
)
3067 return address_space_ldl(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3070 uint32_t ldl_le_phys(AddressSpace
*as
, hwaddr addr
)
3072 return address_space_ldl_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3075 uint32_t ldl_be_phys(AddressSpace
*as
, hwaddr addr
)
3077 return address_space_ldl_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3080 /* warning: addr must be aligned */
/* 64-bit load worker shared by the ldq family; mirrors
 * address_space_ldl_internal but with 8-byte accesses and ldq_*_p.
 * NOTE(review): declarations, RCU/iothread frame, switch default and
 * result/return epilogue elided by extraction. */
3081 static inline uint64_t address_space_ldq_internal(AddressSpace
*as
, hwaddr addr
,
3083 MemTxResult
*result
,
3084 enum device_endian endian
)
3092 bool release_lock
= false;
3095 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3097 if (l
< 8 || !memory_access_is_direct(mr
, false)) {
3098 release_lock
|= prepare_mmio_access(mr
);
3101 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 8, attrs
);
3102 #if defined(TARGET_WORDS_BIGENDIAN)
3103 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3107 if (endian
== DEVICE_BIG_ENDIAN
) {
3113 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
3117 case DEVICE_LITTLE_ENDIAN
:
3118 val
= ldq_le_p(ptr
);
3120 case DEVICE_BIG_ENDIAN
:
3121 val
= ldq_be_p(ptr
);
3133 qemu_mutex_unlock_iothread();
3139 uint64_t address_space_ldq(AddressSpace
*as
, hwaddr addr
,
3140 MemTxAttrs attrs
, MemTxResult
*result
)
3142 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3143 DEVICE_NATIVE_ENDIAN
);
3146 uint64_t address_space_ldq_le(AddressSpace
*as
, hwaddr addr
,
3147 MemTxAttrs attrs
, MemTxResult
*result
)
3149 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3150 DEVICE_LITTLE_ENDIAN
);
3153 uint64_t address_space_ldq_be(AddressSpace
*as
, hwaddr addr
,
3154 MemTxAttrs attrs
, MemTxResult
*result
)
3156 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3160 uint64_t ldq_phys(AddressSpace
*as
, hwaddr addr
)
3162 return address_space_ldq(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3165 uint64_t ldq_le_phys(AddressSpace
*as
, hwaddr addr
)
3167 return address_space_ldq_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3170 uint64_t ldq_be_phys(AddressSpace
*as
, hwaddr addr
)
3172 return address_space_ldq_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
/* Single-byte load, implemented on top of address_space_rw (last arg 0 =
 * read).  No endianness handling is needed for one byte.
 * NOTE(review): the local `val`/`r` declarations, the `if (result)`
 * store, and `return val` were elided by extraction. */
3176 uint32_t address_space_ldub(AddressSpace
*as
, hwaddr addr
,
3177 MemTxAttrs attrs
, MemTxResult
*result
)
3182 r
= address_space_rw(as
, addr
, attrs
, &val
, 1, 0);
3189 uint32_t ldub_phys(AddressSpace
*as
, hwaddr addr
)
3191 return address_space_ldub(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3194 /* warning: addr must be aligned */
/* 16-bit load worker shared by the lduw family; mirrors
 * address_space_ldl_internal but with 2-byte accesses and lduw_*_p.
 * NOTE(review): the addr parameter line, local declarations, RCU/iothread
 * frame, switch default and result/return epilogue were elided by
 * extraction. */
3195 static inline uint32_t address_space_lduw_internal(AddressSpace
*as
,
3198 MemTxResult
*result
,
3199 enum device_endian endian
)
3207 bool release_lock
= false;
3210 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3212 if (l
< 2 || !memory_access_is_direct(mr
, false)) {
3213 release_lock
|= prepare_mmio_access(mr
);
3216 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 2, attrs
);
3217 #if defined(TARGET_WORDS_BIGENDIAN)
3218 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3222 if (endian
== DEVICE_BIG_ENDIAN
) {
3228 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(mr
)
3232 case DEVICE_LITTLE_ENDIAN
:
3233 val
= lduw_le_p(ptr
);
3235 case DEVICE_BIG_ENDIAN
:
3236 val
= lduw_be_p(ptr
);
3248 qemu_mutex_unlock_iothread();
3254 uint32_t address_space_lduw(AddressSpace
*as
, hwaddr addr
,
3255 MemTxAttrs attrs
, MemTxResult
*result
)
3257 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3258 DEVICE_NATIVE_ENDIAN
);
3261 uint32_t address_space_lduw_le(AddressSpace
*as
, hwaddr addr
,
3262 MemTxAttrs attrs
, MemTxResult
*result
)
3264 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3265 DEVICE_LITTLE_ENDIAN
);
3268 uint32_t address_space_lduw_be(AddressSpace
*as
, hwaddr addr
,
3269 MemTxAttrs attrs
, MemTxResult
*result
)
3271 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3275 uint32_t lduw_phys(AddressSpace
*as
, hwaddr addr
)
3277 return address_space_lduw(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3280 uint32_t lduw_le_phys(AddressSpace
*as
, hwaddr addr
)
3282 return address_space_lduw_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3285 uint32_t lduw_be_phys(AddressSpace
*as
, hwaddr addr
)
3287 return address_space_lduw_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3290 /* warning: addr must be aligned. The ram page is not masked as dirty
3291 and the code inside is not invalidated. It is useful if the dirty
3292 bits are used to track modified PTEs */
/* 32-bit store that deliberately skips the DIRTY_MEMORY_CODE bit so TBs
 * over the page are NOT invalidated — used for guest page-table updates.
 * NOTE(review): declarations, the direct-RAM `stl_p(ptr, val)` store, and
 * the result/unlock epilogue were elided by extraction. */
3293 void address_space_stl_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3294 MemTxAttrs attrs
, MemTxResult
*result
)
3301 uint8_t dirty_log_mask
;
3302 bool release_lock
= false;
3305 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3307 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3308 release_lock
|= prepare_mmio_access(mr
);
3310 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3312 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3313 ptr
= qemu_get_ram_ptr(addr1
);
3316 dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
3317 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
3318 cpu_physical_memory_set_dirty_range(addr1
, 4, dirty_log_mask
);
3325 qemu_mutex_unlock_iothread();
3330 void stl_phys_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3332 address_space_stl_notdirty(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3335 /* warning: addr must be aligned */
/* 32-bit store worker shared by the stl family.  MMIO path byte-swaps
 * @val when @endian disagrees with the target order, then dispatches a
 * 4-byte write; direct-RAM path stores through a host pointer and marks
 * the range dirty.  NOTE(review): declarations, the `stl_le_p/stl_be_p`
 * store bodies, switch default, and result/unlock epilogue were elided
 * by extraction. */
3336 static inline void address_space_stl_internal(AddressSpace
*as
,
3337 hwaddr addr
, uint32_t val
,
3339 MemTxResult
*result
,
3340 enum device_endian endian
)
3347 bool release_lock
= false;
3350 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3352 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3353 release_lock
|= prepare_mmio_access(mr
);
3355 #if defined(TARGET_WORDS_BIGENDIAN)
3356 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3360 if (endian
== DEVICE_BIG_ENDIAN
) {
3364 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3367 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3368 ptr
= qemu_get_ram_ptr(addr1
);
3370 case DEVICE_LITTLE_ENDIAN
:
3373 case DEVICE_BIG_ENDIAN
:
3380 invalidate_and_set_dirty(mr
, addr1
, 4);
3387 qemu_mutex_unlock_iothread();
3392 void address_space_stl(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3393 MemTxAttrs attrs
, MemTxResult
*result
)
3395 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3396 DEVICE_NATIVE_ENDIAN
);
3399 void address_space_stl_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3400 MemTxAttrs attrs
, MemTxResult
*result
)
3402 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3403 DEVICE_LITTLE_ENDIAN
);
3406 void address_space_stl_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3407 MemTxAttrs attrs
, MemTxResult
*result
)
3409 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3413 void stl_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3415 address_space_stl(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3418 void stl_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3420 address_space_stl_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3423 void stl_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3425 address_space_stl_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
/* Single-byte store via address_space_rw (last arg 1 = write); the byte
 * written is a local `v` (presumably `uint8_t v = val`, elided by
 * extraction along with the `if (result)` epilogue). */
3429 void address_space_stb(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3430 MemTxAttrs attrs
, MemTxResult
*result
)
3435 r
= address_space_rw(as
, addr
, attrs
, &v
, 1, 1);
3441 void stb_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3443 address_space_stb(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3446 /* warning: addr must be aligned */
/* 16-bit store worker shared by the stw family; mirrors
 * address_space_stl_internal but with 2-byte accesses.
 * NOTE(review): declarations, the `stw_le_p/stw_be_p` store bodies,
 * switch default, and result/unlock epilogue were elided by extraction. */
3447 static inline void address_space_stw_internal(AddressSpace
*as
,
3448 hwaddr addr
, uint32_t val
,
3450 MemTxResult
*result
,
3451 enum device_endian endian
)
3458 bool release_lock
= false;
3461 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
3462 if (l
< 2 || !memory_access_is_direct(mr
, true)) {
3463 release_lock
|= prepare_mmio_access(mr
);
3465 #if defined(TARGET_WORDS_BIGENDIAN)
3466 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3470 if (endian
== DEVICE_BIG_ENDIAN
) {
3474 r
= memory_region_dispatch_write(mr
, addr1
, val
, 2, attrs
);
3477 addr1
+= memory_region_get_ram_addr(mr
) & TARGET_PAGE_MASK
;
3478 ptr
= qemu_get_ram_ptr(addr1
);
3480 case DEVICE_LITTLE_ENDIAN
:
3483 case DEVICE_BIG_ENDIAN
:
3490 invalidate_and_set_dirty(mr
, addr1
, 2);
3497 qemu_mutex_unlock_iothread();
3502 void address_space_stw(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3503 MemTxAttrs attrs
, MemTxResult
*result
)
3505 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3506 DEVICE_NATIVE_ENDIAN
);
3509 void address_space_stw_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3510 MemTxAttrs attrs
, MemTxResult
*result
)
3512 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3513 DEVICE_LITTLE_ENDIAN
);
3516 void address_space_stw_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3517 MemTxAttrs attrs
, MemTxResult
*result
)
3519 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3523 void stw_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3525 address_space_stw(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3528 void stw_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3530 address_space_stw_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3533 void stw_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3535 address_space_stw_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
/* 64-bit native-endian store via address_space_rw.  NOTE(review): the
 * target-endian swap of @val (presumably `val = tswap64(val)`) and the
 * `if (result)` epilogue were elided by extraction — cf. the visible
 * cpu_to_le64/cpu_to_be64 swaps in the _le/_be siblings below. */
3539 void address_space_stq(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3540 MemTxAttrs attrs
, MemTxResult
*result
)
3544 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
/* 64-bit little-endian store: swap @val to LE on the host, then write the
 * 8 bytes with address_space_rw.  NOTE(review): the `MemTxResult r`
 * declaration and `if (result)` epilogue were elided by extraction. */
3550 void address_space_stq_le(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3551 MemTxAttrs attrs
, MemTxResult
*result
)
3554 val
= cpu_to_le64(val
);
3555 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
/* 64-bit big-endian store: swap @val to BE on the host, then write the
 * 8 bytes with address_space_rw.  NOTE(review): the `MemTxResult r`
 * declaration and `if (result)` epilogue were elided by extraction. */
3560 void address_space_stq_be(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3561 MemTxAttrs attrs
, MemTxResult
*result
)
3564 val
= cpu_to_be64(val
);
3565 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3571 void stq_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3573 address_space_stq(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3576 void stq_le_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3578 address_space_stq_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3581 void stq_be_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3583 address_space_stq_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3586 /* virtual memory access for debug (includes writing to ROM) */
/* Debugger (gdbstub) access to guest virtual memory: per page, translate
 * the virtual address via cpu_get_phys_page_attrs_debug(), pick the
 * address-space index from the returned attributes, then write via
 * cpu_physical_memory_write_rom() (so ROM is writable for breakpoints)
 * or read via address_space_rw().  NOTE(review): the surrounding while
 * loop over `len`, the `l > len` clamp, the buf/len/addr advancement,
 * and the return statements were elided by extraction. */
3587 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
3588 uint8_t *buf
, int len
, int is_write
)
3598 page
= addr
& TARGET_PAGE_MASK
;
3599 phys_addr
= cpu_get_phys_page_attrs_debug(cpu
, page
, &attrs
);
3600 asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
3601 /* if no physical page mapped, return an error */
3602 if (phys_addr
== -1)
3604 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3607 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
3609 cpu_physical_memory_write_rom(cpu
->cpu_ases
[asidx
].as
,
3612 address_space_rw(cpu
->cpu_ases
[asidx
].as
, phys_addr
,
3613 MEMTXATTRS_UNSPECIFIED
,
3624 * Allows code that needs to deal with migration bitmaps etc to still be built
3625 * target independent.
3627 size_t qemu_target_page_bits(void)
3629 return TARGET_PAGE_BITS
;
3635 * A helper function for the _utterly broken_ virtio device model to find out if
3636 * it's running on a big endian machine. Don't do this at home kids!
/* NOTE(review): the function body (presumably `return true;` /
 * `return false;` branches of the #if, plus #else/#endif) was elided by
 * extraction; only the prototype, definition header, and the
 * TARGET_WORDS_BIGENDIAN guard survive. */
3638 bool target_words_bigendian(void);
3639 bool target_words_bigendian(void)
3641 #if defined(TARGET_WORDS_BIGENDIAN)
3648 #ifndef CONFIG_USER_ONLY
/* Return true when @phys_addr resolves to MMIO: translate it in the
 * system address space and report regions that are neither RAM nor ROMD.
 * NOTE(review): local declarations, the RCU read-lock/unlock frame, and
 * `return res;` were elided by extraction. */
3649 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
3656 mr
= address_space_translate(&address_space_memory
,
3657 phys_addr
, &phys_addr
, &l
, false);
3659 res
= !(memory_region_is_ram(mr
) || memory_region_is_romd(mr
));
3664 int qemu_ram_foreach_block(RAMBlockIterFunc func
, void *opaque
)
3670 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
3671 ret
= func(block
->idstr
, block
->host
, block
->offset
,
3672 block
->used_length
, opaque
);