/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif

#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
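/*
 * Note (added for clarity, not in the original file): the structures above
 * form a radix tree over physical page numbers.  Each interior Node is an
 * array of P_L2_SIZE PhysPageEntry slots, so a lookup consumes P_L2_BITS of
 * the page index per level, roughly:
 *
 *     index = addr >> TARGET_PAGE_BITS;
 *     slot  = (index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1);
 *
 * A leaf entry (skip == 0) indexes PhysPageMap.sections; an interior entry
 * (skip != 0) indexes PhysPageMap.nodes and records how many levels the walk
 * may jump over once the tree has been compacted.
 */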
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
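/*
 * Note (added for clarity, not in the original file): compaction folds chains
 * of single-child interior nodes into their parent by accumulating the
 * child's skip count, so phys_page_find() can drop several levels of the tree
 * in one step.  The (1 << 3) guard above keeps the combined skip within the
 * range the compaction scheme is prepared to encode.
 */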
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
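/*
 * Note (added for clarity, not in the original file): the loop above walks
 * through any number of stacked IOMMUs.  Each iteration translates the
 * address via mr->iommu_ops->translate(), clamps *plen to the IOMMU page that
 * was hit, and restarts the lookup in iotlb.target_as until a terminal
 * (non-IOMMU) MemoryRegion is reached or the access lacks the required
 * permission.
 */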
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription *[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif
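/*
 * Note (added for clarity, not in the original file): the exception_index and
 * crash_occurred subsections are only transferred when their .needed
 * callbacks return true, so migration to destinations that do not know these
 * fields keeps working as long as the fields hold their default values.
 */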
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif
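/*
 * Example (added for clarity, not in the original file): with 64-bit vaddr
 * values, a watchpoint at addr = 0xffffffffffff0000 with len = 0x10000 would
 * make addr + len wrap to zero; comparing the inclusive end points instead
 * (addr + len - 1 == 0xffffffffffffffff) keeps the overlap test correct at
 * the very top of the address space.
 */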
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
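/*
 * Note (added for clarity, not in the original file): dirty memory is tracked
 * as one bit per target page and per client (VGA, code, migration), so the
 * helper above converts the byte range into a page range and clears the bits
 * atomically; the TLBs only need resetting when TCG translated code may be
 * caching writability for those pages.
 */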
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
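/*
 * Note (added for clarity, not in the original file): for RAM the returned
 * iotlb value is the page-aligned ram_addr with one of the PHYS_SECTION_*
 * numbers ORed into the low bits, while for MMIO it is the index of the
 * section in the dispatch map; phys_section_add() asserts that section
 * numbers stay below TARGET_PAGE_SIZE so the two encodings cannot collide.
 */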
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;
/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}
static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
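/*
 * Note (added for clarity, not in the original file): mem_add() splits every
 * new MemoryRegionSection into an optional unaligned head handled as a
 * subpage, a run of whole target pages handled by register_multipage(), and
 * an optional unaligned tail that again becomes a subpage, so the
 * page-granular dispatch tree can represent regions of arbitrary size and
 * alignment.
 */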
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
1189 #include <sys/vfs.h>
1191 #define HUGETLBFS_MAGIC 0x958458f6
1193 static long gethugepagesize(const char *path
, Error
**errp
)
1199 ret
= statfs(path
, &fs
);
1200 } while (ret
!= 0 && errno
== EINTR
);
1203 error_setg_errno(errp
, errno
, "failed to get page size of file %s",
1211 static void *file_ram_alloc(RAMBlock
*block
,
1218 char *sanitized_name
;
1223 Error
*local_err
= NULL
;
1225 hpagesize
= gethugepagesize(path
, &local_err
);
1227 error_propagate(errp
, local_err
);
1230 block
->mr
->align
= hpagesize
;
1232 if (memory
< hpagesize
) {
1233 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1234 "or larger than huge page size 0x%" PRIx64
,
1239 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1241 "host lacks kvm mmu notifiers, -mem-path unsupported");
1245 if (!stat(path
, &st
) && S_ISDIR(st
.st_mode
)) {
1246 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1247 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1248 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1254 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1256 g_free(sanitized_name
);
1258 fd
= mkstemp(filename
);
1264 fd
= open(path
, O_RDWR
| O_CREAT
, 0644);
1268 error_setg_errno(errp
, errno
,
1269 "unable to create backing store for hugepages");
1273 memory
= ROUND_UP(memory
, hpagesize
);
1276 * ftruncate is not supported by hugetlbfs in older
1277 * hosts, so don't bother bailing out on errors.
1278 * If anything goes wrong with it under other filesystems,
1281 if (ftruncate(fd
, memory
)) {
1282 perror("ftruncate");
1285 area
= qemu_ram_mmap(fd
, memory
, hpagesize
, block
->flags
& RAM_SHARED
);
1286 if (area
== MAP_FAILED
) {
1287 error_setg_errno(errp
, errno
,
1288 "unable to map backing store for hugepages");
1294 os_mem_prealloc(fd
, area
, memory
);
1305 /* Called with the ramlist lock held. */
1306 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1308 RAMBlock
*block
, *next_block
;
1309 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1311 assert(size
!= 0); /* it would hand out same offset multiple times */
1313 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1317 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1318 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1320 end
= block
->offset
+ block
->max_length
;
1322 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1323 if (next_block
->offset
>= end
) {
1324 next
= MIN(next
, next_block
->offset
);
1327 if (next
- end
>= size
&& next
- end
< mingap
) {
1329 mingap
= next
- end
;
1333 if (offset
== RAM_ADDR_MAX
) {
1334 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1342 ram_addr_t
last_ram_offset(void)
1345 ram_addr_t last
= 0;
1348 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1349 last
= MAX(last
, block
->offset
+ block
->max_length
);
1355 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1359 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1360 if (!machine_dump_guest_core(current_machine
)) {
1361 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1363 perror("qemu_madvise");
1364 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1365 "but dump_guest_core=off specified\n");
1370 /* Called within an RCU critical section, or while the ramlist lock
1373 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1377 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1378 if (block
->offset
== addr
) {
1386 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1391 /* Called with iothread lock held. */
1392 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1394 RAMBlock
*new_block
, *block
;
1397 new_block
= find_ram_block(addr
);
1399 assert(!new_block
->idstr
[0]);
1402 char *id
= qdev_get_dev_path(dev
);
1404 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1408 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1410 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1411 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1412 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1420 /* Called with iothread lock held. */
1421 void qemu_ram_unset_idstr(ram_addr_t addr
)
1425 /* FIXME: arch_init.c assumes that this is not called throughout
1426 * migration. Ignore the problem since hot-unplug during migration
1427 * does not work anyway.
1431 block
= find_ram_block(addr
);
1433 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1438 static int memory_try_enable_merging(void *addr
, size_t len
)
1440 if (!machine_mem_merge(current_machine
)) {
1441 /* disabled by the user */
1445 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1448 /* Only legal before guest might have detected the memory size: e.g. on
1449 * incoming migration, or right after reset.
1451 * As memory core doesn't know how is memory accessed, it is up to
1452 * resize callback to update device state and/or add assertions to detect
1453 * misuse, if necessary.
1455 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1457 RAMBlock
*block
= find_ram_block(base
);
1461 newsize
= HOST_PAGE_ALIGN(newsize
);
1463 if (block
->used_length
== newsize
) {
1467 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1468 error_setg_errno(errp
, EINVAL
,
1469 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1470 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1471 newsize
, block
->used_length
);
1475 if (block
->max_length
< newsize
) {
1476 error_setg_errno(errp
, EINVAL
,
1477 "Length too large: %s: 0x" RAM_ADDR_FMT
1478 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1479 newsize
, block
->max_length
);
1483 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1484 block
->used_length
= newsize
;
1485 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1487 memory_region_set_size(block
->mr
, newsize
);
1488 if (block
->resized
) {
1489 block
->resized(block
->idstr
, newsize
, block
->host
);
1494 static ram_addr_t
ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1497 RAMBlock
*last_block
= NULL
;
1498 ram_addr_t old_ram_size
, new_ram_size
;
1500 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1502 qemu_mutex_lock_ramlist();
1503 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1505 if (!new_block
->host
) {
1506 if (xen_enabled()) {
1507 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1510 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1511 &new_block
->mr
->align
);
1512 if (!new_block
->host
) {
1513 error_setg_errno(errp
, errno
,
1514 "cannot set up guest memory '%s'",
1515 memory_region_name(new_block
->mr
));
1516 qemu_mutex_unlock_ramlist();
1519 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1523 new_ram_size
= MAX(old_ram_size
,
1524 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1525 if (new_ram_size
> old_ram_size
) {
1526 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1528 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1529 * QLIST (which has an RCU-friendly variant) does not have insertion at
1530 * tail, so save the last element in last_block.
1532 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1534 if (block
->max_length
< new_block
->max_length
) {
1539 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1540 } else if (last_block
) {
1541 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1542 } else { /* list is empty */
1543 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1545 ram_list
.mru_block
= NULL
;
1547 /* Write list before version */
1550 qemu_mutex_unlock_ramlist();
1552 new_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1554 if (new_ram_size
> old_ram_size
) {
1557 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1558 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1559 ram_list
.dirty_memory
[i
] =
1560 bitmap_zero_extend(ram_list
.dirty_memory
[i
],
1561 old_ram_size
, new_ram_size
);
1564 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1565 new_block
->used_length
,
1568 if (new_block
->host
) {
1569 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1570 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1571 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1572 if (kvm_enabled()) {
1573 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1577 return new_block
->offset
;
1581 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1582 bool share
, const char *mem_path
,
1585 RAMBlock
*new_block
;
1587 Error
*local_err
= NULL
;
1589 if (xen_enabled()) {
1590 error_setg(errp
, "-mem-path not supported with Xen");
1594 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1596 * file_ram_alloc() needs to allocate just like
1597 * phys_mem_alloc, but we haven't bothered to provide
1601 "-mem-path not supported with this accelerator");
1605 size
= HOST_PAGE_ALIGN(size
);
1606 new_block
= g_malloc0(sizeof(*new_block
));
1608 new_block
->used_length
= size
;
1609 new_block
->max_length
= size
;
1610 new_block
->flags
= share
? RAM_SHARED
: 0;
1611 new_block
->host
= file_ram_alloc(new_block
, size
,
1613 if (!new_block
->host
) {
1618 addr
= ram_block_add(new_block
, &local_err
);
1621 error_propagate(errp
, local_err
);
1629 ram_addr_t
qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1630 void (*resized
)(const char*,
1633 void *host
, bool resizeable
,
1634 MemoryRegion
*mr
, Error
**errp
)
1636 RAMBlock
*new_block
;
1638 Error
*local_err
= NULL
;
1640 size
= HOST_PAGE_ALIGN(size
);
1641 max_size
= HOST_PAGE_ALIGN(max_size
);
1642 new_block
= g_malloc0(sizeof(*new_block
));
1644 new_block
->resized
= resized
;
1645 new_block
->used_length
= size
;
1646 new_block
->max_length
= max_size
;
1647 assert(max_size
>= size
);
1649 new_block
->host
= host
;
1651 new_block
->flags
|= RAM_PREALLOC
;
1654 new_block
->flags
|= RAM_RESIZEABLE
;
1656 addr
= ram_block_add(new_block
, &local_err
);
1659 error_propagate(errp
, local_err
);
1665 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1666 MemoryRegion
*mr
, Error
**errp
)
1668 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1671 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1673 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1676 ram_addr_t
qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1677 void (*resized
)(const char*,
1680 MemoryRegion
*mr
, Error
**errp
)
1682 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1685 static void reclaim_ramblock(RAMBlock
*block
)
1687 if (block
->flags
& RAM_PREALLOC
) {
1689 } else if (xen_enabled()) {
1690 xen_invalidate_map_cache_entry(block
->host
);
1692 } else if (block
->fd
>= 0) {
1693 qemu_ram_munmap(block
->host
, block
->max_length
);
1697 qemu_anon_ram_free(block
->host
, block
->max_length
);
1702 void qemu_ram_free(ram_addr_t addr
)
1706 qemu_mutex_lock_ramlist();
1707 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1708 if (addr
== block
->offset
) {
1709 QLIST_REMOVE_RCU(block
, next
);
1710 ram_list
.mru_block
= NULL
;
1711 /* Write list before version */
1714 call_rcu(block
, reclaim_ramblock
, rcu
);
1718 qemu_mutex_unlock_ramlist();
1722 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1729 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1730 offset
= addr
- block
->offset
;
1731 if (offset
< block
->max_length
) {
1732 vaddr
= ramblock_ptr(block
, offset
);
1733 if (block
->flags
& RAM_PREALLOC
) {
1735 } else if (xen_enabled()) {
1739 if (block
->fd
>= 0) {
1740 flags
|= (block
->flags
& RAM_SHARED
?
1741 MAP_SHARED
: MAP_PRIVATE
);
1742 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1743 flags
, block
->fd
, offset
);
1746 * Remap needs to match alloc. Accelerators that
1747 * set phys_mem_alloc never remap. If they did,
1748 * we'd need a remap hook here.
1750 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1752 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1753 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1756 if (area
!= vaddr
) {
1757 fprintf(stderr
, "Could not remap addr: "
1758 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1762 memory_try_enable_merging(vaddr
, length
);
1763 qemu_ram_setup_dump(vaddr
, length
);
1768 #endif /* !_WIN32 */
1770 int qemu_get_ram_fd(ram_addr_t addr
)
1776 block
= qemu_get_ram_block(addr
);
1782 void qemu_set_ram_fd(ram_addr_t addr
, int fd
)
1787 block
= qemu_get_ram_block(addr
);
1792 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1798 block
= qemu_get_ram_block(addr
);
1799 ptr
= ramblock_ptr(block
, 0);
1804 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1805 * This should not be used for general purpose DMA. Use address_space_map
1806 * or address_space_rw instead. For local memory (e.g. video ram) that the
1807 * device owns, use memory_region_get_ram_ptr.
1809 * Called within RCU critical section.
1811 void *qemu_get_ram_ptr(ram_addr_t addr
)
1813 RAMBlock
*block
= qemu_get_ram_block(addr
);
1815 if (xen_enabled() && block
->host
== NULL
) {
1816 /* We need to check if the requested address is in the RAM
1817 * because we don't want to map the entire memory in QEMU.
1818 * In that case just map until the end of the page.
1820 if (block
->offset
== 0) {
1821 return xen_map_cache(addr
, 0, 0);
1824 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1826 return ramblock_ptr(block
, addr
- block
->offset
);
1829 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1830 * but takes a size argument.
1832 * Called within RCU critical section.
1834 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1837 ram_addr_t offset_inside_block
;
1842 block
= qemu_get_ram_block(addr
);
1843 offset_inside_block
= addr
- block
->offset
;
1844 *size
= MIN(*size
, block
->max_length
- offset_inside_block
);
1846 if (xen_enabled() && block
->host
== NULL
) {
1847 /* We need to check if the requested address is in the RAM
1848 * because we don't want to map the entire memory in QEMU.
1849 * In that case just map the requested area.
1851 if (block
->offset
== 0) {
1852 return xen_map_cache(addr
, *size
, 1);
1855 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1858 return ramblock_ptr(block
, offset_inside_block
);
1862 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1865 * ptr: Host pointer to look up
1866 * round_offset: If true round the result offset down to a page boundary
1867 * *ram_addr: set to result ram_addr
1868 * *offset: set to result offset within the RAMBlock
1870 * Returns: RAMBlock (or NULL if not found)
1872 * By the time this function returns, the returned pointer is not protected
1873 * by RCU anymore. If the caller is not within an RCU critical section and
1874 * does not hold the iothread lock, it must have other means of protecting the
1875 * pointer, such as a reference to the region that includes the incoming
1878 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1879 ram_addr_t
*ram_addr
,
1883 uint8_t *host
= ptr
;
1885 if (xen_enabled()) {
1887 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1888 block
= qemu_get_ram_block(*ram_addr
);
1890 *offset
= (host
- block
->host
);
1897 block
= atomic_rcu_read(&ram_list
.mru_block
);
1898 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1902 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1903 /* This case append when the block is not mapped. */
1904 if (block
->host
== NULL
) {
1907 if (host
- block
->host
< block
->max_length
) {
1916 *offset
= (host
- block
->host
);
1918 *offset
&= TARGET_PAGE_MASK
;
1920 *ram_addr
= block
->offset
+ *offset
;
1926 * Finds the named RAMBlock
1928 * name: The name of RAMBlock to find
1930 * Returns: RAMBlock (or NULL if not found)
1932 RAMBlock
*qemu_ram_block_by_name(const char *name
)
1936 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1937 if (!strcmp(name
, block
->idstr
)) {
1945 /* Some of the softmmu routines need to translate from a host pointer
1946 (typically a TLB entry) back to a ram offset. */
1947 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1950 ram_addr_t offset
; /* Not used */
1952 block
= qemu_ram_block_from_host(ptr
, false, ram_addr
, &offset
);
1961 /* Called within RCU critical section. */
1962 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1963 uint64_t val
, unsigned size
)
1965 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1966 tb_invalidate_phys_page_fast(ram_addr
, size
);
1970 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1973 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1976 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
1981 /* Set both VGA and migration bits for simplicity and to remove
1982 * the notdirty callback faster.
1984 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
1985 DIRTY_CLIENTS_NOCODE
);
1986 /* we remove the notdirty callback only if the code has been
1988 if (!cpu_physical_memory_is_clean(ram_addr
)) {
1989 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
1993 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
1994 unsigned size
, bool is_write
)
1999 static const MemoryRegionOps notdirty_mem_ops
= {
2000 .write
= notdirty_mem_write
,
2001 .valid
.accepts
= notdirty_mem_accepts
,
2002 .endianness
= DEVICE_NATIVE_ENDIAN
,
2005 /* Generate a debug exception if a watchpoint has been hit. */
2006 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2008 CPUState
*cpu
= current_cpu
;
2009 CPUArchState
*env
= cpu
->env_ptr
;
2010 target_ulong pc
, cs_base
;
2015 if (cpu
->watchpoint_hit
) {
2016 /* We re-entered the check after replacing the TB. Now raise
2017 * the debug interrupt so that is will trigger after the
2018 * current instruction. */
2019 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2022 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2023 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2024 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2025 && (wp
->flags
& flags
)) {
2026 if (flags
== BP_MEM_READ
) {
2027 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2029 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2031 wp
->hitaddr
= vaddr
;
2032 wp
->hitattrs
= attrs
;
2033 if (!cpu
->watchpoint_hit
) {
2034 cpu
->watchpoint_hit
= wp
;
2035 tb_check_watchpoint(cpu
);
2036 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2037 cpu
->exception_index
= EXCP_DEBUG
;
2040 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2041 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2042 cpu_resume_from_signal(cpu
, NULL
);
2046 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2051 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2052 so these check for a hit then pass through to the normal out-of-line
2054 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2055 unsigned size
, MemTxAttrs attrs
)
2060 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2063 data
= address_space_ldub(&address_space_memory
, addr
, attrs
, &res
);
2066 data
= address_space_lduw(&address_space_memory
, addr
, attrs
, &res
);
2069 data
= address_space_ldl(&address_space_memory
, addr
, attrs
, &res
);
2077 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2078 uint64_t val
, unsigned size
,
2083 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2086 address_space_stb(&address_space_memory
, addr
, val
, attrs
, &res
);
2089 address_space_stw(&address_space_memory
, addr
, val
, attrs
, &res
);
2092 address_space_stl(&address_space_memory
, addr
, val
, attrs
, &res
);
2099 static const MemoryRegionOps watch_mem_ops
= {
2100 .read_with_attrs
= watch_mem_read
,
2101 .write_with_attrs
= watch_mem_write
,
2102 .endianness
= DEVICE_NATIVE_ENDIAN
,
2105 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2106 unsigned len
, MemTxAttrs attrs
)
2108 subpage_t
*subpage
= opaque
;
2112 #if defined(DEBUG_SUBPAGE)
2113 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2114 subpage
, len
, addr
);
2116 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2123 *data
= ldub_p(buf
);
2126 *data
= lduw_p(buf
);
2139 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2140 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2142 subpage_t
*subpage
= opaque
;
2145 #if defined(DEBUG_SUBPAGE)
2146 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2147 " value %"PRIx64
"\n",
2148 __func__
, subpage
, len
, addr
, value
);
2166 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2170 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2171 unsigned len
, bool is_write
)
2173 subpage_t
*subpage
= opaque
;
2174 #if defined(DEBUG_SUBPAGE)
2175 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2176 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2179 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2183 static const MemoryRegionOps subpage_ops
= {
2184 .read_with_attrs
= subpage_read
,
2185 .write_with_attrs
= subpage_write
,
2186 .impl
.min_access_size
= 1,
2187 .impl
.max_access_size
= 8,
2188 .valid
.min_access_size
= 1,
2189 .valid
.max_access_size
= 8,
2190 .valid
.accepts
= subpage_accepts
,
2191 .endianness
= DEVICE_NATIVE_ENDIAN
,
2194 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2199 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2201 idx
= SUBPAGE_IDX(start
);
2202 eidx
= SUBPAGE_IDX(end
);
2203 #if defined(DEBUG_SUBPAGE)
2204 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2205 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2207 for (; idx
<= eidx
; idx
++) {
2208 mmio
->sub_section
[idx
] = section
;
2214 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2218 mmio
= g_malloc0(sizeof(subpage_t
));
2222 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2223 NULL
, TARGET_PAGE_SIZE
);
2224 mmio
->iomem
.subpage
= true;
2225 #if defined(DEBUG_SUBPAGE)
2226 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2227 mmio
, base
, TARGET_PAGE_SIZE
);
2229 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2234 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2238 MemoryRegionSection section
= {
2239 .address_space
= as
,
2241 .offset_within_address_space
= 0,
2242 .offset_within_region
= 0,
2243 .size
= int128_2_64(),
2246 return phys_section_add(map
, §ion
);
2249 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
, MemTxAttrs attrs
)
2251 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
2252 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[asidx
];
2253 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2254 MemoryRegionSection
*sections
= d
->map
.sections
;
2256 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2259 static void io_mem_init(void)
2261 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2262 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2264 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2266 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2270 static void mem_begin(MemoryListener
*listener
)
2272 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2273 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2276 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2277 assert(n
== PHYS_SECTION_UNASSIGNED
);
2278 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2279 assert(n
== PHYS_SECTION_NOTDIRTY
);
2280 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2281 assert(n
== PHYS_SECTION_ROM
);
2282 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2283 assert(n
== PHYS_SECTION_WATCH
);
2285 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2287 as
->next_dispatch
= d
;
2290 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2292 phys_sections_free(&d
->map
);
2296 static void mem_commit(MemoryListener
*listener
)
2298 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2299 AddressSpaceDispatch
*cur
= as
->dispatch
;
2300 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2302 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2304 atomic_rcu_set(&as
->dispatch
, next
);
2306 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2310 static void tcg_commit(MemoryListener
*listener
)
2312 CPUAddressSpace
*cpuas
;
2313 AddressSpaceDispatch
*d
;
2315 /* since each CPU stores ram addresses in its TLB cache, we must
2316 reset the modified entries */
2317 cpuas
= container_of(listener
, CPUAddressSpace
, tcg_as_listener
);
2318 cpu_reloading_memory_map();
2319 /* The CPU and TLB are protected by the iothread lock.
2320 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2321 * may have split the RCU critical section.
2323 d
= atomic_rcu_read(&cpuas
->as
->dispatch
);
2324 cpuas
->memory_dispatch
= d
;
2325 tlb_flush(cpuas
->cpu
, 1);
2328 void address_space_init_dispatch(AddressSpace
*as
)
2330 as
->dispatch
= NULL
;
2331 as
->dispatch_listener
= (MemoryListener
) {
2333 .commit
= mem_commit
,
2334 .region_add
= mem_add
,
2335 .region_nop
= mem_add
,
2338 memory_listener_register(&as
->dispatch_listener
, as
);
2341 void address_space_unregister(AddressSpace
*as
)
2343 memory_listener_unregister(&as
->dispatch_listener
);
2346 void address_space_destroy_dispatch(AddressSpace
*as
)
2348 AddressSpaceDispatch
*d
= as
->dispatch
;
2350 atomic_rcu_set(&as
->dispatch
, NULL
);
2352 call_rcu(d
, address_space_dispatch_free
, rcu
);
2356 static void memory_map_init(void)
2358 system_memory
= g_malloc(sizeof(*system_memory
));
2360 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2361 address_space_init(&address_space_memory
, system_memory
, "memory");
2363 system_io
= g_malloc(sizeof(*system_io
));
2364 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2366 address_space_init(&address_space_io
, system_io
, "I/O");
2369 MemoryRegion
*get_system_memory(void)
2371 return system_memory
;
2374 MemoryRegion
*get_system_io(void)
2379 #endif /* !defined(CONFIG_USER_ONLY) */
2381 /* physical memory access (slow version, mainly for debug) */
2382 #if defined(CONFIG_USER_ONLY)
2383 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2384 uint8_t *buf
, int len
, int is_write
)
2391 page
= addr
& TARGET_PAGE_MASK
;
2392 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2395 flags
= page_get_flags(page
);
2396 if (!(flags
& PAGE_VALID
))
2399 if (!(flags
& PAGE_WRITE
))
2401 /* XXX: this code should not depend on lock_user */
2402 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2405 unlock_user(p
, addr
, l
);
2407 if (!(flags
& PAGE_READ
))
2409 /* XXX: this code should not depend on lock_user */
2410 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2413 unlock_user(p
, addr
, 0);
2424 static void invalidate_and_set_dirty(MemoryRegion
*mr
, hwaddr addr
,
2427 uint8_t dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
2428 /* No early return if dirty_log_mask is or becomes 0, because
2429 * cpu_physical_memory_set_dirty_range will still call
2430 * xen_modified_memory.
2432 if (dirty_log_mask
) {
2434 cpu_physical_memory_range_includes_clean(addr
, length
, dirty_log_mask
);
2436 if (dirty_log_mask
& (1 << DIRTY_MEMORY_CODE
)) {
2437 tb_invalidate_phys_range(addr
, addr
+ length
);
2438 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
2440 cpu_physical_memory_set_dirty_range(addr
, length
, dirty_log_mask
);
2443 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
2445 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
2447 /* Regions are assumed to support 1-4 byte accesses unless
2448 otherwise specified. */
2449 if (access_size_max
== 0) {
2450 access_size_max
= 4;
2453 /* Bound the maximum access by the alignment of the address. */
2454 if (!mr
->ops
->impl
.unaligned
) {
2455 unsigned align_size_max
= addr
& -addr
;
2456 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
2457 access_size_max
= align_size_max
;
2461 /* Don't attempt accesses larger than the maximum. */
2462 if (l
> access_size_max
) {
2463 l
= access_size_max
;
2470 static bool prepare_mmio_access(MemoryRegion
*mr
)
2472 bool unlocked
= !qemu_mutex_iothread_locked();
2473 bool release_lock
= false;
2475 if (unlocked
&& mr
->global_locking
) {
2476 qemu_mutex_lock_iothread();
2478 release_lock
= true;
2480 if (mr
->flush_coalesced_mmio
) {
2482 qemu_mutex_lock_iothread();
2484 qemu_flush_coalesced_mmio_buffer();
2486 qemu_mutex_unlock_iothread();
2490 return release_lock
;
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

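/*
 * Illustrative sketch (not part of the original file): writing a small
 * buffer into guest physical memory with default (unspecified) transaction
 * attributes and checking the transaction result.
 */
static bool example_write_guest_u32(AddressSpace *as, hwaddr gpa, uint32_t v)
{
    uint8_t buf[4];

    stl_le_p(buf, v);   /* store in (here: little-endian) guest byte order */
    return address_space_write(as, gpa, MEMTXATTRS_UNSPECIFIED,
                               buf, sizeof(buf)) == MEMTX_OK;
}
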
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

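/*
 * Illustrative sketch (not part of the original file): how a board or
 * firmware loader might use cpu_physical_memory_write_rom() to place a blob
 * into a region that is mapped read-only for the guest (ROM), which a plain
 * address_space_write() would not be able to modify.
 */
static void example_load_firmware_blob(AddressSpace *as, hwaddr rom_base,
                                       const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(as, rom_base, blob, blob_len);
}
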
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

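/*
 * Illustrative sketch (not part of the original file): a DMA user whose
 * address_space_map() call returned NULL (the single bounce buffer is busy)
 * registers a bottom half; the BH is scheduled once the bounce buffer is
 * released, at which point the mapping can be retried.
 */
typedef struct ExampleDMARetry {
    QEMUBH *bh;          /* created elsewhere with qemu_bh_new() */
    AddressSpace *as;
    hwaddr addr;
    hwaddr len;
} ExampleDMARetry;

static void example_dma_retry_bh(void *opaque)
{
    ExampleDMARetry *s = opaque;
    hwaddr plen = s->len;
    void *p = address_space_map(s->as, s->addr, &plen, true);

    if (!p) {
        cpu_register_map_client(s->bh);  /* still busy: wait for next notify */
        return;
    }
    /* ... use the mapping, then address_space_unmap(s->as, p, plen, 1, plen); */
}
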
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

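/*
 * Illustrative sketch (not part of the original file): a device model
 * checking that a guest-supplied DMA window is fully backed by RAM or by
 * MMIO that accepts the access, in both directions, before starting a
 * transfer.
 */
static bool example_dma_window_ok(AddressSpace *as, hwaddr base, int size)
{
    return address_space_access_valid(as, base, size, true) &&
           address_space_access_valid(as, base, size, false);
}
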
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    ptr = qemu_ram_ptr_length(raddr + base, plen);

    return ptr;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

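/*
 * Illustrative sketch (not part of the original file): the intended
 * map / access / unmap sequence for a device doing zero-copy DMA out of
 * guest memory.  Note that *plen may come back smaller than requested, so a
 * real caller must loop or fall back to cpu_physical_memory_rw().
 */
static void example_dma_from_guest(AddressSpace *as, hwaddr gpa, hwaddr size)
{
    hwaddr plen = size;
    void *host = address_space_map(as, gpa, &plen, false /* read */);

    if (!host) {
        return;  /* resources exhausted; see cpu_register_map_client() */
    }
    /* ... read up to plen bytes from 'host' ... */
    address_space_unmap(as, host, plen, 0 /* !is_write */, plen);
}
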
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

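/*
 * Illustrative sketch (not part of the original file): reading a 32-bit,
 * little-endian descriptor field straight from guest physical memory with
 * the convenience wrappers above, e.g. while walking a DMA ring.  The field
 * offset of 8 is a made-up layout used only for illustration.
 */
static uint32_t example_read_desc_flags(AddressSpace *as, hwaddr desc_gpa)
{
    return ldl_le_phys(as, desc_gpa + 8);
}
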
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

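/*
 * Illustrative sketch (not part of the original file): a page-table walker
 * setting a status bit in a guest PTE.  Using the _notdirty store avoids
 * flagging the page-table page as containing modified code and skips the
 * DIRTY_MEMORY_CODE bookkeeping for a write the guest's own MMU would have
 * performed anyway.  The bit value below is hypothetical.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_gpa,
                                     uint32_t pte)
{
    stl_phys_notdirty(as, pte_gpa, pte | 0x20 /* hypothetical ACCESSED bit */);
}
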
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;
    int asidx;
    MemTxAttrs attrs;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}

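/*
 * Illustrative sketch (not part of the original file): a RAMBlockIterFunc
 * callback that just reports every RAM block, e.g. for debugging or for
 * sizing a migration bitmap, driven by qemu_ram_foreach_block() above.
 */
static int example_dump_ram_block(const char *idstr, void *host_addr,
                                  ram_addr_t offset, ram_addr_t length,
                                  void *opaque)
{
    fprintf(stderr, "ram block %s: host %p offset 0x" RAM_ADDR_FMT
            " length 0x" RAM_ADDR_FMT "\n",
            idstr, host_addr, offset, length);
    return 0;   /* a non-zero return would stop the iteration */
}
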