4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
25 #include "qemu/cutils.h"
27 #include "exec/exec-all.h"
29 #include "hw/qdev-core.h"
30 #if !defined(CONFIG_USER_ONLY)
31 #include "hw/boards.h"
32 #include "hw/xen/xen.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "qemu/timer.h"
37 #include "qemu/config-file.h"
38 #include "qemu/error-report.h"
39 #if defined(CONFIG_USER_ONLY)
41 #else /* !CONFIG_USER_ONLY */
43 #include "exec/memory.h"
44 #include "exec/ioport.h"
45 #include "sysemu/dma.h"
46 #include "exec/address-spaces.h"
47 #include "sysemu/xen-mapcache.h"
50 #include "exec/cpu-all.h"
51 #include "qemu/rcu_queue.h"
52 #include "qemu/main-loop.h"
53 #include "translate-all.h"
54 #include "sysemu/replay.h"
56 #include "exec/memory-internal.h"
57 #include "exec/ram_addr.h"
60 #include "qemu/range.h"
62 #include "qemu/mmap-alloc.h"
65 //#define DEBUG_SUBPAGE
67 #if !defined(CONFIG_USER_ONLY)
68 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
69 * are protected by the ramlist lock.
71 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
73 static MemoryRegion
*system_memory
;
74 static MemoryRegion
*system_io
;
76 AddressSpace address_space_io
;
77 AddressSpace address_space_memory
;
79 MemoryRegion io_mem_rom
, io_mem_notdirty
;
80 static MemoryRegion io_mem_unassigned
;
82 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
83 #define RAM_PREALLOC (1 << 0)
85 /* RAM is mmap-ed with MAP_SHARED */
86 #define RAM_SHARED (1 << 1)
88 /* Only a portion of RAM (used_length) is actually used, and migrated.
89 * This used_length size can change across reboots.
91 #define RAM_RESIZEABLE (1 << 2)
95 struct CPUTailQ cpus
= QTAILQ_HEAD_INITIALIZER(cpus
);
96 /* current CPU in the current thread. It is only valid inside
98 __thread CPUState
*current_cpu
;
99 /* 0 = Do not count executed instructions.
100 1 = Precise instruction counting.
101 2 = Adaptive rate instruction counting. */
104 #if !defined(CONFIG_USER_ONLY)
106 typedef struct PhysPageEntry PhysPageEntry
;
108 struct PhysPageEntry
{
109 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
111 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
115 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
117 /* Size of the L2 (and L3, etc) page tables. */
118 #define ADDR_SPACE_BITS 64
121 #define P_L2_SIZE (1 << P_L2_BITS)
123 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
125 typedef PhysPageEntry Node
[P_L2_SIZE
];
127 typedef struct PhysPageMap
{
130 unsigned sections_nb
;
131 unsigned sections_nb_alloc
;
133 unsigned nodes_nb_alloc
;
135 MemoryRegionSection
*sections
;
138 struct AddressSpaceDispatch
{
141 MemoryRegionSection
*mru_section
;
142 /* This is a multi-level map on the physical address space.
143 * The bottom level has pointers to MemoryRegionSections.
145 PhysPageEntry phys_map
;
150 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
151 typedef struct subpage_t
{
155 uint16_t sub_section
[TARGET_PAGE_SIZE
];
158 #define PHYS_SECTION_UNASSIGNED 0
159 #define PHYS_SECTION_NOTDIRTY 1
160 #define PHYS_SECTION_ROM 2
161 #define PHYS_SECTION_WATCH 3
163 static void io_mem_init(void);
164 static void memory_map_init(void);
165 static void tcg_commit(MemoryListener
*listener
);
167 static MemoryRegion io_mem_watch
;
170 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
171 * @cpu: the CPU whose AddressSpace this is
172 * @as: the AddressSpace itself
173 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
174 * @tcg_as_listener: listener for tracking changes to the AddressSpace
176 struct CPUAddressSpace
{
179 struct AddressSpaceDispatch
*memory_dispatch
;
180 MemoryListener tcg_as_listener
;
185 #if !defined(CONFIG_USER_ONLY)
187 static void phys_map_node_reserve(PhysPageMap
*map
, unsigned nodes
)
189 if (map
->nodes_nb
+ nodes
> map
->nodes_nb_alloc
) {
190 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
* 2, 16);
191 map
->nodes_nb_alloc
= MAX(map
->nodes_nb_alloc
, map
->nodes_nb
+ nodes
);
192 map
->nodes
= g_renew(Node
, map
->nodes
, map
->nodes_nb_alloc
);
196 static uint32_t phys_map_node_alloc(PhysPageMap
*map
, bool leaf
)
203 ret
= map
->nodes_nb
++;
205 assert(ret
!= PHYS_MAP_NODE_NIL
);
206 assert(ret
!= map
->nodes_nb_alloc
);
208 e
.skip
= leaf
? 0 : 1;
209 e
.ptr
= leaf
? PHYS_SECTION_UNASSIGNED
: PHYS_MAP_NODE_NIL
;
210 for (i
= 0; i
< P_L2_SIZE
; ++i
) {
211 memcpy(&p
[i
], &e
, sizeof(e
));
216 static void phys_page_set_level(PhysPageMap
*map
, PhysPageEntry
*lp
,
217 hwaddr
*index
, hwaddr
*nb
, uint16_t leaf
,
221 hwaddr step
= (hwaddr
)1 << (level
* P_L2_BITS
);
223 if (lp
->skip
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
224 lp
->ptr
= phys_map_node_alloc(map
, level
== 0);
226 p
= map
->nodes
[lp
->ptr
];
227 lp
= &p
[(*index
>> (level
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
229 while (*nb
&& lp
< &p
[P_L2_SIZE
]) {
230 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
236 phys_page_set_level(map
, lp
, index
, nb
, leaf
, level
- 1);
242 static void phys_page_set(AddressSpaceDispatch
*d
,
243 hwaddr index
, hwaddr nb
,
246 /* Wildly overreserve - it doesn't matter much. */
247 phys_map_node_reserve(&d
->map
, 3 * P_L2_LEVELS
);
249 phys_page_set_level(&d
->map
, &d
->phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
252 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
253 * and update our entry so we can skip it and go directly to the destination.
255 static void phys_page_compact(PhysPageEntry
*lp
, Node
*nodes
, unsigned long *compacted
)
257 unsigned valid_ptr
= P_L2_SIZE
;
262 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
267 for (i
= 0; i
< P_L2_SIZE
; i
++) {
268 if (p
[i
].ptr
== PHYS_MAP_NODE_NIL
) {
275 phys_page_compact(&p
[i
], nodes
, compacted
);
279 /* We can only compress if there's only one child. */
284 assert(valid_ptr
< P_L2_SIZE
);
286 /* Don't compress if it won't fit in the # of bits we have. */
287 if (lp
->skip
+ p
[valid_ptr
].skip
>= (1 << 3)) {
291 lp
->ptr
= p
[valid_ptr
].ptr
;
292 if (!p
[valid_ptr
].skip
) {
293 /* If our only child is a leaf, make this a leaf. */
294 /* By design, we should have made this node a leaf to begin with so we
295 * should never reach here.
296 * But since it's so simple to handle this, let's do it just in case we
301 lp
->skip
+= p
[valid_ptr
].skip
;
305 static void phys_page_compact_all(AddressSpaceDispatch
*d
, int nodes_nb
)
307 DECLARE_BITMAP(compacted
, nodes_nb
);
309 if (d
->phys_map
.skip
) {
310 phys_page_compact(&d
->phys_map
, d
->map
.nodes
, compacted
);
314 static inline bool section_covers_addr(const MemoryRegionSection
*section
,
317 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
318 * the section must cover the entire address space.
320 return section
->size
.hi
||
321 range_covers_byte(section
->offset_within_address_space
,
322 section
->size
.lo
, addr
);
325 static MemoryRegionSection
*phys_page_find(PhysPageEntry lp
, hwaddr addr
,
326 Node
*nodes
, MemoryRegionSection
*sections
)
329 hwaddr index
= addr
>> TARGET_PAGE_BITS
;
332 for (i
= P_L2_LEVELS
; lp
.skip
&& (i
-= lp
.skip
) >= 0;) {
333 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
334 return §ions
[PHYS_SECTION_UNASSIGNED
];
337 lp
= p
[(index
>> (i
* P_L2_BITS
)) & (P_L2_SIZE
- 1)];
340 if (section_covers_addr(§ions
[lp
.ptr
], addr
)) {
341 return §ions
[lp
.ptr
];
343 return §ions
[PHYS_SECTION_UNASSIGNED
];
347 bool memory_region_is_unassigned(MemoryRegion
*mr
)
349 return mr
!= &io_mem_rom
&& mr
!= &io_mem_notdirty
&& !mr
->rom_device
350 && mr
!= &io_mem_watch
;
353 /* Called from RCU critical section */
354 static MemoryRegionSection
*address_space_lookup_region(AddressSpaceDispatch
*d
,
356 bool resolve_subpage
)
358 MemoryRegionSection
*section
= atomic_read(&d
->mru_section
);
362 if (section
&& section
!= &d
->map
.sections
[PHYS_SECTION_UNASSIGNED
] &&
363 section_covers_addr(section
, addr
)) {
366 section
= phys_page_find(d
->phys_map
, addr
, d
->map
.nodes
,
370 if (resolve_subpage
&& section
->mr
->subpage
) {
371 subpage
= container_of(section
->mr
, subpage_t
, iomem
);
372 section
= &d
->map
.sections
[subpage
->sub_section
[SUBPAGE_IDX(addr
)]];
375 atomic_set(&d
->mru_section
, section
);
380 /* Called from RCU critical section */
381 static MemoryRegionSection
*
382 address_space_translate_internal(AddressSpaceDispatch
*d
, hwaddr addr
, hwaddr
*xlat
,
383 hwaddr
*plen
, bool resolve_subpage
)
385 MemoryRegionSection
*section
;
389 section
= address_space_lookup_region(d
, addr
, resolve_subpage
);
390 /* Compute offset within MemoryRegionSection */
391 addr
-= section
->offset_within_address_space
;
393 /* Compute offset within MemoryRegion */
394 *xlat
= addr
+ section
->offset_within_region
;
398 /* MMIO registers can be expected to perform full-width accesses based only
399 * on their address, without considering adjacent registers that could
400 * decode to completely different MemoryRegions. When such registers
401 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
402 * regions overlap wildly. For this reason we cannot clamp the accesses
405 * If the length is small (as is the case for address_space_ldl/stl),
406 * everything works fine. If the incoming length is large, however,
407 * the caller really has to do the clamping through memory_access_size.
409 if (memory_region_is_ram(mr
)) {
410 diff
= int128_sub(section
->size
, int128_make64(addr
));
411 *plen
= int128_get64(int128_min(diff
, int128_make64(*plen
)));
416 /* Called from RCU critical section */
417 MemoryRegion
*address_space_translate(AddressSpace
*as
, hwaddr addr
,
418 hwaddr
*xlat
, hwaddr
*plen
,
422 MemoryRegionSection
*section
;
426 AddressSpaceDispatch
*d
= atomic_rcu_read(&as
->dispatch
);
427 section
= address_space_translate_internal(d
, addr
, &addr
, plen
, true);
430 if (!mr
->iommu_ops
) {
434 iotlb
= mr
->iommu_ops
->translate(mr
, addr
, is_write
);
435 addr
= ((iotlb
.translated_addr
& ~iotlb
.addr_mask
)
436 | (addr
& iotlb
.addr_mask
));
437 *plen
= MIN(*plen
, (addr
| iotlb
.addr_mask
) - addr
+ 1);
438 if (!(iotlb
.perm
& (1 << is_write
))) {
439 mr
= &io_mem_unassigned
;
443 as
= iotlb
.target_as
;
446 if (xen_enabled() && memory_access_is_direct(mr
, is_write
)) {
447 hwaddr page
= ((addr
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
) - addr
;
448 *plen
= MIN(page
, *plen
);
455 /* Called from RCU critical section */
456 MemoryRegionSection
*
457 address_space_translate_for_iotlb(CPUState
*cpu
, int asidx
, hwaddr addr
,
458 hwaddr
*xlat
, hwaddr
*plen
)
460 MemoryRegionSection
*section
;
461 AddressSpaceDispatch
*d
= cpu
->cpu_ases
[asidx
].memory_dispatch
;
463 section
= address_space_translate_internal(d
, addr
, xlat
, plen
, false);
465 assert(!section
->mr
->iommu_ops
);
470 #if !defined(CONFIG_USER_ONLY)
472 static int cpu_common_post_load(void *opaque
, int version_id
)
474 CPUState
*cpu
= opaque
;
476 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
477 version_id is increased. */
478 cpu
->interrupt_request
&= ~0x01;
484 static int cpu_common_pre_load(void *opaque
)
486 CPUState
*cpu
= opaque
;
488 cpu
->exception_index
= -1;
493 static bool cpu_common_exception_index_needed(void *opaque
)
495 CPUState
*cpu
= opaque
;
497 return tcg_enabled() && cpu
->exception_index
!= -1;
500 static const VMStateDescription vmstate_cpu_common_exception_index
= {
501 .name
= "cpu_common/exception_index",
503 .minimum_version_id
= 1,
504 .needed
= cpu_common_exception_index_needed
,
505 .fields
= (VMStateField
[]) {
506 VMSTATE_INT32(exception_index
, CPUState
),
507 VMSTATE_END_OF_LIST()
511 static bool cpu_common_crash_occurred_needed(void *opaque
)
513 CPUState
*cpu
= opaque
;
515 return cpu
->crash_occurred
;
518 static const VMStateDescription vmstate_cpu_common_crash_occurred
= {
519 .name
= "cpu_common/crash_occurred",
521 .minimum_version_id
= 1,
522 .needed
= cpu_common_crash_occurred_needed
,
523 .fields
= (VMStateField
[]) {
524 VMSTATE_BOOL(crash_occurred
, CPUState
),
525 VMSTATE_END_OF_LIST()
529 const VMStateDescription vmstate_cpu_common
= {
530 .name
= "cpu_common",
532 .minimum_version_id
= 1,
533 .pre_load
= cpu_common_pre_load
,
534 .post_load
= cpu_common_post_load
,
535 .fields
= (VMStateField
[]) {
536 VMSTATE_UINT32(halted
, CPUState
),
537 VMSTATE_UINT32(interrupt_request
, CPUState
),
538 VMSTATE_END_OF_LIST()
540 .subsections
= (const VMStateDescription
*[]) {
541 &vmstate_cpu_common_exception_index
,
542 &vmstate_cpu_common_crash_occurred
,
549 CPUState
*qemu_get_cpu(int index
)
554 if (cpu
->cpu_index
== index
) {
562 #if !defined(CONFIG_USER_ONLY)
563 void cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
, int asidx
)
565 CPUAddressSpace
*newas
;
567 /* Target code should have set num_ases before calling us */
568 assert(asidx
< cpu
->num_ases
);
571 /* address space 0 gets the convenience alias */
575 /* KVM cannot currently support multiple address spaces. */
576 assert(asidx
== 0 || !kvm_enabled());
578 if (!cpu
->cpu_ases
) {
579 cpu
->cpu_ases
= g_new0(CPUAddressSpace
, cpu
->num_ases
);
582 newas
= &cpu
->cpu_ases
[asidx
];
586 newas
->tcg_as_listener
.commit
= tcg_commit
;
587 memory_listener_register(&newas
->tcg_as_listener
, as
);
591 AddressSpace
*cpu_get_address_space(CPUState
*cpu
, int asidx
)
593 /* Return the AddressSpace corresponding to the specified index */
594 return cpu
->cpu_ases
[asidx
].as
;
598 #ifndef CONFIG_USER_ONLY
599 static DECLARE_BITMAP(cpu_index_map
, MAX_CPUMASK_BITS
);
601 static int cpu_get_free_index(Error
**errp
)
603 int cpu
= find_first_zero_bit(cpu_index_map
, MAX_CPUMASK_BITS
);
605 if (cpu
>= MAX_CPUMASK_BITS
) {
606 error_setg(errp
, "Trying to use more CPUs than max of %d",
611 bitmap_set(cpu_index_map
, cpu
, 1);
615 void cpu_exec_exit(CPUState
*cpu
)
617 if (cpu
->cpu_index
== -1) {
618 /* cpu_index was never allocated by this @cpu or was already freed. */
622 bitmap_clear(cpu_index_map
, cpu
->cpu_index
, 1);
627 static int cpu_get_free_index(Error
**errp
)
632 CPU_FOREACH(some_cpu
) {
638 void cpu_exec_exit(CPUState
*cpu
)
643 void cpu_exec_init(CPUState
*cpu
, Error
**errp
)
645 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
646 Error
*local_err
= NULL
;
651 #ifndef CONFIG_USER_ONLY
652 cpu
->thread_id
= qemu_get_thread_id();
654 /* This is a softmmu CPU object, so create a property for it
655 * so users can wire up its memory. (This can't go in qom/cpu.c
656 * because that file is compiled only once for both user-mode
657 * and system builds.) The default if no link is set up is to use
658 * the system address space.
660 object_property_add_link(OBJECT(cpu
), "memory", TYPE_MEMORY_REGION
,
661 (Object
**)&cpu
->memory
,
662 qdev_prop_allow_set_link_before_realize
,
663 OBJ_PROP_LINK_UNREF_ON_RELEASE
,
665 cpu
->memory
= system_memory
;
666 object_ref(OBJECT(cpu
->memory
));
669 #if defined(CONFIG_USER_ONLY)
672 cpu
->cpu_index
= cpu_get_free_index(&local_err
);
674 error_propagate(errp
, local_err
);
675 #if defined(CONFIG_USER_ONLY)
680 QTAILQ_INSERT_TAIL(&cpus
, cpu
, node
);
681 #if defined(CONFIG_USER_ONLY)
685 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
686 vmstate_register(NULL
, cpu
->cpu_index
, &vmstate_cpu_common
, cpu
);
688 if (cc
->vmsd
!= NULL
) {
689 vmstate_register(NULL
, cpu
->cpu_index
, cc
->vmsd
, cpu
);
694 #if defined(CONFIG_USER_ONLY)
695 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
697 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
700 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
703 hwaddr phys
= cpu_get_phys_page_attrs_debug(cpu
, pc
, &attrs
);
704 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
706 tb_invalidate_phys_addr(cpu
->cpu_ases
[asidx
].as
,
707 phys
| (pc
& ~TARGET_PAGE_MASK
));
712 #if defined(CONFIG_USER_ONLY)
713 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
718 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
724 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
728 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
729 int flags
, CPUWatchpoint
**watchpoint
)
734 /* Add a watchpoint. */
735 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
736 int flags
, CPUWatchpoint
**watchpoint
)
740 /* forbid ranges which are empty or run off the end of the address space */
741 if (len
== 0 || (addr
+ len
- 1) < addr
) {
742 error_report("tried to set invalid watchpoint at %"
743 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
746 wp
= g_malloc(sizeof(*wp
));
752 /* keep all GDB-injected watchpoints in front */
753 if (flags
& BP_GDB
) {
754 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
756 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
759 tlb_flush_page(cpu
, addr
);
766 /* Remove a specific watchpoint. */
767 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
772 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
773 if (addr
== wp
->vaddr
&& len
== wp
->len
774 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
775 cpu_watchpoint_remove_by_ref(cpu
, wp
);
782 /* Remove a specific watchpoint by reference. */
783 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
785 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
787 tlb_flush_page(cpu
, watchpoint
->vaddr
);
792 /* Remove all matching watchpoints. */
793 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
795 CPUWatchpoint
*wp
, *next
;
797 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
798 if (wp
->flags
& mask
) {
799 cpu_watchpoint_remove_by_ref(cpu
, wp
);
804 /* Return true if this watchpoint address matches the specified
805 * access (ie the address range covered by the watchpoint overlaps
806 * partially or completely with the address range covered by the
809 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint
*wp
,
813 /* We know the lengths are non-zero, but a little caution is
814 * required to avoid errors in the case where the range ends
815 * exactly at the top of the address space and so addr + len
816 * wraps round to zero.
818 vaddr wpend
= wp
->vaddr
+ wp
->len
- 1;
819 vaddr addrend
= addr
+ len
- 1;
821 return !(addr
> wpend
|| wp
->vaddr
> addrend
);
826 /* Add a breakpoint. */
827 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
828 CPUBreakpoint
**breakpoint
)
832 bp
= g_malloc(sizeof(*bp
));
837 /* keep all GDB-injected breakpoints in front */
838 if (flags
& BP_GDB
) {
839 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
841 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
844 breakpoint_invalidate(cpu
, pc
);
852 /* Remove a specific breakpoint. */
853 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
857 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
858 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
859 cpu_breakpoint_remove_by_ref(cpu
, bp
);
866 /* Remove a specific breakpoint by reference. */
867 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
869 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
871 breakpoint_invalidate(cpu
, breakpoint
->pc
);
876 /* Remove all matching breakpoints. */
877 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
879 CPUBreakpoint
*bp
, *next
;
881 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
882 if (bp
->flags
& mask
) {
883 cpu_breakpoint_remove_by_ref(cpu
, bp
);
888 /* enable or disable single step mode. EXCP_DEBUG is returned by the
889 CPU loop after each instruction */
890 void cpu_single_step(CPUState
*cpu
, int enabled
)
892 if (cpu
->singlestep_enabled
!= enabled
) {
893 cpu
->singlestep_enabled
= enabled
;
895 kvm_update_guest_debug(cpu
, 0);
897 /* must flush all the translated code to avoid inconsistencies */
898 /* XXX: only flush what is necessary */
904 void cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
911 fprintf(stderr
, "qemu: fatal: ");
912 vfprintf(stderr
, fmt
, ap
);
913 fprintf(stderr
, "\n");
914 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
915 if (qemu_log_separate()) {
916 qemu_log("qemu: fatal: ");
917 qemu_log_vprintf(fmt
, ap2
);
919 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
926 #if defined(CONFIG_USER_ONLY)
928 struct sigaction act
;
929 sigfillset(&act
.sa_mask
);
930 act
.sa_handler
= SIG_DFL
;
931 sigaction(SIGABRT
, &act
, NULL
);
937 #if !defined(CONFIG_USER_ONLY)
938 /* Called from RCU critical section */
939 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
943 block
= atomic_rcu_read(&ram_list
.mru_block
);
944 if (block
&& addr
- block
->offset
< block
->max_length
) {
947 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
948 if (addr
- block
->offset
< block
->max_length
) {
953 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
957 /* It is safe to write mru_block outside the iothread lock. This
962 * xxx removed from list
966 * call_rcu(reclaim_ramblock, xxx);
969 * atomic_rcu_set is not needed here. The block was already published
970 * when it was placed into the list. Here we're just making an extra
971 * copy of the pointer.
973 ram_list
.mru_block
= block
;
977 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
984 end
= TARGET_PAGE_ALIGN(start
+ length
);
985 start
&= TARGET_PAGE_MASK
;
988 block
= qemu_get_ram_block(start
);
989 assert(block
== qemu_get_ram_block(end
- 1));
990 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
992 tlb_reset_dirty(cpu
, start1
, length
);
997 /* Note: start and end must be within the same ram block. */
998 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
1002 DirtyMemoryBlocks
*blocks
;
1003 unsigned long end
, page
;
1010 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
1011 page
= start
>> TARGET_PAGE_BITS
;
1015 blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[client
]);
1017 while (page
< end
) {
1018 unsigned long idx
= page
/ DIRTY_MEMORY_BLOCK_SIZE
;
1019 unsigned long offset
= page
% DIRTY_MEMORY_BLOCK_SIZE
;
1020 unsigned long num
= MIN(end
- page
, DIRTY_MEMORY_BLOCK_SIZE
- offset
);
1022 dirty
|= bitmap_test_and_clear_atomic(blocks
->blocks
[idx
],
1029 if (dirty
&& tcg_enabled()) {
1030 tlb_reset_dirty_range_all(start
, length
);
1036 /* Called from RCU critical section */
1037 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
1038 MemoryRegionSection
*section
,
1040 hwaddr paddr
, hwaddr xlat
,
1042 target_ulong
*address
)
1047 if (memory_region_is_ram(section
->mr
)) {
1049 iotlb
= memory_region_get_ram_addr(section
->mr
) + xlat
;
1050 if (!section
->readonly
) {
1051 iotlb
|= PHYS_SECTION_NOTDIRTY
;
1053 iotlb
|= PHYS_SECTION_ROM
;
1056 AddressSpaceDispatch
*d
;
1058 d
= atomic_rcu_read(§ion
->address_space
->dispatch
);
1059 iotlb
= section
- d
->map
.sections
;
1063 /* Make accesses to pages with watchpoints go via the
1064 watchpoint trap routines. */
1065 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1066 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
1067 /* Avoid trapping reads of pages with a write breakpoint. */
1068 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1069 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
1070 *address
|= TLB_MMIO
;
1078 #endif /* defined(CONFIG_USER_ONLY) */
1080 #if !defined(CONFIG_USER_ONLY)
1082 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1084 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
1086 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
1087 qemu_anon_ram_alloc
;
1090 * Set a custom physical guest memory alloator.
1091 * Accelerators with unusual needs may need this. Hopefully, we can
1092 * get rid of it eventually.
1094 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1096 phys_mem_alloc
= alloc
;
1099 static uint16_t phys_section_add(PhysPageMap
*map
,
1100 MemoryRegionSection
*section
)
1102 /* The physical section number is ORed with a page-aligned
1103 * pointer to produce the iotlb entries. Thus it should
1104 * never overflow into the page-aligned value.
1106 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1108 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1109 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1110 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1111 map
->sections_nb_alloc
);
1113 map
->sections
[map
->sections_nb
] = *section
;
1114 memory_region_ref(section
->mr
);
1115 return map
->sections_nb
++;
1118 static void phys_section_destroy(MemoryRegion
*mr
)
1120 bool have_sub_page
= mr
->subpage
;
1122 memory_region_unref(mr
);
1124 if (have_sub_page
) {
1125 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1126 object_unref(OBJECT(&subpage
->iomem
));
1131 static void phys_sections_free(PhysPageMap
*map
)
1133 while (map
->sections_nb
> 0) {
1134 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1135 phys_section_destroy(section
->mr
);
1137 g_free(map
->sections
);
1141 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1144 hwaddr base
= section
->offset_within_address_space
1146 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1147 d
->map
.nodes
, d
->map
.sections
);
1148 MemoryRegionSection subsection
= {
1149 .offset_within_address_space
= base
,
1150 .size
= int128_make64(TARGET_PAGE_SIZE
),
1154 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1156 if (!(existing
->mr
->subpage
)) {
1157 subpage
= subpage_init(d
->as
, base
);
1158 subsection
.address_space
= d
->as
;
1159 subsection
.mr
= &subpage
->iomem
;
1160 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1161 phys_section_add(&d
->map
, &subsection
));
1163 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1165 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1166 end
= start
+ int128_get64(section
->size
) - 1;
1167 subpage_register(subpage
, start
, end
,
1168 phys_section_add(&d
->map
, section
));
1172 static void register_multipage(AddressSpaceDispatch
*d
,
1173 MemoryRegionSection
*section
)
1175 hwaddr start_addr
= section
->offset_within_address_space
;
1176 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1177 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1181 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1184 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1186 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1187 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1188 MemoryRegionSection now
= *section
, remain
= *section
;
1189 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1191 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1192 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1193 - now
.offset_within_address_space
;
1195 now
.size
= int128_min(int128_make64(left
), now
.size
);
1196 register_subpage(d
, &now
);
1198 now
.size
= int128_zero();
1200 while (int128_ne(remain
.size
, now
.size
)) {
1201 remain
.size
= int128_sub(remain
.size
, now
.size
);
1202 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1203 remain
.offset_within_region
+= int128_get64(now
.size
);
1205 if (int128_lt(remain
.size
, page_size
)) {
1206 register_subpage(d
, &now
);
1207 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1208 now
.size
= page_size
;
1209 register_subpage(d
, &now
);
1211 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1212 register_multipage(d
, &now
);
/* Flush any MMIO writes KVM has batched in its coalesced-MMIO ring.
 * No-op when KVM is not in use.
 */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
1223 void qemu_mutex_lock_ramlist(void)
1225 qemu_mutex_lock(&ram_list
.mutex
);
1228 void qemu_mutex_unlock_ramlist(void)
1230 qemu_mutex_unlock(&ram_list
.mutex
);
1234 static void *file_ram_alloc(RAMBlock
*block
,
1239 bool unlink_on_error
= false;
1241 char *sanitized_name
;
1247 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1249 "host lacks kvm mmu notifiers, -mem-path unsupported");
1254 fd
= open(path
, O_RDWR
);
1256 /* @path names an existing file, use it */
1259 if (errno
== ENOENT
) {
1260 /* @path names a file that doesn't exist, create it */
1261 fd
= open(path
, O_RDWR
| O_CREAT
| O_EXCL
, 0644);
1263 unlink_on_error
= true;
1266 } else if (errno
== EISDIR
) {
1267 /* @path names a directory, create a file there */
1268 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1269 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1270 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1276 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1278 g_free(sanitized_name
);
1280 fd
= mkstemp(filename
);
1288 if (errno
!= EEXIST
&& errno
!= EINTR
) {
1289 error_setg_errno(errp
, errno
,
1290 "can't open backing store %s for guest RAM",
1295 * Try again on EINTR and EEXIST. The latter happens when
1296 * something else creates the file between our two open().
1300 page_size
= qemu_fd_getpagesize(fd
);
1301 block
->mr
->align
= MAX(page_size
, QEMU_VMALLOC_ALIGN
);
1303 if (memory
< page_size
) {
1304 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1305 "or larger than page size 0x%" PRIx64
,
1310 memory
= ROUND_UP(memory
, page_size
);
1313 * ftruncate is not supported by hugetlbfs in older
1314 * hosts, so don't bother bailing out on errors.
1315 * If anything goes wrong with it under other filesystems,
1318 if (ftruncate(fd
, memory
)) {
1319 perror("ftruncate");
1322 area
= qemu_ram_mmap(fd
, memory
, block
->mr
->align
,
1323 block
->flags
& RAM_SHARED
);
1324 if (area
== MAP_FAILED
) {
1325 error_setg_errno(errp
, errno
,
1326 "unable to map backing store for guest RAM");
1331 os_mem_prealloc(fd
, area
, memory
);
1338 if (unlink_on_error
) {
1348 /* Called with the ramlist lock held. */
1349 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1351 RAMBlock
*block
, *next_block
;
1352 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1354 assert(size
!= 0); /* it would hand out same offset multiple times */
1356 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1360 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1361 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1363 end
= block
->offset
+ block
->max_length
;
1365 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1366 if (next_block
->offset
>= end
) {
1367 next
= MIN(next
, next_block
->offset
);
1370 if (next
- end
>= size
&& next
- end
< mingap
) {
1372 mingap
= next
- end
;
1376 if (offset
== RAM_ADDR_MAX
) {
1377 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1385 ram_addr_t
last_ram_offset(void)
1388 ram_addr_t last
= 0;
1391 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1392 last
= MAX(last
, block
->offset
+ block
->max_length
);
1398 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1402 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1403 if (!machine_dump_guest_core(current_machine
)) {
1404 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1406 perror("qemu_madvise");
1407 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1408 "but dump_guest_core=off specified\n");
1413 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1418 /* Called with iothread lock held. */
1419 void qemu_ram_set_idstr(RAMBlock
*new_block
, const char *name
, DeviceState
*dev
)
1424 assert(!new_block
->idstr
[0]);
1427 char *id
= qdev_get_dev_path(dev
);
1429 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1433 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1436 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1437 if (block
!= new_block
&&
1438 !strcmp(block
->idstr
, new_block
->idstr
)) {
1439 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1447 /* Called with iothread lock held. */
1448 void qemu_ram_unset_idstr(RAMBlock
*block
)
1450 /* FIXME: arch_init.c assumes that this is not called throughout
1451 * migration. Ignore the problem since hot-unplug during migration
1452 * does not work anyway.
1455 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1459 static int memory_try_enable_merging(void *addr
, size_t len
)
1461 if (!machine_mem_merge(current_machine
)) {
1462 /* disabled by the user */
1466 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1469 /* Only legal before guest might have detected the memory size: e.g. on
1470 * incoming migration, or right after reset.
1472 * As memory core doesn't know how is memory accessed, it is up to
1473 * resize callback to update device state and/or add assertions to detect
1474 * misuse, if necessary.
1476 int qemu_ram_resize(RAMBlock
*block
, ram_addr_t newsize
, Error
**errp
)
1480 newsize
= HOST_PAGE_ALIGN(newsize
);
1482 if (block
->used_length
== newsize
) {
1486 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1487 error_setg_errno(errp
, EINVAL
,
1488 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1489 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1490 newsize
, block
->used_length
);
1494 if (block
->max_length
< newsize
) {
1495 error_setg_errno(errp
, EINVAL
,
1496 "Length too large: %s: 0x" RAM_ADDR_FMT
1497 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1498 newsize
, block
->max_length
);
1502 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1503 block
->used_length
= newsize
;
1504 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1506 memory_region_set_size(block
->mr
, newsize
);
1507 if (block
->resized
) {
1508 block
->resized(block
->idstr
, newsize
, block
->host
);
1513 /* Called with ram_list.mutex held */
1514 static void dirty_memory_extend(ram_addr_t old_ram_size
,
1515 ram_addr_t new_ram_size
)
1517 ram_addr_t old_num_blocks
= DIV_ROUND_UP(old_ram_size
,
1518 DIRTY_MEMORY_BLOCK_SIZE
);
1519 ram_addr_t new_num_blocks
= DIV_ROUND_UP(new_ram_size
,
1520 DIRTY_MEMORY_BLOCK_SIZE
);
1523 /* Only need to extend if block count increased */
1524 if (new_num_blocks
<= old_num_blocks
) {
1528 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1529 DirtyMemoryBlocks
*old_blocks
;
1530 DirtyMemoryBlocks
*new_blocks
;
1533 old_blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[i
]);
1534 new_blocks
= g_malloc(sizeof(*new_blocks
) +
1535 sizeof(new_blocks
->blocks
[0]) * new_num_blocks
);
1537 if (old_num_blocks
) {
1538 memcpy(new_blocks
->blocks
, old_blocks
->blocks
,
1539 old_num_blocks
* sizeof(old_blocks
->blocks
[0]));
1542 for (j
= old_num_blocks
; j
< new_num_blocks
; j
++) {
1543 new_blocks
->blocks
[j
] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE
);
1546 atomic_rcu_set(&ram_list
.dirty_memory
[i
], new_blocks
);
1549 g_free_rcu(old_blocks
, rcu
);
1554 static void ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1557 RAMBlock
*last_block
= NULL
;
1558 ram_addr_t old_ram_size
, new_ram_size
;
1561 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1563 qemu_mutex_lock_ramlist();
1564 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1566 if (!new_block
->host
) {
1567 if (xen_enabled()) {
1568 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1569 new_block
->mr
, &err
);
1571 error_propagate(errp
, err
);
1572 qemu_mutex_unlock_ramlist();
1576 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1577 &new_block
->mr
->align
);
1578 if (!new_block
->host
) {
1579 error_setg_errno(errp
, errno
,
1580 "cannot set up guest memory '%s'",
1581 memory_region_name(new_block
->mr
));
1582 qemu_mutex_unlock_ramlist();
1585 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1589 new_ram_size
= MAX(old_ram_size
,
1590 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1591 if (new_ram_size
> old_ram_size
) {
1592 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1593 dirty_memory_extend(old_ram_size
, new_ram_size
);
1595 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1596 * QLIST (which has an RCU-friendly variant) does not have insertion at
1597 * tail, so save the last element in last_block.
1599 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1601 if (block
->max_length
< new_block
->max_length
) {
1606 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1607 } else if (last_block
) {
1608 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1609 } else { /* list is empty */
1610 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1612 ram_list
.mru_block
= NULL
;
1614 /* Write list before version */
1617 qemu_mutex_unlock_ramlist();
1619 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1620 new_block
->used_length
,
1623 if (new_block
->host
) {
1624 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1625 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1626 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1627 if (kvm_enabled()) {
1628 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1634 RAMBlock
*qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1635 bool share
, const char *mem_path
,
1638 RAMBlock
*new_block
;
1639 Error
*local_err
= NULL
;
1641 if (xen_enabled()) {
1642 error_setg(errp
, "-mem-path not supported with Xen");
1646 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1648 * file_ram_alloc() needs to allocate just like
1649 * phys_mem_alloc, but we haven't bothered to provide
1653 "-mem-path not supported with this accelerator");
1657 size
= HOST_PAGE_ALIGN(size
);
1658 new_block
= g_malloc0(sizeof(*new_block
));
1660 new_block
->used_length
= size
;
1661 new_block
->max_length
= size
;
1662 new_block
->flags
= share
? RAM_SHARED
: 0;
1663 new_block
->host
= file_ram_alloc(new_block
, size
,
1665 if (!new_block
->host
) {
1670 ram_block_add(new_block
, &local_err
);
1673 error_propagate(errp
, local_err
);
1681 RAMBlock
*qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1682 void (*resized
)(const char*,
1685 void *host
, bool resizeable
,
1686 MemoryRegion
*mr
, Error
**errp
)
1688 RAMBlock
*new_block
;
1689 Error
*local_err
= NULL
;
1691 size
= HOST_PAGE_ALIGN(size
);
1692 max_size
= HOST_PAGE_ALIGN(max_size
);
1693 new_block
= g_malloc0(sizeof(*new_block
));
1695 new_block
->resized
= resized
;
1696 new_block
->used_length
= size
;
1697 new_block
->max_length
= max_size
;
1698 assert(max_size
>= size
);
1700 new_block
->host
= host
;
1702 new_block
->flags
|= RAM_PREALLOC
;
1705 new_block
->flags
|= RAM_RESIZEABLE
;
1707 ram_block_add(new_block
, &local_err
);
1710 error_propagate(errp
, local_err
);
1716 RAMBlock
*qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1717 MemoryRegion
*mr
, Error
**errp
)
1719 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1722 RAMBlock
*qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1724 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1727 RAMBlock
*qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1728 void (*resized
)(const char*,
1731 MemoryRegion
*mr
, Error
**errp
)
1733 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1736 static void reclaim_ramblock(RAMBlock
*block
)
1738 if (block
->flags
& RAM_PREALLOC
) {
1740 } else if (xen_enabled()) {
1741 xen_invalidate_map_cache_entry(block
->host
);
1743 } else if (block
->fd
>= 0) {
1744 qemu_ram_munmap(block
->host
, block
->max_length
);
1748 qemu_anon_ram_free(block
->host
, block
->max_length
);
1753 void qemu_ram_free(RAMBlock
*block
)
1759 qemu_mutex_lock_ramlist();
1760 QLIST_REMOVE_RCU(block
, next
);
1761 ram_list
.mru_block
= NULL
;
1762 /* Write list before version */
1765 call_rcu(block
, reclaim_ramblock
, rcu
);
1766 qemu_mutex_unlock_ramlist();
1770 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1777 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1778 offset
= addr
- block
->offset
;
1779 if (offset
< block
->max_length
) {
1780 vaddr
= ramblock_ptr(block
, offset
);
1781 if (block
->flags
& RAM_PREALLOC
) {
1783 } else if (xen_enabled()) {
1787 if (block
->fd
>= 0) {
1788 flags
|= (block
->flags
& RAM_SHARED
?
1789 MAP_SHARED
: MAP_PRIVATE
);
1790 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1791 flags
, block
->fd
, offset
);
1794 * Remap needs to match alloc. Accelerators that
1795 * set phys_mem_alloc never remap. If they did,
1796 * we'd need a remap hook here.
1798 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1800 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1801 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1804 if (area
!= vaddr
) {
1805 fprintf(stderr
, "Could not remap addr: "
1806 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1810 memory_try_enable_merging(vaddr
, length
);
1811 qemu_ram_setup_dump(vaddr
, length
);
1816 #endif /* !_WIN32 */
1818 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1819 * This should not be used for general purpose DMA. Use address_space_map
1820 * or address_space_rw instead. For local memory (e.g. video ram) that the
1821 * device owns, use memory_region_get_ram_ptr.
1823 * Called within RCU critical section.
1825 void *qemu_get_ram_ptr(RAMBlock
*ram_block
, ram_addr_t addr
)
1827 RAMBlock
*block
= ram_block
;
1829 if (block
== NULL
) {
1830 block
= qemu_get_ram_block(addr
);
1833 if (xen_enabled() && block
->host
== NULL
) {
1834 /* We need to check if the requested address is in the RAM
1835 * because we don't want to map the entire memory in QEMU.
1836 * In that case just map until the end of the page.
1838 if (block
->offset
== 0) {
1839 return xen_map_cache(addr
, 0, 0);
1842 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1844 return ramblock_ptr(block
, addr
- block
->offset
);
1847 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1848 * but takes a size argument.
1850 * Called within RCU critical section.
1852 static void *qemu_ram_ptr_length(RAMBlock
*ram_block
, ram_addr_t addr
,
1855 RAMBlock
*block
= ram_block
;
1856 ram_addr_t offset_inside_block
;
1861 if (block
== NULL
) {
1862 block
= qemu_get_ram_block(addr
);
1864 offset_inside_block
= addr
- block
->offset
;
1865 *size
= MIN(*size
, block
->max_length
- offset_inside_block
);
1867 if (xen_enabled() && block
->host
== NULL
) {
1868 /* We need to check if the requested address is in the RAM
1869 * because we don't want to map the entire memory in QEMU.
1870 * In that case just map the requested area.
1872 if (block
->offset
== 0) {
1873 return xen_map_cache(addr
, *size
, 1);
1876 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1879 return ramblock_ptr(block
, offset_inside_block
);
1883 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1886 * ptr: Host pointer to look up
1887 * round_offset: If true round the result offset down to a page boundary
1888 * *ram_addr: set to result ram_addr
1889 * *offset: set to result offset within the RAMBlock
1891 * Returns: RAMBlock (or NULL if not found)
1893 * By the time this function returns, the returned pointer is not protected
1894 * by RCU anymore. If the caller is not within an RCU critical section and
1895 * does not hold the iothread lock, it must have other means of protecting the
1896 * pointer, such as a reference to the region that includes the incoming
1899 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1900 ram_addr_t
*ram_addr
,
1904 uint8_t *host
= ptr
;
1906 if (xen_enabled()) {
1908 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1909 block
= qemu_get_ram_block(*ram_addr
);
1911 *offset
= (host
- block
->host
);
1918 block
= atomic_rcu_read(&ram_list
.mru_block
);
1919 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1923 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1924 /* This case append when the block is not mapped. */
1925 if (block
->host
== NULL
) {
1928 if (host
- block
->host
< block
->max_length
) {
1937 *offset
= (host
- block
->host
);
1939 *offset
&= TARGET_PAGE_MASK
;
1941 *ram_addr
= block
->offset
+ *offset
;
1947 * Finds the named RAMBlock
1949 * name: The name of RAMBlock to find
1951 * Returns: RAMBlock (or NULL if not found)
1953 RAMBlock
*qemu_ram_block_by_name(const char *name
)
1957 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1958 if (!strcmp(name
, block
->idstr
)) {
1966 /* Some of the softmmu routines need to translate from a host pointer
1967 (typically a TLB entry) back to a ram offset. */
1968 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1971 ram_addr_t offset
; /* Not used */
1973 block
= qemu_ram_block_from_host(ptr
, false, ram_addr
, &offset
);
1982 /* Called within RCU critical section. */
1983 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1984 uint64_t val
, unsigned size
)
1986 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1987 tb_invalidate_phys_page_fast(ram_addr
, size
);
1991 stb_p(qemu_get_ram_ptr(NULL
, ram_addr
), val
);
1994 stw_p(qemu_get_ram_ptr(NULL
, ram_addr
), val
);
1997 stl_p(qemu_get_ram_ptr(NULL
, ram_addr
), val
);
2002 /* Set both VGA and migration bits for simplicity and to remove
2003 * the notdirty callback faster.
2005 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
2006 DIRTY_CLIENTS_NOCODE
);
2007 /* we remove the notdirty callback only if the code has been
2009 if (!cpu_physical_memory_is_clean(ram_addr
)) {
2010 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
2014 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
2015 unsigned size
, bool is_write
)
2020 static const MemoryRegionOps notdirty_mem_ops
= {
2021 .write
= notdirty_mem_write
,
2022 .valid
.accepts
= notdirty_mem_accepts
,
2023 .endianness
= DEVICE_NATIVE_ENDIAN
,
2026 /* Generate a debug exception if a watchpoint has been hit. */
2027 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2029 CPUState
*cpu
= current_cpu
;
2030 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
2031 CPUArchState
*env
= cpu
->env_ptr
;
2032 target_ulong pc
, cs_base
;
2037 if (cpu
->watchpoint_hit
) {
2038 /* We re-entered the check after replacing the TB. Now raise
2039 * the debug interrupt so that is will trigger after the
2040 * current instruction. */
2041 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2044 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2045 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2046 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2047 && (wp
->flags
& flags
)) {
2048 if (flags
== BP_MEM_READ
) {
2049 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2051 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2053 wp
->hitaddr
= vaddr
;
2054 wp
->hitattrs
= attrs
;
2055 if (!cpu
->watchpoint_hit
) {
2056 if (wp
->flags
& BP_CPU
&&
2057 !cc
->debug_check_watchpoint(cpu
, wp
)) {
2058 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2061 cpu
->watchpoint_hit
= wp
;
2062 tb_check_watchpoint(cpu
);
2063 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2064 cpu
->exception_index
= EXCP_DEBUG
;
2067 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2068 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2069 cpu_resume_from_signal(cpu
, NULL
);
2073 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2078 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2079 so these check for a hit then pass through to the normal out-of-line
2081 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2082 unsigned size
, MemTxAttrs attrs
)
2086 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2087 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2089 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2092 data
= address_space_ldub(as
, addr
, attrs
, &res
);
2095 data
= address_space_lduw(as
, addr
, attrs
, &res
);
2098 data
= address_space_ldl(as
, addr
, attrs
, &res
);
2106 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2107 uint64_t val
, unsigned size
,
2111 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2112 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2114 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2117 address_space_stb(as
, addr
, val
, attrs
, &res
);
2120 address_space_stw(as
, addr
, val
, attrs
, &res
);
2123 address_space_stl(as
, addr
, val
, attrs
, &res
);
2130 static const MemoryRegionOps watch_mem_ops
= {
2131 .read_with_attrs
= watch_mem_read
,
2132 .write_with_attrs
= watch_mem_write
,
2133 .endianness
= DEVICE_NATIVE_ENDIAN
,
2136 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2137 unsigned len
, MemTxAttrs attrs
)
2139 subpage_t
*subpage
= opaque
;
2143 #if defined(DEBUG_SUBPAGE)
2144 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2145 subpage
, len
, addr
);
2147 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2154 *data
= ldub_p(buf
);
2157 *data
= lduw_p(buf
);
2170 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2171 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2173 subpage_t
*subpage
= opaque
;
2176 #if defined(DEBUG_SUBPAGE)
2177 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2178 " value %"PRIx64
"\n",
2179 __func__
, subpage
, len
, addr
, value
);
2197 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2201 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2202 unsigned len
, bool is_write
)
2204 subpage_t
*subpage
= opaque
;
2205 #if defined(DEBUG_SUBPAGE)
2206 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2207 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2210 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2214 static const MemoryRegionOps subpage_ops
= {
2215 .read_with_attrs
= subpage_read
,
2216 .write_with_attrs
= subpage_write
,
2217 .impl
.min_access_size
= 1,
2218 .impl
.max_access_size
= 8,
2219 .valid
.min_access_size
= 1,
2220 .valid
.max_access_size
= 8,
2221 .valid
.accepts
= subpage_accepts
,
2222 .endianness
= DEVICE_NATIVE_ENDIAN
,
2225 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2230 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2232 idx
= SUBPAGE_IDX(start
);
2233 eidx
= SUBPAGE_IDX(end
);
2234 #if defined(DEBUG_SUBPAGE)
2235 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2236 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2238 for (; idx
<= eidx
; idx
++) {
2239 mmio
->sub_section
[idx
] = section
;
2245 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2249 mmio
= g_malloc0(sizeof(subpage_t
));
2253 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2254 NULL
, TARGET_PAGE_SIZE
);
2255 mmio
->iomem
.subpage
= true;
2256 #if defined(DEBUG_SUBPAGE)
2257 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2258 mmio
, base
, TARGET_PAGE_SIZE
);
2260 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2265 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2269 MemoryRegionSection section
= {
2270 .address_space
= as
,
2272 .offset_within_address_space
= 0,
2273 .offset_within_region
= 0,
2274 .size
= int128_2_64(),
2277 return phys_section_add(map
, §ion
);
2280 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
, MemTxAttrs attrs
)
2282 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
2283 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[asidx
];
2284 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2285 MemoryRegionSection
*sections
= d
->map
.sections
;
2287 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2290 static void io_mem_init(void)
2292 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2293 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2295 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2297 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2301 static void mem_begin(MemoryListener
*listener
)
2303 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2304 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2307 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2308 assert(n
== PHYS_SECTION_UNASSIGNED
);
2309 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2310 assert(n
== PHYS_SECTION_NOTDIRTY
);
2311 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2312 assert(n
== PHYS_SECTION_ROM
);
2313 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2314 assert(n
== PHYS_SECTION_WATCH
);
2316 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2318 as
->next_dispatch
= d
;
2321 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2323 phys_sections_free(&d
->map
);
2327 static void mem_commit(MemoryListener
*listener
)
2329 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2330 AddressSpaceDispatch
*cur
= as
->dispatch
;
2331 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2333 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2335 atomic_rcu_set(&as
->dispatch
, next
);
2337 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2341 static void tcg_commit(MemoryListener
*listener
)
2343 CPUAddressSpace
*cpuas
;
2344 AddressSpaceDispatch
*d
;
2346 /* since each CPU stores ram addresses in its TLB cache, we must
2347 reset the modified entries */
2348 cpuas
= container_of(listener
, CPUAddressSpace
, tcg_as_listener
);
2349 cpu_reloading_memory_map();
2350 /* The CPU and TLB are protected by the iothread lock.
2351 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2352 * may have split the RCU critical section.
2354 d
= atomic_rcu_read(&cpuas
->as
->dispatch
);
2355 cpuas
->memory_dispatch
= d
;
2356 tlb_flush(cpuas
->cpu
, 1);
2359 void address_space_init_dispatch(AddressSpace
*as
)
2361 as
->dispatch
= NULL
;
2362 as
->dispatch_listener
= (MemoryListener
) {
2364 .commit
= mem_commit
,
2365 .region_add
= mem_add
,
2366 .region_nop
= mem_add
,
2369 memory_listener_register(&as
->dispatch_listener
, as
);
2372 void address_space_unregister(AddressSpace
*as
)
2374 memory_listener_unregister(&as
->dispatch_listener
);
2377 void address_space_destroy_dispatch(AddressSpace
*as
)
2379 AddressSpaceDispatch
*d
= as
->dispatch
;
2381 atomic_rcu_set(&as
->dispatch
, NULL
);
2383 call_rcu(d
, address_space_dispatch_free
, rcu
);
2387 static void memory_map_init(void)
2389 system_memory
= g_malloc(sizeof(*system_memory
));
2391 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2392 address_space_init(&address_space_memory
, system_memory
, "memory");
2394 system_io
= g_malloc(sizeof(*system_io
));
2395 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2397 address_space_init(&address_space_io
, system_io
, "I/O");
2400 MemoryRegion
*get_system_memory(void)
2402 return system_memory
;
2405 MemoryRegion
*get_system_io(void)
2410 #endif /* !defined(CONFIG_USER_ONLY) */
2412 /* physical memory access (slow version, mainly for debug) */
2413 #if defined(CONFIG_USER_ONLY)
2414 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2415 uint8_t *buf
, int len
, int is_write
)
2422 page
= addr
& TARGET_PAGE_MASK
;
2423 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2426 flags
= page_get_flags(page
);
2427 if (!(flags
& PAGE_VALID
))
2430 if (!(flags
& PAGE_WRITE
))
2432 /* XXX: this code should not depend on lock_user */
2433 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2436 unlock_user(p
, addr
, l
);
2438 if (!(flags
& PAGE_READ
))
2440 /* XXX: this code should not depend on lock_user */
2441 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2444 unlock_user(p
, addr
, 0);
2455 static void invalidate_and_set_dirty(MemoryRegion
*mr
, hwaddr addr
,
2458 uint8_t dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
2459 /* No early return if dirty_log_mask is or becomes 0, because
2460 * cpu_physical_memory_set_dirty_range will still call
2461 * xen_modified_memory.
2463 if (dirty_log_mask
) {
2465 cpu_physical_memory_range_includes_clean(addr
, length
, dirty_log_mask
);
2467 if (dirty_log_mask
& (1 << DIRTY_MEMORY_CODE
)) {
2468 tb_invalidate_phys_range(addr
, addr
+ length
);
2469 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
2471 cpu_physical_memory_set_dirty_range(addr
, length
, dirty_log_mask
);
2474 static int memory_access_size(MemoryRegion
*mr
, unsigned l
, hwaddr addr
)
2476 unsigned access_size_max
= mr
->ops
->valid
.max_access_size
;
2478 /* Regions are assumed to support 1-4 byte accesses unless
2479 otherwise specified. */
2480 if (access_size_max
== 0) {
2481 access_size_max
= 4;
2484 /* Bound the maximum access by the alignment of the address. */
2485 if (!mr
->ops
->impl
.unaligned
) {
2486 unsigned align_size_max
= addr
& -addr
;
2487 if (align_size_max
!= 0 && align_size_max
< access_size_max
) {
2488 access_size_max
= align_size_max
;
2492 /* Don't attempt accesses larger than the maximum. */
2493 if (l
> access_size_max
) {
2494 l
= access_size_max
;
2501 static bool prepare_mmio_access(MemoryRegion
*mr
)
2503 bool unlocked
= !qemu_mutex_iothread_locked();
2504 bool release_lock
= false;
2506 if (unlocked
&& mr
->global_locking
) {
2507 qemu_mutex_lock_iothread();
2509 release_lock
= true;
2511 if (mr
->flush_coalesced_mmio
) {
2513 qemu_mutex_lock_iothread();
2515 qemu_flush_coalesced_mmio_buffer();
2517 qemu_mutex_unlock_iothread();
2521 return release_lock
;
2524 /* Called within RCU critical section. */
2525 static MemTxResult
address_space_write_continue(AddressSpace
*as
, hwaddr addr
,
2528 int len
, hwaddr addr1
,
2529 hwaddr l
, MemoryRegion
*mr
)
2533 MemTxResult result
= MEMTX_OK
;
2534 bool release_lock
= false;
2537 if (!memory_access_is_direct(mr
, true)) {
2538 release_lock
|= prepare_mmio_access(mr
);
2539 l
= memory_access_size(mr
, l
, addr1
);
2540 /* XXX: could force current_cpu to NULL to avoid
2544 /* 64 bit write access */
2546 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 8,
2550 /* 32 bit write access */
2552 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 4,
2556 /* 16 bit write access */
2558 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 2,
2562 /* 8 bit write access */
2564 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 1,
2571 addr1
+= memory_region_get_ram_addr(mr
);
2573 ptr
= qemu_get_ram_ptr(mr
->ram_block
, addr1
);
2574 memcpy(ptr
, buf
, l
);
2575 invalidate_and_set_dirty(mr
, addr1
, l
);
2579 qemu_mutex_unlock_iothread();
2580 release_lock
= false;
2592 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2598 MemTxResult
address_space_write(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2599 const uint8_t *buf
, int len
)
2604 MemTxResult result
= MEMTX_OK
;
2609 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2610 result
= address_space_write_continue(as
, addr
, attrs
, buf
, len
,
2618 /* Called within RCU critical section. */
2619 MemTxResult
address_space_read_continue(AddressSpace
*as
, hwaddr addr
,
2620 MemTxAttrs attrs
, uint8_t *buf
,
2621 int len
, hwaddr addr1
, hwaddr l
,
2626 MemTxResult result
= MEMTX_OK
;
2627 bool release_lock
= false;
2630 if (!memory_access_is_direct(mr
, false)) {
2632 release_lock
|= prepare_mmio_access(mr
);
2633 l
= memory_access_size(mr
, l
, addr1
);
2636 /* 64 bit read access */
2637 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 8,
2642 /* 32 bit read access */
2643 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 4,
2648 /* 16 bit read access */
2649 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 2,
2654 /* 8 bit read access */
2655 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 1,
2664 ptr
= qemu_get_ram_ptr(mr
->ram_block
,
2665 memory_region_get_ram_addr(mr
) + addr1
);
2666 memcpy(buf
, ptr
, l
);
2670 qemu_mutex_unlock_iothread();
2671 release_lock
= false;
2683 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2689 MemTxResult
address_space_read_full(AddressSpace
*as
, hwaddr addr
,
2690 MemTxAttrs attrs
, uint8_t *buf
, int len
)
2695 MemTxResult result
= MEMTX_OK
;
2700 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
2701 result
= address_space_read_continue(as
, addr
, attrs
, buf
, len
,
2709 MemTxResult
address_space_rw(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2710 uint8_t *buf
, int len
, bool is_write
)
2713 return address_space_write(as
, addr
, attrs
, (uint8_t *)buf
, len
);
2715 return address_space_read(as
, addr
, attrs
, (uint8_t *)buf
, len
);
2719 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
2720 int len
, int is_write
)
2722 address_space_rw(&address_space_memory
, addr
, MEMTXATTRS_UNSPECIFIED
,
2723 buf
, len
, is_write
);
2726 enum write_rom_type
{
2731 static inline void cpu_physical_memory_write_rom_internal(AddressSpace
*as
,
2732 hwaddr addr
, const uint8_t *buf
, int len
, enum write_rom_type type
)
2742 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
2744 if (!(memory_region_is_ram(mr
) ||
2745 memory_region_is_romd(mr
))) {
2746 l
= memory_access_size(mr
, l
, addr1
);
2748 addr1
+= memory_region_get_ram_addr(mr
);
2750 ptr
= qemu_get_ram_ptr(mr
->ram_block
, addr1
);
2753 memcpy(ptr
, buf
, l
);
2754 invalidate_and_set_dirty(mr
, addr1
, l
);
2757 flush_icache_range((uintptr_t)ptr
, (uintptr_t)ptr
+ l
);
2768 /* used for ROM loading : can write in RAM and ROM */
2769 void cpu_physical_memory_write_rom(AddressSpace
*as
, hwaddr addr
,
2770 const uint8_t *buf
, int len
)
2772 cpu_physical_memory_write_rom_internal(as
, addr
, buf
, len
, WRITE_DATA
);
2775 void cpu_flush_icache_range(hwaddr start
, int len
)
2778 * This function should do the same thing as an icache flush that was
2779 * triggered from within the guest. For TCG we are always cache coherent,
2780 * so there is no need to flush anything. For KVM / Xen we need to flush
2781 * the host's instruction cache at least.
2783 if (tcg_enabled()) {
2787 cpu_physical_memory_write_rom_internal(&address_space_memory
,
2788 start
, NULL
, len
, FLUSH_CACHE
);
2799 static BounceBuffer bounce
;
2801 typedef struct MapClient
{
2803 QLIST_ENTRY(MapClient
) link
;
2806 QemuMutex map_client_list_lock
;
2807 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2808 = QLIST_HEAD_INITIALIZER(map_client_list
);
2810 static void cpu_unregister_map_client_do(MapClient
*client
)
2812 QLIST_REMOVE(client
, link
);
2816 static void cpu_notify_map_clients_locked(void)
2820 while (!QLIST_EMPTY(&map_client_list
)) {
2821 client
= QLIST_FIRST(&map_client_list
);
2822 qemu_bh_schedule(client
->bh
);
2823 cpu_unregister_map_client_do(client
);
2827 void cpu_register_map_client(QEMUBH
*bh
)
2829 MapClient
*client
= g_malloc(sizeof(*client
));
2831 qemu_mutex_lock(&map_client_list_lock
);
2833 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2834 if (!atomic_read(&bounce
.in_use
)) {
2835 cpu_notify_map_clients_locked();
2837 qemu_mutex_unlock(&map_client_list_lock
);
2840 void cpu_exec_init_all(void)
2842 qemu_mutex_init(&ram_list
.mutex
);
2845 qemu_mutex_init(&map_client_list_lock
);
2848 void cpu_unregister_map_client(QEMUBH
*bh
)
2852 qemu_mutex_lock(&map_client_list_lock
);
2853 QLIST_FOREACH(client
, &map_client_list
, link
) {
2854 if (client
->bh
== bh
) {
2855 cpu_unregister_map_client_do(client
);
2859 qemu_mutex_unlock(&map_client_list_lock
);
2862 static void cpu_notify_map_clients(void)
2864 qemu_mutex_lock(&map_client_list_lock
);
2865 cpu_notify_map_clients_locked();
2866 qemu_mutex_unlock(&map_client_list_lock
);
2869 bool address_space_access_valid(AddressSpace
*as
, hwaddr addr
, int len
, bool is_write
)
2877 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2878 if (!memory_access_is_direct(mr
, is_write
)) {
2879 l
= memory_access_size(mr
, l
, addr
);
2880 if (!memory_region_access_valid(mr
, xlat
, l
, is_write
)) {
2892 /* Map a physical memory region into a host virtual address.
2893 * May map a subset of the requested range, given by and returned in *plen.
2894 * May return NULL if resources needed to perform the mapping are exhausted.
2895 * Use only for reads OR writes - not for read-modify-write operations.
2896 * Use cpu_register_map_client() to know when retrying the map operation is
2897 * likely to succeed.
2899 void *address_space_map(AddressSpace
*as
,
2906 hwaddr l
, xlat
, base
;
2907 MemoryRegion
*mr
, *this_mr
;
2917 mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2919 if (!memory_access_is_direct(mr
, is_write
)) {
2920 if (atomic_xchg(&bounce
.in_use
, true)) {
2924 /* Avoid unbounded allocations */
2925 l
= MIN(l
, TARGET_PAGE_SIZE
);
2926 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, l
);
2930 memory_region_ref(mr
);
2933 address_space_read(as
, addr
, MEMTXATTRS_UNSPECIFIED
,
2939 return bounce
.buffer
;
2943 raddr
= memory_region_get_ram_addr(mr
);
2954 this_mr
= address_space_translate(as
, addr
, &xlat
, &l
, is_write
);
2955 if (this_mr
!= mr
|| xlat
!= base
+ done
) {
2960 memory_region_ref(mr
);
2962 ptr
= qemu_ram_ptr_length(mr
->ram_block
, raddr
+ base
, plen
);
2968 /* Unmaps a memory region previously mapped by address_space_map().
2969 * Will also mark the memory as dirty if is_write == 1. access_len gives
2970 * the amount of memory that was actually read or written by the caller.
2972 void address_space_unmap(AddressSpace
*as
, void *buffer
, hwaddr len
,
2973 int is_write
, hwaddr access_len
)
2975 if (buffer
!= bounce
.buffer
) {
2979 mr
= qemu_ram_addr_from_host(buffer
, &addr1
);
2982 invalidate_and_set_dirty(mr
, addr1
, access_len
);
2984 if (xen_enabled()) {
2985 xen_invalidate_map_cache_entry(buffer
);
2987 memory_region_unref(mr
);
2991 address_space_write(as
, bounce
.addr
, MEMTXATTRS_UNSPECIFIED
,
2992 bounce
.buffer
, access_len
);
2994 qemu_vfree(bounce
.buffer
);
2995 bounce
.buffer
= NULL
;
2996 memory_region_unref(bounce
.mr
);
2997 atomic_mb_set(&bounce
.in_use
, false);
2998 cpu_notify_map_clients();
3001 void *cpu_physical_memory_map(hwaddr addr
,
3005 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
3008 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
3009 int is_write
, hwaddr access_len
)
3011 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
3014 /* warning: addr must be aligned */
3015 static inline uint32_t address_space_ldl_internal(AddressSpace
*as
, hwaddr addr
,
3017 MemTxResult
*result
,
3018 enum device_endian endian
)
3026 bool release_lock
= false;
3029 mr
= address_space_translate(as
, addr
, &addr1
, &l
, false);
3030 if (l
< 4 || !memory_access_is_direct(mr
, false)) {
3031 release_lock
|= prepare_mmio_access(mr
);
3034 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 4, attrs
);
3035 #if defined(TARGET_WORDS_BIGENDIAN)
3036 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3040 if (endian
== DEVICE_BIG_ENDIAN
) {
3046 ptr
= qemu_get_ram_ptr(mr
->ram_block
,
3047 memory_region_get_ram_addr(mr
) + addr1
);
3049 case DEVICE_LITTLE_ENDIAN
:
3050 val
= ldl_le_p(ptr
);
3052 case DEVICE_BIG_ENDIAN
:
3053 val
= ldl_be_p(ptr
);
3065 qemu_mutex_unlock_iothread();
3071 uint32_t address_space_ldl(AddressSpace
*as
, hwaddr addr
,
3072 MemTxAttrs attrs
, MemTxResult
*result
)
3074 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3075 DEVICE_NATIVE_ENDIAN
);
3078 uint32_t address_space_ldl_le(AddressSpace
*as
, hwaddr addr
,
3079 MemTxAttrs attrs
, MemTxResult
*result
)
3081 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3082 DEVICE_LITTLE_ENDIAN
);
3085 uint32_t address_space_ldl_be(AddressSpace
*as
, hwaddr addr
,
3086 MemTxAttrs attrs
, MemTxResult
*result
)
3088 return address_space_ldl_internal(as
, addr
, attrs
, result
,
3092 uint32_t ldl_phys(AddressSpace
*as
, hwaddr addr
)
3094 return address_space_ldl(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3097 uint32_t ldl_le_phys(AddressSpace
*as
, hwaddr addr
)
3099 return address_space_ldl_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3102 uint32_t ldl_be_phys(AddressSpace
*as
, hwaddr addr
)
3104 return address_space_ldl_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3107 /* warning: addr must be aligned */
3108 static inline uint64_t address_space_ldq_internal(AddressSpace
*as
, hwaddr addr
,
3110 MemTxResult
*result
,
3111 enum device_endian endian
)
3119 bool release_lock
= false;
3122 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3124 if (l
< 8 || !memory_access_is_direct(mr
, false)) {
3125 release_lock
|= prepare_mmio_access(mr
);
3128 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 8, attrs
);
3129 #if defined(TARGET_WORDS_BIGENDIAN)
3130 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3134 if (endian
== DEVICE_BIG_ENDIAN
) {
3140 ptr
= qemu_get_ram_ptr(mr
->ram_block
,
3141 memory_region_get_ram_addr(mr
) + addr1
);
3143 case DEVICE_LITTLE_ENDIAN
:
3144 val
= ldq_le_p(ptr
);
3146 case DEVICE_BIG_ENDIAN
:
3147 val
= ldq_be_p(ptr
);
3159 qemu_mutex_unlock_iothread();
3165 uint64_t address_space_ldq(AddressSpace
*as
, hwaddr addr
,
3166 MemTxAttrs attrs
, MemTxResult
*result
)
3168 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3169 DEVICE_NATIVE_ENDIAN
);
3172 uint64_t address_space_ldq_le(AddressSpace
*as
, hwaddr addr
,
3173 MemTxAttrs attrs
, MemTxResult
*result
)
3175 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3176 DEVICE_LITTLE_ENDIAN
);
3179 uint64_t address_space_ldq_be(AddressSpace
*as
, hwaddr addr
,
3180 MemTxAttrs attrs
, MemTxResult
*result
)
3182 return address_space_ldq_internal(as
, addr
, attrs
, result
,
3186 uint64_t ldq_phys(AddressSpace
*as
, hwaddr addr
)
3188 return address_space_ldq(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3191 uint64_t ldq_le_phys(AddressSpace
*as
, hwaddr addr
)
3193 return address_space_ldq_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3196 uint64_t ldq_be_phys(AddressSpace
*as
, hwaddr addr
)
3198 return address_space_ldq_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3202 uint32_t address_space_ldub(AddressSpace
*as
, hwaddr addr
,
3203 MemTxAttrs attrs
, MemTxResult
*result
)
3208 r
= address_space_rw(as
, addr
, attrs
, &val
, 1, 0);
3215 uint32_t ldub_phys(AddressSpace
*as
, hwaddr addr
)
3217 return address_space_ldub(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3220 /* warning: addr must be aligned */
3221 static inline uint32_t address_space_lduw_internal(AddressSpace
*as
,
3224 MemTxResult
*result
,
3225 enum device_endian endian
)
3233 bool release_lock
= false;
3236 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3238 if (l
< 2 || !memory_access_is_direct(mr
, false)) {
3239 release_lock
|= prepare_mmio_access(mr
);
3242 r
= memory_region_dispatch_read(mr
, addr1
, &val
, 2, attrs
);
3243 #if defined(TARGET_WORDS_BIGENDIAN)
3244 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3248 if (endian
== DEVICE_BIG_ENDIAN
) {
3254 ptr
= qemu_get_ram_ptr(mr
->ram_block
,
3255 memory_region_get_ram_addr(mr
) + addr1
);
3257 case DEVICE_LITTLE_ENDIAN
:
3258 val
= lduw_le_p(ptr
);
3260 case DEVICE_BIG_ENDIAN
:
3261 val
= lduw_be_p(ptr
);
3273 qemu_mutex_unlock_iothread();
3279 uint32_t address_space_lduw(AddressSpace
*as
, hwaddr addr
,
3280 MemTxAttrs attrs
, MemTxResult
*result
)
3282 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3283 DEVICE_NATIVE_ENDIAN
);
3286 uint32_t address_space_lduw_le(AddressSpace
*as
, hwaddr addr
,
3287 MemTxAttrs attrs
, MemTxResult
*result
)
3289 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3290 DEVICE_LITTLE_ENDIAN
);
3293 uint32_t address_space_lduw_be(AddressSpace
*as
, hwaddr addr
,
3294 MemTxAttrs attrs
, MemTxResult
*result
)
3296 return address_space_lduw_internal(as
, addr
, attrs
, result
,
3300 uint32_t lduw_phys(AddressSpace
*as
, hwaddr addr
)
3302 return address_space_lduw(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3305 uint32_t lduw_le_phys(AddressSpace
*as
, hwaddr addr
)
3307 return address_space_lduw_le(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3310 uint32_t lduw_be_phys(AddressSpace
*as
, hwaddr addr
)
3312 return address_space_lduw_be(as
, addr
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3315 /* warning: addr must be aligned. The ram page is not masked as dirty
3316 and the code inside is not invalidated. It is useful if the dirty
3317 bits are used to track modified PTEs */
3318 void address_space_stl_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3319 MemTxAttrs attrs
, MemTxResult
*result
)
3326 uint8_t dirty_log_mask
;
3327 bool release_lock
= false;
3330 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3332 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3333 release_lock
|= prepare_mmio_access(mr
);
3335 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3337 addr1
+= memory_region_get_ram_addr(mr
);
3338 ptr
= qemu_get_ram_ptr(mr
->ram_block
, addr1
);
3341 dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
3342 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
3343 cpu_physical_memory_set_dirty_range(addr1
, 4, dirty_log_mask
);
3350 qemu_mutex_unlock_iothread();
3355 void stl_phys_notdirty(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3357 address_space_stl_notdirty(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3360 /* warning: addr must be aligned */
3361 static inline void address_space_stl_internal(AddressSpace
*as
,
3362 hwaddr addr
, uint32_t val
,
3364 MemTxResult
*result
,
3365 enum device_endian endian
)
3372 bool release_lock
= false;
3375 mr
= address_space_translate(as
, addr
, &addr1
, &l
,
3377 if (l
< 4 || !memory_access_is_direct(mr
, true)) {
3378 release_lock
|= prepare_mmio_access(mr
);
3380 #if defined(TARGET_WORDS_BIGENDIAN)
3381 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3385 if (endian
== DEVICE_BIG_ENDIAN
) {
3389 r
= memory_region_dispatch_write(mr
, addr1
, val
, 4, attrs
);
3392 addr1
+= memory_region_get_ram_addr(mr
);
3393 ptr
= qemu_get_ram_ptr(mr
->ram_block
, addr1
);
3395 case DEVICE_LITTLE_ENDIAN
:
3398 case DEVICE_BIG_ENDIAN
:
3405 invalidate_and_set_dirty(mr
, addr1
, 4);
3412 qemu_mutex_unlock_iothread();
3417 void address_space_stl(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3418 MemTxAttrs attrs
, MemTxResult
*result
)
3420 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3421 DEVICE_NATIVE_ENDIAN
);
3424 void address_space_stl_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3425 MemTxAttrs attrs
, MemTxResult
*result
)
3427 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3428 DEVICE_LITTLE_ENDIAN
);
3431 void address_space_stl_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3432 MemTxAttrs attrs
, MemTxResult
*result
)
3434 address_space_stl_internal(as
, addr
, val
, attrs
, result
,
3438 void stl_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3440 address_space_stl(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3443 void stl_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3445 address_space_stl_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3448 void stl_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3450 address_space_stl_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3454 void address_space_stb(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3455 MemTxAttrs attrs
, MemTxResult
*result
)
3460 r
= address_space_rw(as
, addr
, attrs
, &v
, 1, 1);
3466 void stb_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3468 address_space_stb(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3471 /* warning: addr must be aligned */
3472 static inline void address_space_stw_internal(AddressSpace
*as
,
3473 hwaddr addr
, uint32_t val
,
3475 MemTxResult
*result
,
3476 enum device_endian endian
)
3483 bool release_lock
= false;
3486 mr
= address_space_translate(as
, addr
, &addr1
, &l
, true);
3487 if (l
< 2 || !memory_access_is_direct(mr
, true)) {
3488 release_lock
|= prepare_mmio_access(mr
);
3490 #if defined(TARGET_WORDS_BIGENDIAN)
3491 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3495 if (endian
== DEVICE_BIG_ENDIAN
) {
3499 r
= memory_region_dispatch_write(mr
, addr1
, val
, 2, attrs
);
3502 addr1
+= memory_region_get_ram_addr(mr
);
3503 ptr
= qemu_get_ram_ptr(mr
->ram_block
, addr1
);
3505 case DEVICE_LITTLE_ENDIAN
:
3508 case DEVICE_BIG_ENDIAN
:
3515 invalidate_and_set_dirty(mr
, addr1
, 2);
3522 qemu_mutex_unlock_iothread();
3527 void address_space_stw(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3528 MemTxAttrs attrs
, MemTxResult
*result
)
3530 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3531 DEVICE_NATIVE_ENDIAN
);
3534 void address_space_stw_le(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3535 MemTxAttrs attrs
, MemTxResult
*result
)
3537 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3538 DEVICE_LITTLE_ENDIAN
);
3541 void address_space_stw_be(AddressSpace
*as
, hwaddr addr
, uint32_t val
,
3542 MemTxAttrs attrs
, MemTxResult
*result
)
3544 address_space_stw_internal(as
, addr
, val
, attrs
, result
,
3548 void stw_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3550 address_space_stw(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3553 void stw_le_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3555 address_space_stw_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3558 void stw_be_phys(AddressSpace
*as
, hwaddr addr
, uint32_t val
)
3560 address_space_stw_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3564 void address_space_stq(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3565 MemTxAttrs attrs
, MemTxResult
*result
)
3569 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3575 void address_space_stq_le(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3576 MemTxAttrs attrs
, MemTxResult
*result
)
3579 val
= cpu_to_le64(val
);
3580 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3585 void address_space_stq_be(AddressSpace
*as
, hwaddr addr
, uint64_t val
,
3586 MemTxAttrs attrs
, MemTxResult
*result
)
3589 val
= cpu_to_be64(val
);
3590 r
= address_space_rw(as
, addr
, attrs
, (void *) &val
, 8, 1);
3596 void stq_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3598 address_space_stq(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3601 void stq_le_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3603 address_space_stq_le(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3606 void stq_be_phys(AddressSpace
*as
, hwaddr addr
, uint64_t val
)
3608 address_space_stq_be(as
, addr
, val
, MEMTXATTRS_UNSPECIFIED
, NULL
);
3611 /* virtual memory access for debug (includes writing to ROM) */
3612 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
3613 uint8_t *buf
, int len
, int is_write
)
3623 page
= addr
& TARGET_PAGE_MASK
;
3624 phys_addr
= cpu_get_phys_page_attrs_debug(cpu
, page
, &attrs
);
3625 asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
3626 /* if no physical page mapped, return an error */
3627 if (phys_addr
== -1)
3629 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3632 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
3634 cpu_physical_memory_write_rom(cpu
->cpu_ases
[asidx
].as
,
3637 address_space_rw(cpu
->cpu_ases
[asidx
].as
, phys_addr
,
3638 MEMTXATTRS_UNSPECIFIED
,
3649 * Allows code that needs to deal with migration bitmaps etc to still be built
3650 * target independent.
3652 size_t qemu_target_page_bits(void)
3654 return TARGET_PAGE_BITS
;
3660 * A helper function for the _utterly broken_ virtio device model to find out if
3661 * it's running on a big endian machine. Don't do this at home kids!
3663 bool target_words_bigendian(void);
3664 bool target_words_bigendian(void)
3666 #if defined(TARGET_WORDS_BIGENDIAN)
3673 #ifndef CONFIG_USER_ONLY
3674 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
3681 mr
= address_space_translate(&address_space_memory
,
3682 phys_addr
, &phys_addr
, &l
, false);
3684 res
= !(memory_region_is_ram(mr
) || memory_region_is_romd(mr
));
3689 int qemu_ram_foreach_block(RAMBlockIterFunc func
, void *opaque
)
3695 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
3696 ret
= func(block
->idstr
, block
->host
, block
->offset
,
3697 block
->used_length
, opaque
);