/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
24 #include "qemu-common.h"
28 #if !defined(CONFIG_USER_ONLY)
29 #include "hw/boards.h"
32 #include "sysemu/kvm.h"
33 #include "sysemu/sysemu.h"
34 #include "hw/xen/xen.h"
35 #include "qemu/timer.h"
36 #include "qemu/config-file.h"
37 #include "qemu/error-report.h"
38 #include "exec/memory.h"
39 #include "sysemu/dma.h"
40 #include "exec/address-spaces.h"
41 #if defined(CONFIG_USER_ONLY)
43 #else /* !CONFIG_USER_ONLY */
44 #include "sysemu/xen-mapcache.h"
47 #include "exec/cpu-all.h"
48 #include "qemu/rcu_queue.h"
49 #include "qemu/main-loop.h"
50 #include "translate-all.h"
51 #include "sysemu/replay.h"
53 #include "exec/memory-internal.h"
54 #include "exec/ram_addr.h"
57 #include "qemu/range.h"
59 #include "qemu/mmap-alloc.h"
62 //#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
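/* Example: with 4 KiB target pages (TARGET_PAGE_BITS == 12) and 9-bit levels
 * (P_L2_BITS == 9), this works out to (64 - 12 - 1) / 9 + 1 = 6 levels. */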
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};
#endif

#if !defined(CONFIG_USER_ONLY)
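/* Make sure the node pool can hold at least @nodes more nodes; the array is
 * grown geometrically (doubling, minimum 16) so repeated reservations stay
 * cheap. */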
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}
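/* Allocate one new node (an array of P_L2_SIZE entries). A leaf node has all
 * entries pointing at the unassigned section; an interior node starts out with
 * every entry NIL and skip = 1. */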
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
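/* Point the @nb target pages starting at page index @index at section number
 * @leaf in the dispatch map. */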
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];

    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
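/* Resolve @addr to a MemoryRegionSection by walking the multi-level table.
 * Each entry's skip field tells how many levels the walk consumes at once; a
 * NIL pointer means nothing is mapped there and the unassigned section is
 * returned instead. */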
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;

    tlb_flush(cpu, 1);

    return 0;
}
static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}
static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}
static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
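/* Example (hypothetical target code): a target that sets cpu->num_ases = 1 in
 * its instance init would register its memory address space once, e.g.
 *     cpu_address_space_init(cpu, &address_space_memory, 0);
 * before the CPU starts executing. */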
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
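    /* For example, with a 64-bit vaddr, a 4-byte access ending at the very top
     * of the address space (addr = 0xfffffffffffffffc) makes addr + len wrap
     * to 0, while the inclusive end addr + len - 1 = 0xffffffffffffffff is
     * still representable, so the comparison below stays correct. */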
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
#endif
/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}
/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
863 /* enable or disable single step mode. EXCP_DEBUG is returned by the
864 CPU loop after each instruction */
865 void cpu_single_step(CPUState
*cpu
, int enabled
)
867 if (cpu
->singlestep_enabled
!= enabled
) {
868 cpu
->singlestep_enabled
= enabled
;
870 kvm_update_guest_debug(cpu
, 0);
872 /* must flush all the translated code to avoid inconsistencies */
873 /* XXX: only flush what is necessary */
879 void cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
886 fprintf(stderr
, "qemu: fatal: ");
887 vfprintf(stderr
, fmt
, ap
);
888 fprintf(stderr
, "\n");
889 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
890 if (qemu_log_separate()) {
891 qemu_log("qemu: fatal: ");
892 qemu_log_vprintf(fmt
, ap2
);
894 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
901 #if defined(CONFIG_USER_ONLY)
903 struct sigaction act
;
904 sigfillset(&act
.sa_mask
);
905 act
.sa_handler
= SIG_DFL
;
906 sigaction(SIGABRT
, &act
, NULL
);
912 #if !defined(CONFIG_USER_ONLY)
913 /* Called from RCU critical section */
914 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
918 block
= atomic_rcu_read(&ram_list
.mru_block
);
919 if (block
&& addr
- block
->offset
< block
->max_length
) {
922 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
923 if (addr
- block
->offset
< block
->max_length
) {
928 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
932 /* It is safe to write mru_block outside the iothread lock. This
937 * xxx removed from list
941 * call_rcu(reclaim_ramblock, xxx);
944 * atomic_rcu_set is not needed here. The block was already published
945 * when it was placed into the list. Here we're just making an extra
946 * copy of the pointer.
948 ram_list
.mru_block
= block
;
952 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
959 end
= TARGET_PAGE_ALIGN(start
+ length
);
960 start
&= TARGET_PAGE_MASK
;
963 block
= qemu_get_ram_block(start
);
964 assert(block
== qemu_get_ram_block(end
- 1));
965 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
967 tlb_reset_dirty(cpu
, start1
, length
);
972 /* Note: start and end must be within the same ram block. */
973 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
977 DirtyMemoryBlocks
*blocks
;
978 unsigned long end
, page
;
985 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
986 page
= start
>> TARGET_PAGE_BITS
;
990 blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[client
]);
993 unsigned long idx
= page
/ DIRTY_MEMORY_BLOCK_SIZE
;
994 unsigned long offset
= page
% DIRTY_MEMORY_BLOCK_SIZE
;
995 unsigned long num
= MIN(end
- page
, DIRTY_MEMORY_BLOCK_SIZE
- offset
);
997 dirty
|= bitmap_test_and_clear_atomic(blocks
->blocks
[idx
],
1004 if (dirty
&& tcg_enabled()) {
1005 tlb_reset_dirty_range_all(start
, length
);
1011 /* Called from RCU critical section */
1012 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
1013 MemoryRegionSection
*section
,
1015 hwaddr paddr
, hwaddr xlat
,
1017 target_ulong
*address
)
1022 if (memory_region_is_ram(section
->mr
)) {
1024 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1026 if (!section
->readonly
) {
1027 iotlb
|= PHYS_SECTION_NOTDIRTY
;
1029 iotlb
|= PHYS_SECTION_ROM
;
1032 AddressSpaceDispatch
*d
;
1034 d
= atomic_rcu_read(§ion
->address_space
->dispatch
);
1035 iotlb
= section
- d
->map
.sections
;
1039 /* Make accesses to pages with watchpoints go via the
1040 watchpoint trap routines. */
1041 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1042 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
1043 /* Avoid trapping reads of pages with a write breakpoint. */
1044 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1045 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
1046 *address
|= TLB_MMIO
;
1054 #endif /* defined(CONFIG_USER_ONLY) */
1056 #if !defined(CONFIG_USER_ONLY)
1058 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1060 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
);
1062 static void *(*phys_mem_alloc
)(size_t size
, uint64_t *align
) =
1063 qemu_anon_ram_alloc
;
 * Set a custom physical guest memory allocator.
1067 * Accelerators with unusual needs may need this. Hopefully, we can
1068 * get rid of it eventually.
1070 void phys_mem_set_alloc(void *(*alloc
)(size_t, uint64_t *align
))
1072 phys_mem_alloc
= alloc
;
1075 static uint16_t phys_section_add(PhysPageMap
*map
,
1076 MemoryRegionSection
*section
)
1078 /* The physical section number is ORed with a page-aligned
1079 * pointer to produce the iotlb entries. Thus it should
1080 * never overflow into the page-aligned value.
1082 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
1084 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1085 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1086 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1087 map
->sections_nb_alloc
);
1089 map
->sections
[map
->sections_nb
] = *section
;
1090 memory_region_ref(section
->mr
);
1091 return map
->sections_nb
++;
1094 static void phys_section_destroy(MemoryRegion
*mr
)
1096 bool have_sub_page
= mr
->subpage
;
1098 memory_region_unref(mr
);
1100 if (have_sub_page
) {
1101 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1102 object_unref(OBJECT(&subpage
->iomem
));
1107 static void phys_sections_free(PhysPageMap
*map
)
1109 while (map
->sections_nb
> 0) {
1110 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1111 phys_section_destroy(section
->mr
);
1113 g_free(map
->sections
);
1117 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1120 hwaddr base
= section
->offset_within_address_space
1122 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1123 d
->map
.nodes
, d
->map
.sections
);
1124 MemoryRegionSection subsection
= {
1125 .offset_within_address_space
= base
,
1126 .size
= int128_make64(TARGET_PAGE_SIZE
),
1130 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1132 if (!(existing
->mr
->subpage
)) {
1133 subpage
= subpage_init(d
->as
, base
);
1134 subsection
.address_space
= d
->as
;
1135 subsection
.mr
= &subpage
->iomem
;
1136 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1137 phys_section_add(&d
->map
, &subsection
));
1139 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1141 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1142 end
= start
+ int128_get64(section
->size
) - 1;
1143 subpage_register(subpage
, start
, end
,
1144 phys_section_add(&d
->map
, section
));
1148 static void register_multipage(AddressSpaceDispatch
*d
,
1149 MemoryRegionSection
*section
)
1151 hwaddr start_addr
= section
->offset_within_address_space
;
1152 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1153 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1157 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1160 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1162 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1163 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1164 MemoryRegionSection now
= *section
, remain
= *section
;
1165 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1167 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1168 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1169 - now
.offset_within_address_space
;
1171 now
.size
= int128_min(int128_make64(left
), now
.size
);
1172 register_subpage(d
, &now
);
1174 now
.size
= int128_zero();
1176 while (int128_ne(remain
.size
, now
.size
)) {
1177 remain
.size
= int128_sub(remain
.size
, now
.size
);
1178 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1179 remain
.offset_within_region
+= int128_get64(now
.size
);
1181 if (int128_lt(remain
.size
, page_size
)) {
1182 register_subpage(d
, &now
);
1183 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1184 now
.size
= page_size
;
1185 register_subpage(d
, &now
);
1187 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1188 register_multipage(d
, &now
);
1193 void qemu_flush_coalesced_mmio_buffer(void)
1196 kvm_flush_coalesced_mmio_buffer();
1199 void qemu_mutex_lock_ramlist(void)
1201 qemu_mutex_lock(&ram_list
.mutex
);
1204 void qemu_mutex_unlock_ramlist(void)
1206 qemu_mutex_unlock(&ram_list
.mutex
);
1211 #include <sys/vfs.h>
1213 #define HUGETLBFS_MAGIC 0x958458f6
1215 static long gethugepagesize(const char *path
, Error
**errp
)
1221 ret
= statfs(path
, &fs
);
1222 } while (ret
!= 0 && errno
== EINTR
);
1225 error_setg_errno(errp
, errno
, "failed to get page size of file %s",
1233 static void *file_ram_alloc(RAMBlock
*block
,
1240 char *sanitized_name
;
1245 Error
*local_err
= NULL
;
1247 hpagesize
= gethugepagesize(path
, &local_err
);
1249 error_propagate(errp
, local_err
);
1252 block
->mr
->align
= hpagesize
;
1254 if (memory
< hpagesize
) {
1255 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1256 "or larger than huge page size 0x%" PRIx64
,
1261 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1263 "host lacks kvm mmu notifiers, -mem-path unsupported");
1267 if (!stat(path
, &st
) && S_ISDIR(st
.st_mode
)) {
1268 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1269 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1270 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1276 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1278 g_free(sanitized_name
);
1280 fd
= mkstemp(filename
);
1286 fd
= open(path
, O_RDWR
| O_CREAT
, 0644);
1290 error_setg_errno(errp
, errno
,
1291 "unable to create backing store for hugepages");
1295 memory
= ROUND_UP(memory
, hpagesize
);
1298 * ftruncate is not supported by hugetlbfs in older
1299 * hosts, so don't bother bailing out on errors.
1300 * If anything goes wrong with it under other filesystems,
1303 if (ftruncate(fd
, memory
)) {
1304 perror("ftruncate");
1307 area
= qemu_ram_mmap(fd
, memory
, hpagesize
, block
->flags
& RAM_SHARED
);
1308 if (area
== MAP_FAILED
) {
1309 error_setg_errno(errp
, errno
,
1310 "unable to map backing store for hugepages");
1316 os_mem_prealloc(fd
, area
, memory
);
1327 /* Called with the ramlist lock held. */
1328 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1330 RAMBlock
*block
, *next_block
;
1331 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1333 assert(size
!= 0); /* it would hand out same offset multiple times */
1335 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1339 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1340 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1342 end
= block
->offset
+ block
->max_length
;
1344 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1345 if (next_block
->offset
>= end
) {
1346 next
= MIN(next
, next_block
->offset
);
1349 if (next
- end
>= size
&& next
- end
< mingap
) {
1351 mingap
= next
- end
;
1355 if (offset
== RAM_ADDR_MAX
) {
1356 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1364 ram_addr_t
last_ram_offset(void)
1367 ram_addr_t last
= 0;
1370 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1371 last
= MAX(last
, block
->offset
+ block
->max_length
);
1377 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1381 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1382 if (!machine_dump_guest_core(current_machine
)) {
1383 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1385 perror("qemu_madvise");
1386 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1387 "but dump_guest_core=off specified\n");
1392 /* Called within an RCU critical section, or while the ramlist lock
1395 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1399 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1400 if (block
->offset
== addr
) {
1408 const char *qemu_ram_get_idstr(RAMBlock
*rb
)
1413 /* Called with iothread lock held. */
1414 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1416 RAMBlock
*new_block
, *block
;
1419 new_block
= find_ram_block(addr
);
1421 assert(!new_block
->idstr
[0]);
1424 char *id
= qdev_get_dev_path(dev
);
1426 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1430 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1432 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1433 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1434 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1442 /* Called with iothread lock held. */
1443 void qemu_ram_unset_idstr(ram_addr_t addr
)
1447 /* FIXME: arch_init.c assumes that this is not called throughout
1448 * migration. Ignore the problem since hot-unplug during migration
1449 * does not work anyway.
1453 block
= find_ram_block(addr
);
1455 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1460 static int memory_try_enable_merging(void *addr
, size_t len
)
1462 if (!machine_mem_merge(current_machine
)) {
1463 /* disabled by the user */
1467 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1470 /* Only legal before guest might have detected the memory size: e.g. on
1471 * incoming migration, or right after reset.
 * As memory core doesn't know how memory is accessed, it is up to
1474 * resize callback to update device state and/or add assertions to detect
1475 * misuse, if necessary.
1477 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1479 RAMBlock
*block
= find_ram_block(base
);
1483 newsize
= HOST_PAGE_ALIGN(newsize
);
1485 if (block
->used_length
== newsize
) {
1489 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1490 error_setg_errno(errp
, EINVAL
,
1491 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1492 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1493 newsize
, block
->used_length
);
1497 if (block
->max_length
< newsize
) {
1498 error_setg_errno(errp
, EINVAL
,
1499 "Length too large: %s: 0x" RAM_ADDR_FMT
1500 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1501 newsize
, block
->max_length
);
1505 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1506 block
->used_length
= newsize
;
1507 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1509 memory_region_set_size(block
->mr
, newsize
);
1510 if (block
->resized
) {
1511 block
->resized(block
->idstr
, newsize
, block
->host
);
1516 /* Called with ram_list.mutex held */
1517 static void dirty_memory_extend(ram_addr_t old_ram_size
,
1518 ram_addr_t new_ram_size
)
1520 ram_addr_t old_num_blocks
= DIV_ROUND_UP(old_ram_size
,
1521 DIRTY_MEMORY_BLOCK_SIZE
);
1522 ram_addr_t new_num_blocks
= DIV_ROUND_UP(new_ram_size
,
1523 DIRTY_MEMORY_BLOCK_SIZE
);
1526 /* Only need to extend if block count increased */
1527 if (new_num_blocks
<= old_num_blocks
) {
1531 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1532 DirtyMemoryBlocks
*old_blocks
;
1533 DirtyMemoryBlocks
*new_blocks
;
1536 old_blocks
= atomic_rcu_read(&ram_list
.dirty_memory
[i
]);
1537 new_blocks
= g_malloc(sizeof(*new_blocks
) +
1538 sizeof(new_blocks
->blocks
[0]) * new_num_blocks
);
1540 if (old_num_blocks
) {
1541 memcpy(new_blocks
->blocks
, old_blocks
->blocks
,
1542 old_num_blocks
* sizeof(old_blocks
->blocks
[0]));
1545 for (j
= old_num_blocks
; j
< new_num_blocks
; j
++) {
1546 new_blocks
->blocks
[j
] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE
);
1549 atomic_rcu_set(&ram_list
.dirty_memory
[i
], new_blocks
);
1552 g_free_rcu(old_blocks
, rcu
);
1557 static void ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1560 RAMBlock
*last_block
= NULL
;
1561 ram_addr_t old_ram_size
, new_ram_size
;
1564 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1566 qemu_mutex_lock_ramlist();
1567 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1569 if (!new_block
->host
) {
1570 if (xen_enabled()) {
1571 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1572 new_block
->mr
, &err
);
1574 error_propagate(errp
, err
);
1575 qemu_mutex_unlock_ramlist();
1578 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1579 &new_block
->mr
->align
);
1580 if (!new_block
->host
) {
1581 error_setg_errno(errp
, errno
,
1582 "cannot set up guest memory '%s'",
1583 memory_region_name(new_block
->mr
));
1584 qemu_mutex_unlock_ramlist();
1586 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1590 new_ram_size
= MAX(old_ram_size
,
1591 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1592 if (new_ram_size
> old_ram_size
) {
1593 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1594 dirty_memory_extend(old_ram_size
, new_ram_size
);
1596 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1597 * QLIST (which has an RCU-friendly variant) does not have insertion at
1598 * tail, so save the last element in last_block.
1600 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1602 if (block
->max_length
< new_block
->max_length
) {
1607 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1608 } else if (last_block
) {
1609 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1610 } else { /* list is empty */
1611 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1613 ram_list
.mru_block
= NULL
;
1615 /* Write list before version */
1618 qemu_mutex_unlock_ramlist();
1620 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1621 new_block
->used_length
,
1624 if (new_block
->host
) {
1625 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1626 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1627 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1628 if (kvm_enabled()) {
1629 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1635 RAMBlock
*qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1636 bool share
, const char *mem_path
,
1639 RAMBlock
*new_block
;
1640 Error
*local_err
= NULL
;
1642 if (xen_enabled()) {
1643 error_setg(errp
, "-mem-path not supported with Xen");
1647 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1649 * file_ram_alloc() needs to allocate just like
1650 * phys_mem_alloc, but we haven't bothered to provide
1654 "-mem-path not supported with this accelerator");
1658 size
= HOST_PAGE_ALIGN(size
);
1659 new_block
= g_malloc0(sizeof(*new_block
));
1661 new_block
->used_length
= size
;
1662 new_block
->max_length
= size
;
1663 new_block
->flags
= share
? RAM_SHARED
: 0;
1664 new_block
->host
= file_ram_alloc(new_block
, size
,
1666 if (!new_block
->host
) {
1671 ram_block_add(new_block
, &local_err
);
1674 error_propagate(errp
, local_err
);
1682 RAMBlock
*qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1683 void (*resized
)(const char*,
1686 void *host
, bool resizeable
,
1687 MemoryRegion
*mr
, Error
**errp
)
1689 RAMBlock
*new_block
;
1690 Error
*local_err
= NULL
;
1692 size
= HOST_PAGE_ALIGN(size
);
1693 max_size
= HOST_PAGE_ALIGN(max_size
);
1694 new_block
= g_malloc0(sizeof(*new_block
));
1696 new_block
->resized
= resized
;
1697 new_block
->used_length
= size
;
1698 new_block
->max_length
= max_size
;
1699 assert(max_size
>= size
);
1701 new_block
->host
= host
;
1703 new_block
->flags
|= RAM_PREALLOC
;
1706 new_block
->flags
|= RAM_RESIZEABLE
;
1708 ram_block_add(new_block
, &local_err
);
1711 error_propagate(errp
, local_err
);
1717 RAMBlock
*qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1718 MemoryRegion
*mr
, Error
**errp
)
1720 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1723 RAMBlock
*qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1725 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1728 RAMBlock
*qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1729 void (*resized
)(const char*,
1732 MemoryRegion
*mr
, Error
**errp
)
1734 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1737 static void reclaim_ramblock(RAMBlock
*block
)
1739 if (block
->flags
& RAM_PREALLOC
) {
1741 } else if (xen_enabled()) {
1742 xen_invalidate_map_cache_entry(block
->host
);
1744 } else if (block
->fd
>= 0) {
1745 qemu_ram_munmap(block
->host
, block
->max_length
);
1749 qemu_anon_ram_free(block
->host
, block
->max_length
);
1754 void qemu_ram_free(ram_addr_t addr
)
1758 qemu_mutex_lock_ramlist();
1759 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1760 if (addr
== block
->offset
) {
1761 QLIST_REMOVE_RCU(block
, next
);
1762 ram_list
.mru_block
= NULL
;
1763 /* Write list before version */
1766 call_rcu(block
, reclaim_ramblock
, rcu
);
1770 qemu_mutex_unlock_ramlist();
1774 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1781 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1782 offset
= addr
- block
->offset
;
1783 if (offset
< block
->max_length
) {
1784 vaddr
= ramblock_ptr(block
, offset
);
1785 if (block
->flags
& RAM_PREALLOC
) {
1787 } else if (xen_enabled()) {
1791 if (block
->fd
>= 0) {
1792 flags
|= (block
->flags
& RAM_SHARED
?
1793 MAP_SHARED
: MAP_PRIVATE
);
1794 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1795 flags
, block
->fd
, offset
);
1798 * Remap needs to match alloc. Accelerators that
1799 * set phys_mem_alloc never remap. If they did,
1800 * we'd need a remap hook here.
1802 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1804 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1805 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1808 if (area
!= vaddr
) {
1809 fprintf(stderr
, "Could not remap addr: "
1810 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1814 memory_try_enable_merging(vaddr
, length
);
1815 qemu_ram_setup_dump(vaddr
, length
);
1820 #endif /* !_WIN32 */
1822 int qemu_get_ram_fd(ram_addr_t addr
)
1828 block
= qemu_get_ram_block(addr
);
1834 void qemu_set_ram_fd(ram_addr_t addr
, int fd
)
1839 block
= qemu_get_ram_block(addr
);
1844 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1850 block
= qemu_get_ram_block(addr
);
1851 ptr
= ramblock_ptr(block
, 0);
1856 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1857 * This should not be used for general purpose DMA. Use address_space_map
1858 * or address_space_rw instead. For local memory (e.g. video ram) that the
1859 * device owns, use memory_region_get_ram_ptr.
1861 * Called within RCU critical section.
1863 void *qemu_get_ram_ptr(RAMBlock
*ram_block
, ram_addr_t addr
)
1865 RAMBlock
*block
= ram_block
;
1867 if (block
== NULL
) {
1868 block
= qemu_get_ram_block(addr
);
1871 if (xen_enabled() && block
->host
== NULL
) {
1872 /* We need to check if the requested address is in the RAM
1873 * because we don't want to map the entire memory in QEMU.
1874 * In that case just map until the end of the page.
1876 if (block
->offset
== 0) {
1877 return xen_map_cache(addr
, 0, 0);
1880 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1882 return ramblock_ptr(block
, addr
- block
->offset
);
1885 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1886 * but takes a size argument.
1888 * Called within RCU critical section.
1890 static void *qemu_ram_ptr_length(RAMBlock
*ram_block
, ram_addr_t addr
,
1893 RAMBlock
*block
= ram_block
;
1894 ram_addr_t offset_inside_block
;
1899 if (block
== NULL
) {
1900 block
= qemu_get_ram_block(addr
);
1902 offset_inside_block
= addr
- block
->offset
;
1903 *size
= MIN(*size
, block
->max_length
- offset_inside_block
);
1905 if (xen_enabled() && block
->host
== NULL
) {
1906 /* We need to check if the requested address is in the RAM
1907 * because we don't want to map the entire memory in QEMU.
1908 * In that case just map the requested area.
1910 if (block
->offset
== 0) {
1911 return xen_map_cache(addr
, *size
, 1);
1914 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1917 return ramblock_ptr(block
, offset_inside_block
);
1921 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1924 * ptr: Host pointer to look up
1925 * round_offset: If true round the result offset down to a page boundary
1926 * *ram_addr: set to result ram_addr
1927 * *offset: set to result offset within the RAMBlock
1929 * Returns: RAMBlock (or NULL if not found)
1931 * By the time this function returns, the returned pointer is not protected
1932 * by RCU anymore. If the caller is not within an RCU critical section and
1933 * does not hold the iothread lock, it must have other means of protecting the
1934 * pointer, such as a reference to the region that includes the incoming
1937 RAMBlock
*qemu_ram_block_from_host(void *ptr
, bool round_offset
,
1938 ram_addr_t
*ram_addr
,
1942 uint8_t *host
= ptr
;
1944 if (xen_enabled()) {
1946 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1947 block
= qemu_get_ram_block(*ram_addr
);
1949 *offset
= (host
- block
->host
);
1956 block
= atomic_rcu_read(&ram_list
.mru_block
);
1957 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1961 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
/* This case happens when the block is not mapped. */
1963 if (block
->host
== NULL
) {
1966 if (host
- block
->host
< block
->max_length
) {
1975 *offset
= (host
- block
->host
);
1977 *offset
&= TARGET_PAGE_MASK
;
1979 *ram_addr
= block
->offset
+ *offset
;
1985 * Finds the named RAMBlock
1987 * name: The name of RAMBlock to find
1989 * Returns: RAMBlock (or NULL if not found)
1991 RAMBlock
*qemu_ram_block_by_name(const char *name
)
1995 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1996 if (!strcmp(name
, block
->idstr
)) {
2004 /* Some of the softmmu routines need to translate from a host pointer
2005 (typically a TLB entry) back to a ram offset. */
2006 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
2009 ram_addr_t offset
; /* Not used */
2011 block
= qemu_ram_block_from_host(ptr
, false, ram_addr
, &offset
);
2020 /* Called within RCU critical section. */
2021 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
2022 uint64_t val
, unsigned size
)
2024 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
2025 tb_invalidate_phys_page_fast(ram_addr
, size
);
2029 stb_p(qemu_get_ram_ptr(NULL
, ram_addr
), val
);
2032 stw_p(qemu_get_ram_ptr(NULL
, ram_addr
), val
);
2035 stl_p(qemu_get_ram_ptr(NULL
, ram_addr
), val
);
2040 /* Set both VGA and migration bits for simplicity and to remove
2041 * the notdirty callback faster.
2043 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
2044 DIRTY_CLIENTS_NOCODE
);
2045 /* we remove the notdirty callback only if the code has been
2047 if (!cpu_physical_memory_is_clean(ram_addr
)) {
2048 tlb_set_dirty(current_cpu
, current_cpu
->mem_io_vaddr
);
2052 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
2053 unsigned size
, bool is_write
)
2058 static const MemoryRegionOps notdirty_mem_ops
= {
2059 .write
= notdirty_mem_write
,
2060 .valid
.accepts
= notdirty_mem_accepts
,
2061 .endianness
= DEVICE_NATIVE_ENDIAN
,
2064 /* Generate a debug exception if a watchpoint has been hit. */
2065 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
2067 CPUState
*cpu
= current_cpu
;
2068 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
2069 CPUArchState
*env
= cpu
->env_ptr
;
2070 target_ulong pc
, cs_base
;
2075 if (cpu
->watchpoint_hit
) {
2076 /* We re-entered the check after replacing the TB. Now raise
2077 * the debug interrupt so that is will trigger after the
2078 * current instruction. */
2079 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
2082 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2083 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
2084 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
2085 && (wp
->flags
& flags
)) {
2086 if (flags
== BP_MEM_READ
) {
2087 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
2089 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
2091 wp
->hitaddr
= vaddr
;
2092 wp
->hitattrs
= attrs
;
2093 if (!cpu
->watchpoint_hit
) {
2094 if (wp
->flags
& BP_CPU
&&
2095 !cc
->debug_check_watchpoint(cpu
, wp
)) {
2096 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2099 cpu
->watchpoint_hit
= wp
;
2100 tb_check_watchpoint(cpu
);
2101 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2102 cpu
->exception_index
= EXCP_DEBUG
;
2105 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2106 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
2107 cpu_resume_from_signal(cpu
, NULL
);
2111 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2116 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2117 so these check for a hit then pass through to the normal out-of-line
2119 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
2120 unsigned size
, MemTxAttrs attrs
)
2124 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2125 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2127 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
2130 data
= address_space_ldub(as
, addr
, attrs
, &res
);
2133 data
= address_space_lduw(as
, addr
, attrs
, &res
);
2136 data
= address_space_ldl(as
, addr
, attrs
, &res
);
2144 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
2145 uint64_t val
, unsigned size
,
2149 int asidx
= cpu_asidx_from_attrs(current_cpu
, attrs
);
2150 AddressSpace
*as
= current_cpu
->cpu_ases
[asidx
].as
;
2152 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2155 address_space_stb(as
, addr
, val
, attrs
, &res
);
2158 address_space_stw(as
, addr
, val
, attrs
, &res
);
2161 address_space_stl(as
, addr
, val
, attrs
, &res
);
2168 static const MemoryRegionOps watch_mem_ops
= {
2169 .read_with_attrs
= watch_mem_read
,
2170 .write_with_attrs
= watch_mem_write
,
2171 .endianness
= DEVICE_NATIVE_ENDIAN
,
2174 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2175 unsigned len
, MemTxAttrs attrs
)
2177 subpage_t
*subpage
= opaque
;
2181 #if defined(DEBUG_SUBPAGE)
2182 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2183 subpage
, len
, addr
);
2185 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2192 *data
= ldub_p(buf
);
2195 *data
= lduw_p(buf
);
2208 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2209 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2211 subpage_t
*subpage
= opaque
;
2214 #if defined(DEBUG_SUBPAGE)
2215 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2216 " value %"PRIx64
"\n",
2217 __func__
, subpage
, len
, addr
, value
);
2235 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2239 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2240 unsigned len
, bool is_write
)
2242 subpage_t
*subpage
= opaque
;
2243 #if defined(DEBUG_SUBPAGE)
2244 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2245 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2248 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2252 static const MemoryRegionOps subpage_ops
= {
2253 .read_with_attrs
= subpage_read
,
2254 .write_with_attrs
= subpage_write
,
2255 .impl
.min_access_size
= 1,
2256 .impl
.max_access_size
= 8,
2257 .valid
.min_access_size
= 1,
2258 .valid
.max_access_size
= 8,
2259 .valid
.accepts
= subpage_accepts
,
2260 .endianness
= DEVICE_NATIVE_ENDIAN
,
2263 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2268 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2270 idx
= SUBPAGE_IDX(start
);
2271 eidx
= SUBPAGE_IDX(end
);
2272 #if defined(DEBUG_SUBPAGE)
2273 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2274 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2276 for (; idx
<= eidx
; idx
++) {
2277 mmio
->sub_section
[idx
] = section
;
2283 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2287 mmio
= g_malloc0(sizeof(subpage_t
));
2291 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2292 NULL
, TARGET_PAGE_SIZE
);
2293 mmio
->iomem
.subpage
= true;
2294 #if defined(DEBUG_SUBPAGE)
2295 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2296 mmio
, base
, TARGET_PAGE_SIZE
);
2298 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2303 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2307 MemoryRegionSection section
= {
2308 .address_space
= as
,
2310 .offset_within_address_space
= 0,
2311 .offset_within_region
= 0,
2312 .size
= int128_2_64(),
2315 return phys_section_add(map
, §ion
);
2318 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
, MemTxAttrs attrs
)
2320 int asidx
= cpu_asidx_from_attrs(cpu
, attrs
);
2321 CPUAddressSpace
*cpuas
= &cpu
->cpu_ases
[asidx
];
2322 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpuas
->memory_dispatch
);
2323 MemoryRegionSection
*sections
= d
->map
.sections
;
2325 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2328 static void io_mem_init(void)
2330 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2331 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2333 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2335 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2339 static void mem_begin(MemoryListener
*listener
)
2341 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2342 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2345 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2346 assert(n
== PHYS_SECTION_UNASSIGNED
);
2347 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2348 assert(n
== PHYS_SECTION_NOTDIRTY
);
2349 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2350 assert(n
== PHYS_SECTION_ROM
);
2351 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2352 assert(n
== PHYS_SECTION_WATCH
);
2354 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2356 as
->next_dispatch
= d
;
2359 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2361 phys_sections_free(&d
->map
);
2365 static void mem_commit(MemoryListener
*listener
)
2367 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2368 AddressSpaceDispatch
*cur
= as
->dispatch
;
2369 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2371 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2373 atomic_rcu_set(&as
->dispatch
, next
);
2375 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2379 static void tcg_commit(MemoryListener
*listener
)
2381 CPUAddressSpace
*cpuas
;
2382 AddressSpaceDispatch
*d
;
2384 /* since each CPU stores ram addresses in its TLB cache, we must
2385 reset the modified entries */
2386 cpuas
= container_of(listener
, CPUAddressSpace
, tcg_as_listener
);
2387 cpu_reloading_memory_map();
2388 /* The CPU and TLB are protected by the iothread lock.
2389 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2390 * may have split the RCU critical section.
2392 d
= atomic_rcu_read(&cpuas
->as
->dispatch
);
2393 cpuas
->memory_dispatch
= d
;
2394 tlb_flush(cpuas
->cpu
, 1);
2397 void address_space_init_dispatch(AddressSpace
*as
)
2399 as
->dispatch
= NULL
;
2400 as
->dispatch_listener
= (MemoryListener
) {
2402 .commit
= mem_commit
,
2403 .region_add
= mem_add
,
2404 .region_nop
= mem_add
,
2407 memory_listener_register(&as
->dispatch_listener
, as
);
2410 void address_space_unregister(AddressSpace
*as
)
2412 memory_listener_unregister(&as
->dispatch_listener
);
2415 void address_space_destroy_dispatch(AddressSpace
*as
)
2417 AddressSpaceDispatch
*d
= as
->dispatch
;
2419 atomic_rcu_set(&as
->dispatch
, NULL
);
2421 call_rcu(d
, address_space_dispatch_free
, rcu
);
2425 static void memory_map_init(void)
2427 system_memory
= g_malloc(sizeof(*system_memory
));
2429 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2430 address_space_init(&address_space_memory
, system_memory
, "memory");
2432 system_io
= g_malloc(sizeof(*system_io
));
2433 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2435 address_space_init(&address_space_io
, system_io
, "I/O");
2438 MemoryRegion
*get_system_memory(void)
2440 return system_memory
;
2443 MemoryRegion
*get_system_io(void)
2448 #endif /* !defined(CONFIG_USER_ONLY) */
2450 /* physical memory access (slow version, mainly for debug) */
2451 #if defined(CONFIG_USER_ONLY)
2452 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2453 uint8_t *buf
, int len
, int is_write
)
2460 page
= addr
& TARGET_PAGE_MASK
;
2461 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2464 flags
= page_get_flags(page
);
2465 if (!(flags
& PAGE_VALID
))
2468 if (!(flags
& PAGE_WRITE
))
2470 /* XXX: this code should not depend on lock_user */
2471 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2474 unlock_user(p
, addr
, l
);
2476 if (!(flags
& PAGE_READ
))
2478 /* XXX: this code should not depend on lock_user */
2479 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2482 unlock_user(p
, addr
, 0);
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
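/* Clamp an access of l bytes at addr to what the region's ops declare they
 * can handle, also taking the alignment of the address into account.
 */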
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}

/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, mr->ram_addr + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
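/* Illustrative sketch (not part of the original code): a device model would
 * typically issue a bus-master access through these helpers roughly like
 * this, using hypothetical buffer and address values:
 *
 *     uint8_t data[4] = { 0x12, 0x34, 0x56, 0x78 };
 *     MemTxResult res = address_space_rw(&address_space_memory, 0x1000,
 *                                        MEMTXATTRS_UNSPECIFIED,
 *                                        data, sizeof(data), true);
 *     if (res != MEMTX_OK) {
 *         ... report a failed bus transaction ...
 *     }
 */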
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
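/* When address_space_map() cannot hand out a direct pointer into guest RAM
 * (for example because the target is MMIO), it falls back to a single global
 * bounce buffer.  Callers that lose the race for the buffer can register a
 * bottom half with cpu_register_map_client() and retry once it runs.
 */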
static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
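/* Check whether a read or write of the given length at addr would be
 * accepted, without actually performing the access.
 */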
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
    rcu_read_unlock();

    return ptr;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
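/* Illustrative sketch (not part of the original code): the usual map/unmap
 * pattern for a DMA-style access, with hypothetical gpa/size names:
 *
 *     hwaddr len = size;
 *     void *host = address_space_map(&address_space_memory, gpa, &len, true);
 *     if (host) {
 *         memset(host, 0, len);
 *         address_space_unmap(&address_space_memory, host, len, true, len);
 *     }
 *
 * If NULL is returned, the caller can register a QEMUBH with
 * cpu_register_map_client() and retry when that bottom half runs.
 */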
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
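/* The helpers below load and store fixed-size values at a physical address,
 * in native, little- or big-endian byte order.  They take the MMIO dispatch
 * path whenever the target is not directly accessible RAM.
 */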
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;
    MemTxAttrs attrs;
    int asidx;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);