/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <sys/types.h>

#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif
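
/*
 * The physical page map below is a multi-level table: interior nodes are
 * arrays of L2_SIZE PhysPageEntry slots taken from the phys_map_nodes pool,
 * and leaf entries hold indices into the phys_sections array.
 * PHYS_MAP_NODE_NIL marks a child that has not been allocated yet.
 */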

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned ret;
    unsigned i;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}
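
/*
 * phys_page_set_level() descends the map one level at a time: at depth
 * "level" each slot covers (1 << (level * L2_BITS)) pages.  Runs of pages
 * that are aligned to and at least as large as a slot's coverage become
 * leaves at that level; anything smaller recurses into the level below.
 */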

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
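
/*
 * Look up a page index in the map.  The walk stops early at a leaf entry;
 * a missing node falls through to the catch-all unassigned section, so the
 * caller always gets a valid MemoryRegionSection back.
 */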

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
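
/*
 * Watchpoint lengths are restricted to powers of two, so a watchpoint can
 * be stored as a (vaddr, len_mask) pair; e.g. len == 4 gives
 * len_mask == ~3, and an access matches when (addr & len_mask) == vaddr.
 */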

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}
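
/*
 * Sections that do not cover a whole target page are routed through a
 * subpage_t: a container MemoryRegion whose sub_section[] table maps every
 * byte offset within the page to the section that owns it.
 */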

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}
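
/*
 * mem_add() splits an incoming section into up to three pieces: an
 * unaligned head and tail registered as subpages, and a page-aligned
 * middle registered as a full multipage range.
 */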

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }

    block->fd = fd;
    return area;
}
#endif
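
/*
 * Pick an offset for a new block by scanning the gaps between existing
 * blocks and choosing the smallest gap that still fits, which keeps the
 * ram_addr_t space reasonably compact.
 */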

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
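
/*
 * Allocation policy: a caller-supplied host pointer is used as-is and
 * flagged RAM_PREALLOC_MASK; otherwise the backing memory comes from
 * hugetlbfs (-mem-path), Xen, KVM (for hosts with special constraints),
 * or a plain anonymous allocation.
 */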

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
    .priority = 0,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
,
1870 if (!cpu_physical_memory_is_dirty(addr
)) {
1871 /* invalidate code */
1872 tb_invalidate_phys_page_range(addr
, addr
+ length
, 0);
1874 cpu_physical_memory_set_dirty_flags(addr
, (0xff & ~CODE_DIRTY_FLAG
));
1876 xen_modified_memory(addr
, length
);

void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    int l;
    uint8_t *ptr;
    uint32_t val;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                hwaddr addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                hwaddr addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    return address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
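
/*
 * Mapping an MMIO region has to go through an intermediate buffer.  Only
 * one such bounce buffer exists; clients that find it busy can register a
 * callback below to be notified when the buffer is released.
 */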

typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
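
/*
 * A mapping that stays entirely in RAM can span multiple pages as long as
 * they are contiguous in the ram_addr_t space; the first non-RAM (or
 * read-only) page either stops the mapping early or, if nothing has been
 * mapped yet, falls back to the single bounce buffer above.
 */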

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    hwaddr len = *plen;
    hwaddr todo = 0;
    int l;
    hwaddr page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif