4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
32 #include "qemu/osdep.h"
33 #include "sysemu/kvm.h"
34 #include "hw/xen/xen.h"
35 #include "qemu/timer.h"
36 #include "qemu/config-file.h"
37 #include "exec/memory.h"
38 #include "sysemu/dma.h"
39 #include "exec/address-spaces.h"
40 #if defined(CONFIG_USER_ONLY)
42 #else /* !CONFIG_USER_ONLY */
43 #include "sysemu/xen-mapcache.h"
46 #include "exec/cpu-all.h"
48 #include "exec/cputlb.h"
49 #include "translate-all.h"
51 #include "exec/memory-internal.h"
53 //#define DEBUG_UNASSIGNED
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
58 static int in_migration
;
60 RAMList ram_list
= { .blocks
= QTAILQ_HEAD_INITIALIZER(ram_list
.blocks
) };
62 static MemoryRegion
*system_memory
;
63 static MemoryRegion
*system_io
;
65 AddressSpace address_space_io
;
66 AddressSpace address_space_memory
;
67 DMAContext dma_context_memory
;
69 MemoryRegion io_mem_ram
, io_mem_rom
, io_mem_unassigned
, io_mem_notdirty
;
70 static MemoryRegion io_mem_subpage_ram
;
74 CPUArchState
*first_cpu
;
75 /* current CPU in the current thread. It is only valid inside
77 DEFINE_TLS(CPUArchState
*,cpu_single_env
);
78 /* 0 = Do not count executed instructions.
79 1 = Precise instruction counting.
80 2 = Adaptive rate instruction counting. */
83 #if !defined(CONFIG_USER_ONLY)
85 static MemoryRegionSection
*phys_sections
;
86 static unsigned phys_sections_nb
, phys_sections_nb_alloc
;
87 static uint16_t phys_section_unassigned
;
88 static uint16_t phys_section_notdirty
;
89 static uint16_t phys_section_rom
;
90 static uint16_t phys_section_watch
;
92 /* Simple allocator for PhysPageEntry nodes */
93 static PhysPageEntry (*phys_map_nodes
)[L2_SIZE
];
94 static unsigned phys_map_nodes_nb
, phys_map_nodes_nb_alloc
;
96 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
98 static void io_mem_init(void);
99 static void memory_map_init(void);
100 static void *qemu_safe_ram_ptr(ram_addr_t addr
);
102 static MemoryRegion io_mem_watch
;
105 #if !defined(CONFIG_USER_ONLY)
107 static void phys_map_node_reserve(unsigned nodes
)
109 if (phys_map_nodes_nb
+ nodes
> phys_map_nodes_nb_alloc
) {
110 typedef PhysPageEntry Node
[L2_SIZE
];
111 phys_map_nodes_nb_alloc
= MAX(phys_map_nodes_nb_alloc
* 2, 16);
112 phys_map_nodes_nb_alloc
= MAX(phys_map_nodes_nb_alloc
,
113 phys_map_nodes_nb
+ nodes
);
114 phys_map_nodes
= g_renew(Node
, phys_map_nodes
,
115 phys_map_nodes_nb_alloc
);
119 static uint16_t phys_map_node_alloc(void)
124 ret
= phys_map_nodes_nb
++;
125 assert(ret
!= PHYS_MAP_NODE_NIL
);
126 assert(ret
!= phys_map_nodes_nb_alloc
);
127 for (i
= 0; i
< L2_SIZE
; ++i
) {
128 phys_map_nodes
[ret
][i
].is_leaf
= 0;
129 phys_map_nodes
[ret
][i
].ptr
= PHYS_MAP_NODE_NIL
;
134 static void phys_map_nodes_reset(void)
136 phys_map_nodes_nb
= 0;
140 static void phys_page_set_level(PhysPageEntry
*lp
, hwaddr
*index
,
141 hwaddr
*nb
, uint16_t leaf
,
146 hwaddr step
= (hwaddr
)1 << (level
* L2_BITS
);
148 if (!lp
->is_leaf
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
149 lp
->ptr
= phys_map_node_alloc();
150 p
= phys_map_nodes
[lp
->ptr
];
152 for (i
= 0; i
< L2_SIZE
; i
++) {
154 p
[i
].ptr
= phys_section_unassigned
;
158 p
= phys_map_nodes
[lp
->ptr
];
160 lp
= &p
[(*index
>> (level
* L2_BITS
)) & (L2_SIZE
- 1)];
162 while (*nb
&& lp
< &p
[L2_SIZE
]) {
163 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
169 phys_page_set_level(lp
, index
, nb
, leaf
, level
- 1);
175 static void phys_page_set(AddressSpaceDispatch
*d
,
176 hwaddr index
, hwaddr nb
,
179 /* Wildly overreserve - it doesn't matter much. */
180 phys_map_node_reserve(3 * P_L2_LEVELS
);
182 phys_page_set_level(&d
->phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
185 MemoryRegionSection
*phys_page_find(AddressSpaceDispatch
*d
, hwaddr index
)
187 PhysPageEntry lp
= d
->phys_map
;
190 uint16_t s_index
= phys_section_unassigned
;
192 for (i
= P_L2_LEVELS
- 1; i
>= 0 && !lp
.is_leaf
; i
--) {
193 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
196 p
= phys_map_nodes
[lp
.ptr
];
197 lp
= p
[(index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1)];
202 return &phys_sections
[s_index
];
205 bool memory_region_is_unassigned(MemoryRegion
*mr
)
207 return mr
!= &io_mem_ram
&& mr
!= &io_mem_rom
208 && mr
!= &io_mem_notdirty
&& !mr
->rom_device
209 && mr
!= &io_mem_watch
;
213 void cpu_exec_init_all(void)
215 #if !defined(CONFIG_USER_ONLY)
216 qemu_mutex_init(&ram_list
.mutex
);
222 #if !defined(CONFIG_USER_ONLY)
224 static int cpu_common_post_load(void *opaque
, int version_id
)
226 CPUState
*cpu
= opaque
;
228 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
229 version_id is increased. */
230 cpu
->interrupt_request
&= ~0x01;
231 tlb_flush(cpu
->env_ptr
, 1);
236 static const VMStateDescription vmstate_cpu_common
= {
237 .name
= "cpu_common",
239 .minimum_version_id
= 1,
240 .minimum_version_id_old
= 1,
241 .post_load
= cpu_common_post_load
,
242 .fields
= (VMStateField
[]) {
243 VMSTATE_UINT32(halted
, CPUState
),
244 VMSTATE_UINT32(interrupt_request
, CPUState
),
245 VMSTATE_END_OF_LIST()
249 #define vmstate_cpu_common vmstate_dummy
252 CPUState
*qemu_get_cpu(int index
)
254 CPUArchState
*env
= first_cpu
;
255 CPUState
*cpu
= NULL
;
258 cpu
= ENV_GET_CPU(env
);
259 if (cpu
->cpu_index
== index
) {
265 return env
? cpu
: NULL
;
268 void qemu_for_each_cpu(void (*func
)(CPUState
*cpu
, void *data
), void *data
)
270 CPUArchState
*env
= first_cpu
;
273 func(ENV_GET_CPU(env
), data
);
278 void cpu_exec_init(CPUArchState
*env
)
280 CPUState
*cpu
= ENV_GET_CPU(env
);
281 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
285 #if defined(CONFIG_USER_ONLY)
288 env
->next_cpu
= NULL
;
291 while (*penv
!= NULL
) {
292 penv
= &(*penv
)->next_cpu
;
295 cpu
->cpu_index
= cpu_index
;
297 QTAILQ_INIT(&env
->breakpoints
);
298 QTAILQ_INIT(&env
->watchpoints
);
299 #ifndef CONFIG_USER_ONLY
300 cpu
->thread_id
= qemu_get_thread_id();
303 #if defined(CONFIG_USER_ONLY)
306 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, cpu
);
307 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
308 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
309 cpu_save
, cpu_load
, env
);
310 assert(cc
->vmsd
== NULL
);
312 if (cc
->vmsd
!= NULL
) {
313 vmstate_register(NULL
, cpu_index
, cc
->vmsd
, cpu
);
317 #if defined(TARGET_HAS_ICE)
318 #if defined(CONFIG_USER_ONLY)
319 static void breakpoint_invalidate(CPUArchState
*env
, target_ulong pc
)
321 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
324 static void breakpoint_invalidate(CPUArchState
*env
, target_ulong pc
)
326 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env
, pc
) |
327 (pc
& ~TARGET_PAGE_MASK
));
330 #endif /* TARGET_HAS_ICE */
332 #if defined(CONFIG_USER_ONLY)
333 void cpu_watchpoint_remove_all(CPUArchState
*env
, int mask
)
338 int cpu_watchpoint_insert(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
339 int flags
, CPUWatchpoint
**watchpoint
)
344 /* Add a watchpoint. */
345 int cpu_watchpoint_insert(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
346 int flags
, CPUWatchpoint
**watchpoint
)
348 target_ulong len_mask
= ~(len
- 1);
351 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
352 if ((len
& (len
- 1)) || (addr
& ~len_mask
) ||
353 len
== 0 || len
> TARGET_PAGE_SIZE
) {
354 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
355 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
358 wp
= g_malloc(sizeof(*wp
));
361 wp
->len_mask
= len_mask
;
364 /* keep all GDB-injected watchpoints in front */
366 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
368 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
370 tlb_flush_page(env
, addr
);
377 /* Remove a specific watchpoint. */
378 int cpu_watchpoint_remove(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
381 target_ulong len_mask
= ~(len
- 1);
384 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
385 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
386 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
387 cpu_watchpoint_remove_by_ref(env
, wp
);
394 /* Remove a specific watchpoint by reference. */
395 void cpu_watchpoint_remove_by_ref(CPUArchState
*env
, CPUWatchpoint
*watchpoint
)
397 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
399 tlb_flush_page(env
, watchpoint
->vaddr
);
404 /* Remove all matching watchpoints. */
405 void cpu_watchpoint_remove_all(CPUArchState
*env
, int mask
)
407 CPUWatchpoint
*wp
, *next
;
409 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
410 if (wp
->flags
& mask
)
411 cpu_watchpoint_remove_by_ref(env
, wp
);
416 /* Add a breakpoint. */
417 int cpu_breakpoint_insert(CPUArchState
*env
, target_ulong pc
, int flags
,
418 CPUBreakpoint
**breakpoint
)
420 #if defined(TARGET_HAS_ICE)
423 bp
= g_malloc(sizeof(*bp
));
428 /* keep all GDB-injected breakpoints in front */
430 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
432 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
434 breakpoint_invalidate(env
, pc
);
444 /* Remove a specific breakpoint. */
445 int cpu_breakpoint_remove(CPUArchState
*env
, target_ulong pc
, int flags
)
447 #if defined(TARGET_HAS_ICE)
450 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
451 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
452 cpu_breakpoint_remove_by_ref(env
, bp
);
462 /* Remove a specific breakpoint by reference. */
463 void cpu_breakpoint_remove_by_ref(CPUArchState
*env
, CPUBreakpoint
*breakpoint
)
465 #if defined(TARGET_HAS_ICE)
466 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
468 breakpoint_invalidate(env
, breakpoint
->pc
);
474 /* Remove all matching breakpoints. */
475 void cpu_breakpoint_remove_all(CPUArchState
*env
, int mask
)
477 #if defined(TARGET_HAS_ICE)
478 CPUBreakpoint
*bp
, *next
;
480 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
481 if (bp
->flags
& mask
)
482 cpu_breakpoint_remove_by_ref(env
, bp
);
487 /* enable or disable single step mode. EXCP_DEBUG is returned by the
488 CPU loop after each instruction */
489 void cpu_single_step(CPUArchState
*env
, int enabled
)
491 #if defined(TARGET_HAS_ICE)
492 if (env
->singlestep_enabled
!= enabled
) {
493 env
->singlestep_enabled
= enabled
;
495 kvm_update_guest_debug(env
, 0);
497 /* must flush all the translated code to avoid inconsistencies */
498 /* XXX: only flush what is necessary */
505 void cpu_exit(CPUArchState
*env
)
507 CPUState
*cpu
= ENV_GET_CPU(env
);
509 cpu
->exit_request
= 1;
510 cpu
->tcg_exit_req
= 1;
513 void cpu_abort(CPUArchState
*env
, const char *fmt
, ...)
520 fprintf(stderr
, "qemu: fatal: ");
521 vfprintf(stderr
, fmt
, ap
);
522 fprintf(stderr
, "\n");
523 cpu_dump_state(env
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
524 if (qemu_log_enabled()) {
525 qemu_log("qemu: fatal: ");
526 qemu_log_vprintf(fmt
, ap2
);
528 log_cpu_state(env
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
534 #if defined(CONFIG_USER_ONLY)
536 struct sigaction act
;
537 sigfillset(&act
.sa_mask
);
538 act
.sa_handler
= SIG_DFL
;
539 sigaction(SIGABRT
, &act
, NULL
);
545 CPUArchState
*cpu_copy(CPUArchState
*env
)
547 CPUArchState
*new_env
= cpu_init(env
->cpu_model_str
);
548 CPUArchState
*next_cpu
= new_env
->next_cpu
;
549 #if defined(TARGET_HAS_ICE)
554 memcpy(new_env
, env
, sizeof(CPUArchState
));
556 /* Preserve chaining. */
557 new_env
->next_cpu
= next_cpu
;
559 /* Clone all break/watchpoints.
560 Note: Once we support ptrace with hw-debug register access, make sure
561 BP_CPU break/watchpoints are handled correctly on clone. */
562 QTAILQ_INIT(&env
->breakpoints
);
563 QTAILQ_INIT(&env
->watchpoints
);
564 #if defined(TARGET_HAS_ICE)
565 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
566 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
568 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
569 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
577 #if !defined(CONFIG_USER_ONLY)
578 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t end
,
583 /* we modify the TLB cache so that the dirty bit will be set again
584 when accessing the range */
585 start1
= (uintptr_t)qemu_safe_ram_ptr(start
);
586 /* Check that we don't span multiple blocks - this breaks the
587 address comparisons below. */
588 if ((uintptr_t)qemu_safe_ram_ptr(end
- 1) - start1
589 != (end
- 1) - start
) {
592 cpu_tlb_reset_dirty_all(start1
, length
);
596 /* Note: start and end must be within the same ram block. */
597 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
602 start
&= TARGET_PAGE_MASK
;
603 end
= TARGET_PAGE_ALIGN(end
);
605 length
= end
- start
;
608 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
611 tlb_reset_dirty_range_all(start
, end
, length
);
615 static int cpu_physical_memory_set_dirty_tracking(int enable
)
618 in_migration
= enable
;
622 hwaddr
memory_region_section_get_iotlb(CPUArchState
*env
,
623 MemoryRegionSection
*section
,
627 target_ulong
*address
)
632 if (memory_region_is_ram(section
->mr
)) {
634 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
635 + memory_region_section_addr(section
, paddr
);
636 if (!section
->readonly
) {
637 iotlb
|= phys_section_notdirty
;
639 iotlb
|= phys_section_rom
;
642 iotlb
= section
- phys_sections
;
643 iotlb
+= memory_region_section_addr(section
, paddr
);
646 /* Make accesses to pages with watchpoints go via the
647 watchpoint trap routines. */
648 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
649 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
650 /* Avoid trapping reads of pages with a write breakpoint. */
651 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
652 iotlb
= phys_section_watch
+ paddr
;
653 *address
|= TLB_MMIO
;
661 #endif /* defined(CONFIG_USER_ONLY) */
663 #if !defined(CONFIG_USER_ONLY)
665 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
666 typedef struct subpage_t
{
669 uint16_t sub_section
[TARGET_PAGE_SIZE
];
672 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
674 static subpage_t
*subpage_init(hwaddr base
);
675 static void destroy_page_desc(uint16_t section_index
)
677 MemoryRegionSection
*section
= &phys_sections
[section_index
];
678 MemoryRegion
*mr
= section
->mr
;
681 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
682 memory_region_destroy(&subpage
->iomem
);
687 static void destroy_l2_mapping(PhysPageEntry
*lp
, unsigned level
)
692 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
696 p
= phys_map_nodes
[lp
->ptr
];
697 for (i
= 0; i
< L2_SIZE
; ++i
) {
699 destroy_l2_mapping(&p
[i
], level
- 1);
701 destroy_page_desc(p
[i
].ptr
);
705 lp
->ptr
= PHYS_MAP_NODE_NIL
;
708 static void destroy_all_mappings(AddressSpaceDispatch
*d
)
710 destroy_l2_mapping(&d
->phys_map
, P_L2_LEVELS
- 1);
711 phys_map_nodes_reset();
714 static uint16_t phys_section_add(MemoryRegionSection
*section
)
716 if (phys_sections_nb
== phys_sections_nb_alloc
) {
717 phys_sections_nb_alloc
= MAX(phys_sections_nb_alloc
* 2, 16);
718 phys_sections
= g_renew(MemoryRegionSection
, phys_sections
,
719 phys_sections_nb_alloc
);
721 phys_sections
[phys_sections_nb
] = *section
;
722 return phys_sections_nb
++;
725 static void phys_sections_clear(void)
727 phys_sections_nb
= 0;
730 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
733 hwaddr base
= section
->offset_within_address_space
735 MemoryRegionSection
*existing
= phys_page_find(d
, base
>> TARGET_PAGE_BITS
);
736 MemoryRegionSection subsection
= {
737 .offset_within_address_space
= base
,
738 .size
= TARGET_PAGE_SIZE
,
742 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
744 if (!(existing
->mr
->subpage
)) {
745 subpage
= subpage_init(base
);
746 subsection
.mr
= &subpage
->iomem
;
747 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
748 phys_section_add(&subsection
));
750 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
752 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
753 end
= start
+ section
->size
- 1;
754 subpage_register(subpage
, start
, end
, phys_section_add(section
));
758 static void register_multipage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
760 hwaddr start_addr
= section
->offset_within_address_space
;
761 ram_addr_t size
= section
->size
;
763 uint16_t section_index
= phys_section_add(section
);
768 phys_page_set(d
, addr
>> TARGET_PAGE_BITS
, size
>> TARGET_PAGE_BITS
,
772 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
774 AddressSpaceDispatch
*d
= container_of(listener
, AddressSpaceDispatch
, listener
);
775 MemoryRegionSection now
= *section
, remain
= *section
;
777 if ((now
.offset_within_address_space
& ~TARGET_PAGE_MASK
)
778 || (now
.size
< TARGET_PAGE_SIZE
)) {
779 now
.size
= MIN(TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
780 - now
.offset_within_address_space
,
782 register_subpage(d
, &now
);
783 remain
.size
-= now
.size
;
784 remain
.offset_within_address_space
+= now
.size
;
785 remain
.offset_within_region
+= now
.size
;
787 while (remain
.size
>= TARGET_PAGE_SIZE
) {
789 if (remain
.offset_within_region
& ~TARGET_PAGE_MASK
) {
790 now
.size
= TARGET_PAGE_SIZE
;
791 register_subpage(d
, &now
);
793 now
.size
&= TARGET_PAGE_MASK
;
794 register_multipage(d
, &now
);
796 remain
.size
-= now
.size
;
797 remain
.offset_within_address_space
+= now
.size
;
798 remain
.offset_within_region
+= now
.size
;
802 register_subpage(d
, &now
);
/* Flush any MMIO writes KVM has batched in its coalescing ring. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
812 void qemu_mutex_lock_ramlist(void)
814 qemu_mutex_lock(&ram_list
.mutex
);
817 void qemu_mutex_unlock_ramlist(void)
819 qemu_mutex_unlock(&ram_list
.mutex
);
822 #if defined(__linux__) && !defined(TARGET_S390X)
826 #define HUGETLBFS_MAGIC 0x958458f6
828 static long gethugepagesize(const char *path
)
834 ret
= statfs(path
, &fs
);
835 } while (ret
!= 0 && errno
== EINTR
);
842 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
843 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
848 static void *file_ram_alloc(RAMBlock
*block
,
853 char *sanitized_name
;
860 unsigned long hpagesize
;
862 hpagesize
= gethugepagesize(path
);
867 if (memory
< hpagesize
) {
871 if (kvm_enabled() && !kvm_has_sync_mmu()) {
872 fprintf(stderr
, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
876 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
877 sanitized_name
= g_strdup(block
->mr
->name
);
878 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
883 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
885 g_free(sanitized_name
);
887 fd
= mkstemp(filename
);
889 perror("unable to create backing store for hugepages");
896 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
899 * ftruncate is not supported by hugetlbfs in older
900 * hosts, so don't bother bailing out on errors.
901 * If anything goes wrong with it under other filesystems,
904 if (ftruncate(fd
, memory
))
908 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
909 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
910 * to sidestep this quirk.
912 flags
= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
: MAP_PRIVATE
;
913 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, flags
, fd
, 0);
915 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
917 if (area
== MAP_FAILED
) {
918 perror("file_ram_alloc: can't mmap RAM pages");
927 static ram_addr_t
find_ram_offset(ram_addr_t size
)
929 RAMBlock
*block
, *next_block
;
930 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
932 assert(size
!= 0); /* it would hand out same offset multiple times */
934 if (QTAILQ_EMPTY(&ram_list
.blocks
))
937 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
938 ram_addr_t end
, next
= RAM_ADDR_MAX
;
940 end
= block
->offset
+ block
->length
;
942 QTAILQ_FOREACH(next_block
, &ram_list
.blocks
, next
) {
943 if (next_block
->offset
>= end
) {
944 next
= MIN(next
, next_block
->offset
);
947 if (next
- end
>= size
&& next
- end
< mingap
) {
953 if (offset
== RAM_ADDR_MAX
) {
954 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
962 ram_addr_t
last_ram_offset(void)
967 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
)
968 last
= MAX(last
, block
->offset
+ block
->length
);
973 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
976 QemuOpts
*machine_opts
;
978 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
979 machine_opts
= qemu_opts_find(qemu_find_opts("machine"), 0);
981 !qemu_opt_get_bool(machine_opts
, "dump-guest-core", true)) {
982 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
984 perror("qemu_madvise");
985 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
986 "but dump_guest_core=off specified\n");
991 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
993 RAMBlock
*new_block
, *block
;
996 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
997 if (block
->offset
== addr
) {
1003 assert(!new_block
->idstr
[0]);
1006 char *id
= qdev_get_dev_path(dev
);
1008 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1012 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1014 /* This assumes the iothread lock is taken here too. */
1015 qemu_mutex_lock_ramlist();
1016 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1017 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1018 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1023 qemu_mutex_unlock_ramlist();
1026 static int memory_try_enable_merging(void *addr
, size_t len
)
1030 opts
= qemu_opts_find(qemu_find_opts("machine"), 0);
1031 if (opts
&& !qemu_opt_get_bool(opts
, "mem-merge", true)) {
1032 /* disabled by the user */
1036 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1039 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1042 RAMBlock
*block
, *new_block
;
1044 size
= TARGET_PAGE_ALIGN(size
);
1045 new_block
= g_malloc0(sizeof(*new_block
));
1047 /* This assumes the iothread lock is taken here too. */
1048 qemu_mutex_lock_ramlist();
1050 new_block
->offset
= find_ram_offset(size
);
1052 new_block
->host
= host
;
1053 new_block
->flags
|= RAM_PREALLOC_MASK
;
1056 #if defined (__linux__) && !defined(TARGET_S390X)
1057 new_block
->host
= file_ram_alloc(new_block
, size
, mem_path
);
1058 if (!new_block
->host
) {
1059 new_block
->host
= qemu_anon_ram_alloc(size
);
1060 memory_try_enable_merging(new_block
->host
, size
);
1063 fprintf(stderr
, "-mem-path option unsupported\n");
1067 if (xen_enabled()) {
1068 xen_ram_alloc(new_block
->offset
, size
, mr
);
1069 } else if (kvm_enabled()) {
1070 /* some s390/kvm configurations have special constraints */
1071 new_block
->host
= kvm_ram_alloc(size
);
1073 new_block
->host
= qemu_anon_ram_alloc(size
);
1075 memory_try_enable_merging(new_block
->host
, size
);
1078 new_block
->length
= size
;
1080 /* Keep the list sorted from biggest to smallest block. */
1081 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1082 if (block
->length
< new_block
->length
) {
1087 QTAILQ_INSERT_BEFORE(block
, new_block
, next
);
1089 QTAILQ_INSERT_TAIL(&ram_list
.blocks
, new_block
, next
);
1091 ram_list
.mru_block
= NULL
;
1094 qemu_mutex_unlock_ramlist();
1096 ram_list
.phys_dirty
= g_realloc(ram_list
.phys_dirty
,
1097 last_ram_offset() >> TARGET_PAGE_BITS
);
1098 memset(ram_list
.phys_dirty
+ (new_block
->offset
>> TARGET_PAGE_BITS
),
1099 0, size
>> TARGET_PAGE_BITS
);
1100 cpu_physical_memory_set_dirty_range(new_block
->offset
, size
, 0xff);
1102 qemu_ram_setup_dump(new_block
->host
, size
);
1103 qemu_madvise(new_block
->host
, size
, QEMU_MADV_HUGEPAGE
);
1106 kvm_setup_guest_memory(new_block
->host
, size
);
1108 return new_block
->offset
;
1111 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
)
1113 return qemu_ram_alloc_from_ptr(size
, NULL
, mr
);
1116 void qemu_ram_free_from_ptr(ram_addr_t addr
)
1120 /* This assumes the iothread lock is taken here too. */
1121 qemu_mutex_lock_ramlist();
1122 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1123 if (addr
== block
->offset
) {
1124 QTAILQ_REMOVE(&ram_list
.blocks
, block
, next
);
1125 ram_list
.mru_block
= NULL
;
1131 qemu_mutex_unlock_ramlist();
1134 void qemu_ram_free(ram_addr_t addr
)
1138 /* This assumes the iothread lock is taken here too. */
1139 qemu_mutex_lock_ramlist();
1140 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1141 if (addr
== block
->offset
) {
1142 QTAILQ_REMOVE(&ram_list
.blocks
, block
, next
);
1143 ram_list
.mru_block
= NULL
;
1145 if (block
->flags
& RAM_PREALLOC_MASK
) {
1147 } else if (mem_path
) {
1148 #if defined (__linux__) && !defined(TARGET_S390X)
1150 munmap(block
->host
, block
->length
);
1153 qemu_anon_ram_free(block
->host
, block
->length
);
1159 if (xen_enabled()) {
1160 xen_invalidate_map_cache_entry(block
->host
);
1162 qemu_anon_ram_free(block
->host
, block
->length
);
1169 qemu_mutex_unlock_ramlist();
1174 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1181 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1182 offset
= addr
- block
->offset
;
1183 if (offset
< block
->length
) {
1184 vaddr
= block
->host
+ offset
;
1185 if (block
->flags
& RAM_PREALLOC_MASK
) {
1189 munmap(vaddr
, length
);
1191 #if defined(__linux__) && !defined(TARGET_S390X)
1194 flags
|= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
:
1197 flags
|= MAP_PRIVATE
;
1199 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1200 flags
, block
->fd
, offset
);
1202 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1203 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1210 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
1211 flags
|= MAP_SHARED
| MAP_ANONYMOUS
;
1212 area
= mmap(vaddr
, length
, PROT_EXEC
|PROT_READ
|PROT_WRITE
,
1215 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1216 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1220 if (area
!= vaddr
) {
1221 fprintf(stderr
, "Could not remap addr: "
1222 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1226 memory_try_enable_merging(vaddr
, length
);
1227 qemu_ram_setup_dump(vaddr
, length
);
1233 #endif /* !_WIN32 */
1235 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1236 With the exception of the softmmu code in this file, this should
1237 only be used for local memory (e.g. video ram) that the device owns,
1238 and knows it isn't going to access beyond the end of the block.
1240 It should not be used for general purpose DMA.
1241 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1243 void *qemu_get_ram_ptr(ram_addr_t addr
)
1247 /* The list is protected by the iothread lock here. */
1248 block
= ram_list
.mru_block
;
1249 if (block
&& addr
- block
->offset
< block
->length
) {
1252 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1253 if (addr
- block
->offset
< block
->length
) {
1258 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
1262 ram_list
.mru_block
= block
;
1263 if (xen_enabled()) {
1264 /* We need to check if the requested address is in the RAM
1265 * because we don't want to map the entire memory in QEMU.
1266 * In that case just map until the end of the page.
1268 if (block
->offset
== 0) {
1269 return xen_map_cache(addr
, 0, 0);
1270 } else if (block
->host
== NULL
) {
1272 xen_map_cache(block
->offset
, block
->length
, 1);
1275 return block
->host
+ (addr
- block
->offset
);
1278 /* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1279 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1281 * ??? Is this still necessary?
1283 static void *qemu_safe_ram_ptr(ram_addr_t addr
)
1287 /* The list is protected by the iothread lock here. */
1288 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1289 if (addr
- block
->offset
< block
->length
) {
1290 if (xen_enabled()) {
1291 /* We need to check if the requested address is in the RAM
1292 * because we don't want to map the entire memory in QEMU.
1293 * In that case just map until the end of the page.
1295 if (block
->offset
== 0) {
1296 return xen_map_cache(addr
, 0, 0);
1297 } else if (block
->host
== NULL
) {
1299 xen_map_cache(block
->offset
, block
->length
, 1);
1302 return block
->host
+ (addr
- block
->offset
);
1306 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
1312 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1313 * but takes a size argument */
1314 static void *qemu_ram_ptr_length(ram_addr_t addr
, ram_addr_t
*size
)
1319 if (xen_enabled()) {
1320 return xen_map_cache(addr
, *size
, 1);
1324 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1325 if (addr
- block
->offset
< block
->length
) {
1326 if (addr
- block
->offset
+ *size
> block
->length
)
1327 *size
= block
->length
- addr
+ block
->offset
;
1328 return block
->host
+ (addr
- block
->offset
);
1332 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
/* Release a pointer obtained from qemu_get_ram_ptr(); currently only
 * emits a trace event.
 */
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
1342 int qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1345 uint8_t *host
= ptr
;
1347 if (xen_enabled()) {
1348 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1352 QTAILQ_FOREACH(block
, &ram_list
.blocks
, next
) {
1353 /* This case append when the block is not mapped. */
1354 if (block
->host
== NULL
) {
1357 if (host
- block
->host
< block
->length
) {
1358 *ram_addr
= block
->offset
+ (host
- block
->host
);
1366 /* Some of the softmmu routines need to translate from a host pointer
1367 (typically a TLB entry) back to a ram offset. */
1368 ram_addr_t
qemu_ram_addr_from_host_nofail(void *ptr
)
1370 ram_addr_t ram_addr
;
1372 if (qemu_ram_addr_from_host(ptr
, &ram_addr
)) {
1373 fprintf(stderr
, "Bad ram pointer %p\n", ptr
);
1379 static uint64_t unassigned_mem_read(void *opaque
, hwaddr addr
,
1382 #ifdef DEBUG_UNASSIGNED
1383 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
1385 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1386 cpu_unassigned_access(cpu_single_env
, addr
, 0, 0, 0, size
);
1391 static void unassigned_mem_write(void *opaque
, hwaddr addr
,
1392 uint64_t val
, unsigned size
)
1394 #ifdef DEBUG_UNASSIGNED
1395 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%"PRIx64
"\n", addr
, val
);
1397 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1398 cpu_unassigned_access(cpu_single_env
, addr
, 1, 0, 0, size
);
1402 static const MemoryRegionOps unassigned_mem_ops
= {
1403 .read
= unassigned_mem_read
,
1404 .write
= unassigned_mem_write
,
1405 .endianness
= DEVICE_NATIVE_ENDIAN
,
1408 static uint64_t error_mem_read(void *opaque
, hwaddr addr
,
1414 static void error_mem_write(void *opaque
, hwaddr addr
,
1415 uint64_t value
, unsigned size
)
1420 static const MemoryRegionOps error_mem_ops
= {
1421 .read
= error_mem_read
,
1422 .write
= error_mem_write
,
1423 .endianness
= DEVICE_NATIVE_ENDIAN
,
1426 static const MemoryRegionOps rom_mem_ops
= {
1427 .read
= error_mem_read
,
1428 .write
= unassigned_mem_write
,
1429 .endianness
= DEVICE_NATIVE_ENDIAN
,
1432 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1433 uint64_t val
, unsigned size
)
1436 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
1437 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
1438 #if !defined(CONFIG_USER_ONLY)
1439 tb_invalidate_phys_page_fast(ram_addr
, size
);
1440 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
1445 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1448 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1451 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
1456 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
1457 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
1458 /* we remove the notdirty callback only if the code has been
1460 if (dirty_flags
== 0xff)
1461 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
1464 static const MemoryRegionOps notdirty_mem_ops
= {
1465 .read
= error_mem_read
,
1466 .write
= notdirty_mem_write
,
1467 .endianness
= DEVICE_NATIVE_ENDIAN
,
1470 /* Generate a debug exception if a watchpoint has been hit. */
1471 static void check_watchpoint(int offset
, int len_mask
, int flags
)
1473 CPUArchState
*env
= cpu_single_env
;
1474 target_ulong pc
, cs_base
;
1479 if (env
->watchpoint_hit
) {
1480 /* We re-entered the check after replacing the TB. Now raise
1481 * the debug interrupt so that is will trigger after the
1482 * current instruction. */
1483 cpu_interrupt(ENV_GET_CPU(env
), CPU_INTERRUPT_DEBUG
);
1486 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
1487 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1488 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
1489 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
1490 wp
->flags
|= BP_WATCHPOINT_HIT
;
1491 if (!env
->watchpoint_hit
) {
1492 env
->watchpoint_hit
= wp
;
1493 tb_check_watchpoint(env
);
1494 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
1495 env
->exception_index
= EXCP_DEBUG
;
1498 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
1499 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
1500 cpu_resume_from_signal(env
, NULL
);
1504 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
1509 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1510 so these check for a hit then pass through to the normal out-of-line
1512 static uint64_t watch_mem_read(void *opaque
, hwaddr addr
,
1515 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_READ
);
1517 case 1: return ldub_phys(addr
);
1518 case 2: return lduw_phys(addr
);
1519 case 4: return ldl_phys(addr
);
1524 static void watch_mem_write(void *opaque
, hwaddr addr
,
1525 uint64_t val
, unsigned size
)
1527 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_WRITE
);
1530 stb_phys(addr
, val
);
1533 stw_phys(addr
, val
);
1536 stl_phys(addr
, val
);
1542 static const MemoryRegionOps watch_mem_ops
= {
1543 .read
= watch_mem_read
,
1544 .write
= watch_mem_write
,
1545 .endianness
= DEVICE_NATIVE_ENDIAN
,
1548 static uint64_t subpage_read(void *opaque
, hwaddr addr
,
1551 subpage_t
*mmio
= opaque
;
1552 unsigned int idx
= SUBPAGE_IDX(addr
);
1553 MemoryRegionSection
*section
;
1554 #if defined(DEBUG_SUBPAGE)
1555 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
1556 mmio
, len
, addr
, idx
);
1559 section
= &phys_sections
[mmio
->sub_section
[idx
]];
1561 addr
-= section
->offset_within_address_space
;
1562 addr
+= section
->offset_within_region
;
1563 return io_mem_read(section
->mr
, addr
, len
);
1566 static void subpage_write(void *opaque
, hwaddr addr
,
1567 uint64_t value
, unsigned len
)
1569 subpage_t
*mmio
= opaque
;
1570 unsigned int idx
= SUBPAGE_IDX(addr
);
1571 MemoryRegionSection
*section
;
1572 #if defined(DEBUG_SUBPAGE)
1573 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1574 " idx %d value %"PRIx64
"\n",
1575 __func__
, mmio
, len
, addr
, idx
, value
);
1578 section
= &phys_sections
[mmio
->sub_section
[idx
]];
1580 addr
-= section
->offset_within_address_space
;
1581 addr
+= section
->offset_within_region
;
1582 io_mem_write(section
->mr
, addr
, value
, len
);
1585 static const MemoryRegionOps subpage_ops
= {
1586 .read
= subpage_read
,
1587 .write
= subpage_write
,
1588 .endianness
= DEVICE_NATIVE_ENDIAN
,
1591 static uint64_t subpage_ram_read(void *opaque
, hwaddr addr
,
1594 ram_addr_t raddr
= addr
;
1595 void *ptr
= qemu_get_ram_ptr(raddr
);
1597 case 1: return ldub_p(ptr
);
1598 case 2: return lduw_p(ptr
);
1599 case 4: return ldl_p(ptr
);
1604 static void subpage_ram_write(void *opaque
, hwaddr addr
,
1605 uint64_t value
, unsigned size
)
1607 ram_addr_t raddr
= addr
;
1608 void *ptr
= qemu_get_ram_ptr(raddr
);
1610 case 1: return stb_p(ptr
, value
);
1611 case 2: return stw_p(ptr
, value
);
1612 case 4: return stl_p(ptr
, value
);
1617 static const MemoryRegionOps subpage_ram_ops
= {
1618 .read
= subpage_ram_read
,
1619 .write
= subpage_ram_write
,
1620 .endianness
= DEVICE_NATIVE_ENDIAN
,
1623 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1628 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
1630 idx
= SUBPAGE_IDX(start
);
1631 eidx
= SUBPAGE_IDX(end
);
1632 #if defined(DEBUG_SUBPAGE)
1633 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__
,
1634 mmio
, start
, end
, idx
, eidx
, memory
);
1636 if (memory_region_is_ram(phys_sections
[section
].mr
)) {
1637 MemoryRegionSection new_section
= phys_sections
[section
];
1638 new_section
.mr
= &io_mem_subpage_ram
;
1639 section
= phys_section_add(&new_section
);
1641 for (; idx
<= eidx
; idx
++) {
1642 mmio
->sub_section
[idx
] = section
;
1648 static subpage_t
*subpage_init(hwaddr base
)
1652 mmio
= g_malloc0(sizeof(subpage_t
));
1655 memory_region_init_io(&mmio
->iomem
, &subpage_ops
, mmio
,
1656 "subpage", TARGET_PAGE_SIZE
);
1657 mmio
->iomem
.subpage
= true;
1658 #if defined(DEBUG_SUBPAGE)
1659 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
1660 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
1662 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, phys_section_unassigned
);
1667 static uint16_t dummy_section(MemoryRegion
*mr
)
1669 MemoryRegionSection section
= {
1671 .offset_within_address_space
= 0,
1672 .offset_within_region
= 0,
1676 return phys_section_add(§ion
);
1679 MemoryRegion
*iotlb_to_region(hwaddr index
)
1681 return phys_sections
[index
& ~TARGET_PAGE_MASK
].mr
;
1684 static void io_mem_init(void)
1686 memory_region_init_io(&io_mem_ram
, &error_mem_ops
, NULL
, "ram", UINT64_MAX
);
1687 memory_region_init_io(&io_mem_rom
, &rom_mem_ops
, NULL
, "rom", UINT64_MAX
);
1688 memory_region_init_io(&io_mem_unassigned
, &unassigned_mem_ops
, NULL
,
1689 "unassigned", UINT64_MAX
);
1690 memory_region_init_io(&io_mem_notdirty
, ¬dirty_mem_ops
, NULL
,
1691 "notdirty", UINT64_MAX
);
1692 memory_region_init_io(&io_mem_subpage_ram
, &subpage_ram_ops
, NULL
,
1693 "subpage-ram", UINT64_MAX
);
1694 memory_region_init_io(&io_mem_watch
, &watch_mem_ops
, NULL
,
1695 "watch", UINT64_MAX
);
1698 static void mem_begin(MemoryListener
*listener
)
1700 AddressSpaceDispatch
*d
= container_of(listener
, AddressSpaceDispatch
, listener
);
1702 destroy_all_mappings(d
);
1703 d
->phys_map
.ptr
= PHYS_MAP_NODE_NIL
;
1706 static void core_begin(MemoryListener
*listener
)
1708 phys_sections_clear();
1709 phys_section_unassigned
= dummy_section(&io_mem_unassigned
);
1710 phys_section_notdirty
= dummy_section(&io_mem_notdirty
);
1711 phys_section_rom
= dummy_section(&io_mem_rom
);
1712 phys_section_watch
= dummy_section(&io_mem_watch
);
1715 static void tcg_commit(MemoryListener
*listener
)
1719 /* since each CPU stores ram addresses in its TLB cache, we must
1720 reset the modified entries */
1722 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1727 static void core_log_global_start(MemoryListener
*listener
)
1729 cpu_physical_memory_set_dirty_tracking(1);
1732 static void core_log_global_stop(MemoryListener
*listener
)
1734 cpu_physical_memory_set_dirty_tracking(0);
1737 static void io_region_add(MemoryListener
*listener
,
1738 MemoryRegionSection
*section
)
1740 MemoryRegionIORange
*mrio
= g_new(MemoryRegionIORange
, 1);
1742 mrio
->mr
= section
->mr
;
1743 mrio
->offset
= section
->offset_within_region
;
1744 iorange_init(&mrio
->iorange
, &memory_region_iorange_ops
,
1745 section
->offset_within_address_space
, section
->size
);
1746 ioport_register(&mrio
->iorange
);
1749 static void io_region_del(MemoryListener
*listener
,
1750 MemoryRegionSection
*section
)
1752 isa_unassign_ioport(section
->offset_within_address_space
, section
->size
);
1755 static MemoryListener core_memory_listener
= {
1756 .begin
= core_begin
,
1757 .log_global_start
= core_log_global_start
,
1758 .log_global_stop
= core_log_global_stop
,
1762 static MemoryListener io_memory_listener
= {
1763 .region_add
= io_region_add
,
1764 .region_del
= io_region_del
,
1768 static MemoryListener tcg_memory_listener
= {
1769 .commit
= tcg_commit
,
1772 void address_space_init_dispatch(AddressSpace
*as
)
1774 AddressSpaceDispatch
*d
= g_new(AddressSpaceDispatch
, 1);
1776 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .is_leaf
= 0 };
1777 d
->listener
= (MemoryListener
) {
1779 .region_add
= mem_add
,
1780 .region_nop
= mem_add
,
1784 memory_listener_register(&d
->listener
, as
);
1787 void address_space_destroy_dispatch(AddressSpace
*as
)
1789 AddressSpaceDispatch
*d
= as
->dispatch
;
1791 memory_listener_unregister(&d
->listener
);
1792 destroy_l2_mapping(&d
->phys_map
, P_L2_LEVELS
- 1);
1794 as
->dispatch
= NULL
;
1797 static void memory_map_init(void)
1799 system_memory
= g_malloc(sizeof(*system_memory
));
1800 memory_region_init(system_memory
, "system", INT64_MAX
);
1801 address_space_init(&address_space_memory
, system_memory
);
1802 address_space_memory
.name
= "memory";
1804 system_io
= g_malloc(sizeof(*system_io
));
1805 memory_region_init(system_io
, "io", 65536);
1806 address_space_init(&address_space_io
, system_io
);
1807 address_space_io
.name
= "I/O";
1809 memory_listener_register(&core_memory_listener
, &address_space_memory
);
1810 memory_listener_register(&io_memory_listener
, &address_space_io
);
1811 memory_listener_register(&tcg_memory_listener
, &address_space_memory
);
1813 dma_context_init(&dma_context_memory
, &address_space_memory
,
1817 MemoryRegion
*get_system_memory(void)
1819 return system_memory
;
1822 MemoryRegion
*get_system_io(void)
1827 #endif /* !defined(CONFIG_USER_ONLY) */
1829 /* physical memory access (slow version, mainly for debug) */
1830 #if defined(CONFIG_USER_ONLY)
1831 int cpu_memory_rw_debug(CPUArchState
*env
, target_ulong addr
,
1832 uint8_t *buf
, int len
, int is_write
)
1839 page
= addr
& TARGET_PAGE_MASK
;
1840 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
1843 flags
= page_get_flags(page
);
1844 if (!(flags
& PAGE_VALID
))
1847 if (!(flags
& PAGE_WRITE
))
1849 /* XXX: this code should not depend on lock_user */
1850 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
1853 unlock_user(p
, addr
, l
);
1855 if (!(flags
& PAGE_READ
))
1857 /* XXX: this code should not depend on lock_user */
1858 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
1861 unlock_user(p
, addr
, 0);
1872 static void invalidate_and_set_dirty(hwaddr addr
,
1875 if (!cpu_physical_memory_is_dirty(addr
)) {
1876 /* invalidate code */
1877 tb_invalidate_phys_page_range(addr
, addr
+ length
, 0);
1879 cpu_physical_memory_set_dirty_flags(addr
, (0xff & ~CODE_DIRTY_FLAG
));
1881 xen_modified_memory(addr
, length
);
1884 void address_space_rw(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
,
1885 int len
, bool is_write
)
1887 AddressSpaceDispatch
*d
= as
->dispatch
;
1892 MemoryRegionSection
*section
;
1895 page
= addr
& TARGET_PAGE_MASK
;
1896 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
1899 section
= phys_page_find(d
, page
>> TARGET_PAGE_BITS
);
1902 if (!memory_region_is_ram(section
->mr
)) {
1904 addr1
= memory_region_section_addr(section
, addr
);
1905 /* XXX: could force cpu_single_env to NULL to avoid
1907 if (l
>= 4 && ((addr1
& 3) == 0)) {
1908 /* 32 bit write access */
1910 io_mem_write(section
->mr
, addr1
, val
, 4);
1912 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
1913 /* 16 bit write access */
1915 io_mem_write(section
->mr
, addr1
, val
, 2);
1918 /* 8 bit write access */
1920 io_mem_write(section
->mr
, addr1
, val
, 1);
1923 } else if (!section
->readonly
) {
1925 addr1
= memory_region_get_ram_addr(section
->mr
)
1926 + memory_region_section_addr(section
, addr
);
1928 ptr
= qemu_get_ram_ptr(addr1
);
1929 memcpy(ptr
, buf
, l
);
1930 invalidate_and_set_dirty(addr1
, l
);
1931 qemu_put_ram_ptr(ptr
);
1934 if (!(memory_region_is_ram(section
->mr
) ||
1935 memory_region_is_romd(section
->mr
))) {
1938 addr1
= memory_region_section_addr(section
, addr
);
1939 if (l
>= 4 && ((addr1
& 3) == 0)) {
1940 /* 32 bit read access */
1941 val
= io_mem_read(section
->mr
, addr1
, 4);
1944 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
1945 /* 16 bit read access */
1946 val
= io_mem_read(section
->mr
, addr1
, 2);
1950 /* 8 bit read access */
1951 val
= io_mem_read(section
->mr
, addr1
, 1);
1957 ptr
= qemu_get_ram_ptr(section
->mr
->ram_addr
1958 + memory_region_section_addr(section
,
1960 memcpy(buf
, ptr
, l
);
1961 qemu_put_ram_ptr(ptr
);
1970 void address_space_write(AddressSpace
*as
, hwaddr addr
,
1971 const uint8_t *buf
, int len
)
1973 address_space_rw(as
, addr
, (uint8_t *)buf
, len
, true);
1977 * address_space_read: read from an address space.
1979 * @as: #AddressSpace to be accessed
1980 * @addr: address within that address space
1981 * @buf: buffer with the data transferred
1983 void address_space_read(AddressSpace
*as
, hwaddr addr
, uint8_t *buf
, int len
)
1985 address_space_rw(as
, addr
, buf
, len
, false);
1989 void cpu_physical_memory_rw(hwaddr addr
, uint8_t *buf
,
1990 int len
, int is_write
)
1992 return address_space_rw(&address_space_memory
, addr
, buf
, len
, is_write
);
1995 /* used for ROM loading : can write in RAM and ROM */
1996 void cpu_physical_memory_write_rom(hwaddr addr
,
1997 const uint8_t *buf
, int len
)
1999 AddressSpaceDispatch
*d
= address_space_memory
.dispatch
;
2003 MemoryRegionSection
*section
;
2006 page
= addr
& TARGET_PAGE_MASK
;
2007 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2010 section
= phys_page_find(d
, page
>> TARGET_PAGE_BITS
);
2012 if (!(memory_region_is_ram(section
->mr
) ||
2013 memory_region_is_romd(section
->mr
))) {
2016 unsigned long addr1
;
2017 addr1
= memory_region_get_ram_addr(section
->mr
)
2018 + memory_region_section_addr(section
, addr
);
2020 ptr
= qemu_get_ram_ptr(addr1
);
2021 memcpy(ptr
, buf
, l
);
2022 invalidate_and_set_dirty(addr1
, l
);
2023 qemu_put_ram_ptr(ptr
);
2037 static BounceBuffer bounce
;
2039 typedef struct MapClient
{
2041 void (*callback
)(void *opaque
);
2042 QLIST_ENTRY(MapClient
) link
;
2045 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
2046 = QLIST_HEAD_INITIALIZER(map_client_list
);
2048 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
2050 MapClient
*client
= g_malloc(sizeof(*client
));
2052 client
->opaque
= opaque
;
2053 client
->callback
= callback
;
2054 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
2058 static void cpu_unregister_map_client(void *_client
)
2060 MapClient
*client
= (MapClient
*)_client
;
2062 QLIST_REMOVE(client
, link
);
2066 static void cpu_notify_map_clients(void)
2070 while (!QLIST_EMPTY(&map_client_list
)) {
2071 client
= QLIST_FIRST(&map_client_list
);
2072 client
->callback(client
->opaque
);
2073 cpu_unregister_map_client(client
);
2077 /* Map a physical memory region into a host virtual address.
2078 * May map a subset of the requested range, given by and returned in *plen.
2079 * May return NULL if resources needed to perform the mapping are exhausted.
2080 * Use only for reads OR writes - not for read-modify-write operations.
2081 * Use cpu_register_map_client() to know when retrying the map operation is
2082 * likely to succeed.
2084 void *address_space_map(AddressSpace
*as
,
2089 AddressSpaceDispatch
*d
= as
->dispatch
;
2094 MemoryRegionSection
*section
;
2095 ram_addr_t raddr
= RAM_ADDR_MAX
;
2100 page
= addr
& TARGET_PAGE_MASK
;
2101 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2104 section
= phys_page_find(d
, page
>> TARGET_PAGE_BITS
);
2106 if (!(memory_region_is_ram(section
->mr
) && !section
->readonly
)) {
2107 if (todo
|| bounce
.buffer
) {
2110 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, TARGET_PAGE_SIZE
);
2114 address_space_read(as
, addr
, bounce
.buffer
, l
);
2118 return bounce
.buffer
;
2121 raddr
= memory_region_get_ram_addr(section
->mr
)
2122 + memory_region_section_addr(section
, addr
);
2130 ret
= qemu_ram_ptr_length(raddr
, &rlen
);
2135 /* Unmaps a memory region previously mapped by address_space_map().
2136 * Will also mark the memory as dirty if is_write == 1. access_len gives
2137 * the amount of memory that was actually read or written by the caller.
2139 void address_space_unmap(AddressSpace
*as
, void *buffer
, hwaddr len
,
2140 int is_write
, hwaddr access_len
)
2142 if (buffer
!= bounce
.buffer
) {
2144 ram_addr_t addr1
= qemu_ram_addr_from_host_nofail(buffer
);
2145 while (access_len
) {
2147 l
= TARGET_PAGE_SIZE
;
2150 invalidate_and_set_dirty(addr1
, l
);
2155 if (xen_enabled()) {
2156 xen_invalidate_map_cache_entry(buffer
);
2161 address_space_write(as
, bounce
.addr
, bounce
.buffer
, access_len
);
2163 qemu_vfree(bounce
.buffer
);
2164 bounce
.buffer
= NULL
;
2165 cpu_notify_map_clients();
2168 void *cpu_physical_memory_map(hwaddr addr
,
2172 return address_space_map(&address_space_memory
, addr
, plen
, is_write
);
2175 void cpu_physical_memory_unmap(void *buffer
, hwaddr len
,
2176 int is_write
, hwaddr access_len
)
2178 return address_space_unmap(&address_space_memory
, buffer
, len
, is_write
, access_len
);
2181 /* warning: addr must be aligned */
2182 static inline uint32_t ldl_phys_internal(hwaddr addr
,
2183 enum device_endian endian
)
2187 MemoryRegionSection
*section
;
2189 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
2191 if (!(memory_region_is_ram(section
->mr
) ||
2192 memory_region_is_romd(section
->mr
))) {
2194 addr
= memory_region_section_addr(section
, addr
);
2195 val
= io_mem_read(section
->mr
, addr
, 4);
2196 #if defined(TARGET_WORDS_BIGENDIAN)
2197 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2201 if (endian
== DEVICE_BIG_ENDIAN
) {
2207 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
2209 + memory_region_section_addr(section
, addr
));
2211 case DEVICE_LITTLE_ENDIAN
:
2212 val
= ldl_le_p(ptr
);
2214 case DEVICE_BIG_ENDIAN
:
2215 val
= ldl_be_p(ptr
);
2225 uint32_t ldl_phys(hwaddr addr
)
2227 return ldl_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
2230 uint32_t ldl_le_phys(hwaddr addr
)
2232 return ldl_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
2235 uint32_t ldl_be_phys(hwaddr addr
)
2237 return ldl_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
2240 /* warning: addr must be aligned */
2241 static inline uint64_t ldq_phys_internal(hwaddr addr
,
2242 enum device_endian endian
)
2246 MemoryRegionSection
*section
;
2248 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
2250 if (!(memory_region_is_ram(section
->mr
) ||
2251 memory_region_is_romd(section
->mr
))) {
2253 addr
= memory_region_section_addr(section
, addr
);
2255 /* XXX This is broken when device endian != cpu endian.
2256 Fix and add "endian" variable check */
2257 #ifdef TARGET_WORDS_BIGENDIAN
2258 val
= io_mem_read(section
->mr
, addr
, 4) << 32;
2259 val
|= io_mem_read(section
->mr
, addr
+ 4, 4);
2261 val
= io_mem_read(section
->mr
, addr
, 4);
2262 val
|= io_mem_read(section
->mr
, addr
+ 4, 4) << 32;
2266 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
2268 + memory_region_section_addr(section
, addr
));
2270 case DEVICE_LITTLE_ENDIAN
:
2271 val
= ldq_le_p(ptr
);
2273 case DEVICE_BIG_ENDIAN
:
2274 val
= ldq_be_p(ptr
);
2284 uint64_t ldq_phys(hwaddr addr
)
2286 return ldq_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
2289 uint64_t ldq_le_phys(hwaddr addr
)
2291 return ldq_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
2294 uint64_t ldq_be_phys(hwaddr addr
)
2296 return ldq_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
2300 uint32_t ldub_phys(hwaddr addr
)
2303 cpu_physical_memory_read(addr
, &val
, 1);
2307 /* warning: addr must be aligned */
2308 static inline uint32_t lduw_phys_internal(hwaddr addr
,
2309 enum device_endian endian
)
2313 MemoryRegionSection
*section
;
2315 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
2317 if (!(memory_region_is_ram(section
->mr
) ||
2318 memory_region_is_romd(section
->mr
))) {
2320 addr
= memory_region_section_addr(section
, addr
);
2321 val
= io_mem_read(section
->mr
, addr
, 2);
2322 #if defined(TARGET_WORDS_BIGENDIAN)
2323 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2327 if (endian
== DEVICE_BIG_ENDIAN
) {
2333 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
2335 + memory_region_section_addr(section
, addr
));
2337 case DEVICE_LITTLE_ENDIAN
:
2338 val
= lduw_le_p(ptr
);
2340 case DEVICE_BIG_ENDIAN
:
2341 val
= lduw_be_p(ptr
);
2351 uint32_t lduw_phys(hwaddr addr
)
2353 return lduw_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
2356 uint32_t lduw_le_phys(hwaddr addr
)
2358 return lduw_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
2361 uint32_t lduw_be_phys(hwaddr addr
)
2363 return lduw_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
2366 /* warning: addr must be aligned. The ram page is not masked as dirty
2367 and the code inside is not invalidated. It is useful if the dirty
2368 bits are used to track modified PTEs */
2369 void stl_phys_notdirty(hwaddr addr
, uint32_t val
)
2372 MemoryRegionSection
*section
;
2374 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
2376 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
2377 addr
= memory_region_section_addr(section
, addr
);
2378 if (memory_region_is_ram(section
->mr
)) {
2379 section
= &phys_sections
[phys_section_rom
];
2381 io_mem_write(section
->mr
, addr
, val
, 4);
2383 unsigned long addr1
= (memory_region_get_ram_addr(section
->mr
)
2385 + memory_region_section_addr(section
, addr
);
2386 ptr
= qemu_get_ram_ptr(addr1
);
2389 if (unlikely(in_migration
)) {
2390 if (!cpu_physical_memory_is_dirty(addr1
)) {
2391 /* invalidate code */
2392 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2394 cpu_physical_memory_set_dirty_flags(
2395 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
2401 void stq_phys_notdirty(hwaddr addr
, uint64_t val
)
2404 MemoryRegionSection
*section
;
2406 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
2408 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
2409 addr
= memory_region_section_addr(section
, addr
);
2410 if (memory_region_is_ram(section
->mr
)) {
2411 section
= &phys_sections
[phys_section_rom
];
2413 #ifdef TARGET_WORDS_BIGENDIAN
2414 io_mem_write(section
->mr
, addr
, val
>> 32, 4);
2415 io_mem_write(section
->mr
, addr
+ 4, (uint32_t)val
, 4);
2417 io_mem_write(section
->mr
, addr
, (uint32_t)val
, 4);
2418 io_mem_write(section
->mr
, addr
+ 4, val
>> 32, 4);
2421 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
2423 + memory_region_section_addr(section
, addr
));
2428 /* warning: addr must be aligned */
2429 static inline void stl_phys_internal(hwaddr addr
, uint32_t val
,
2430 enum device_endian endian
)
2433 MemoryRegionSection
*section
;
2435 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
2437 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
2438 addr
= memory_region_section_addr(section
, addr
);
2439 if (memory_region_is_ram(section
->mr
)) {
2440 section
= &phys_sections
[phys_section_rom
];
2442 #if defined(TARGET_WORDS_BIGENDIAN)
2443 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2447 if (endian
== DEVICE_BIG_ENDIAN
) {
2451 io_mem_write(section
->mr
, addr
, val
, 4);
2453 unsigned long addr1
;
2454 addr1
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
2455 + memory_region_section_addr(section
, addr
);
2457 ptr
= qemu_get_ram_ptr(addr1
);
2459 case DEVICE_LITTLE_ENDIAN
:
2462 case DEVICE_BIG_ENDIAN
:
2469 invalidate_and_set_dirty(addr1
, 4);
2473 void stl_phys(hwaddr addr
, uint32_t val
)
2475 stl_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
2478 void stl_le_phys(hwaddr addr
, uint32_t val
)
2480 stl_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
2483 void stl_be_phys(hwaddr addr
, uint32_t val
)
2485 stl_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
2489 void stb_phys(hwaddr addr
, uint32_t val
)
2492 cpu_physical_memory_write(addr
, &v
, 1);
2495 /* warning: addr must be aligned */
2496 static inline void stw_phys_internal(hwaddr addr
, uint32_t val
,
2497 enum device_endian endian
)
2500 MemoryRegionSection
*section
;
2502 section
= phys_page_find(address_space_memory
.dispatch
, addr
>> TARGET_PAGE_BITS
);
2504 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
2505 addr
= memory_region_section_addr(section
, addr
);
2506 if (memory_region_is_ram(section
->mr
)) {
2507 section
= &phys_sections
[phys_section_rom
];
2509 #if defined(TARGET_WORDS_BIGENDIAN)
2510 if (endian
== DEVICE_LITTLE_ENDIAN
) {
2514 if (endian
== DEVICE_BIG_ENDIAN
) {
2518 io_mem_write(section
->mr
, addr
, val
, 2);
2520 unsigned long addr1
;
2521 addr1
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
2522 + memory_region_section_addr(section
, addr
);
2524 ptr
= qemu_get_ram_ptr(addr1
);
2526 case DEVICE_LITTLE_ENDIAN
:
2529 case DEVICE_BIG_ENDIAN
:
2536 invalidate_and_set_dirty(addr1
, 2);
2540 void stw_phys(hwaddr addr
, uint32_t val
)
2542 stw_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
2545 void stw_le_phys(hwaddr addr
, uint32_t val
)
2547 stw_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
2550 void stw_be_phys(hwaddr addr
, uint32_t val
)
2552 stw_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
2556 void stq_phys(hwaddr addr
, uint64_t val
)
2559 cpu_physical_memory_write(addr
, &val
, 8);
2562 void stq_le_phys(hwaddr addr
, uint64_t val
)
2564 val
= cpu_to_le64(val
);
2565 cpu_physical_memory_write(addr
, &val
, 8);
2568 void stq_be_phys(hwaddr addr
, uint64_t val
)
2570 val
= cpu_to_be64(val
);
2571 cpu_physical_memory_write(addr
, &val
, 8);
2574 /* virtual memory access for debug (includes writing to ROM) */
2575 int cpu_memory_rw_debug(CPUArchState
*env
, target_ulong addr
,
2576 uint8_t *buf
, int len
, int is_write
)
2583 page
= addr
& TARGET_PAGE_MASK
;
2584 phys_addr
= cpu_get_phys_page_debug(env
, page
);
2585 /* if no physical page mapped, return an error */
2586 if (phys_addr
== -1)
2588 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2591 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
2593 cpu_physical_memory_write_rom(phys_addr
, buf
, l
);
2595 cpu_physical_memory_rw(phys_addr
, buf
, l
, is_write
);
2604 #if !defined(CONFIG_USER_ONLY)
2607 * A helper function for the _utterly broken_ virtio device model to find out if
2608 * it's running on a big endian machine. Don't do this at home kids!
2610 bool virtio_is_big_endian(void);
2611 bool virtio_is_big_endian(void)
2613 #if defined(TARGET_WORDS_BIGENDIAN)
2622 #ifndef CONFIG_USER_ONLY
2623 bool cpu_physical_memory_is_io(hwaddr phys_addr
)
2625 MemoryRegionSection
*section
;
2627 section
= phys_page_find(address_space_memory
.dispatch
,
2628 phys_addr
>> TARGET_PAGE_BITS
);
2630 return !(memory_region_is_ram(section
->mr
) ||
2631 memory_region_is_romd(section
->mr
));