/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}
static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}
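
/* Fill one level of the physical page map: allocate an intermediate node
   on demand, then either mark whole aligned 'step'-sized ranges as leaves
   pointing at 'leaf', or recurse one level down for partially covered
   entries. */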
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
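
/* Walk the multi-level map from the root, consuming L2_BITS of the page
   index per level, and return the MemoryRegionSection registered for
   'index'; unmapped addresses resolve to the unassigned section. */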
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
}
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      ram_addr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    tlb_reset_dirty_range_all(start, end, length);
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    return 0;
}
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}
static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}
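
/* phys_sections is a growable array of MemoryRegionSection copies; the
   16-bit indices returned here are what the page-map leaves store. */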
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}
static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr = start_addr;
    uint16_t section_index = phys_section_add(section);

    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}
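
/* Split an incoming section into a page-aligned middle that can be mapped
   with whole-page entries and unaligned head/tail pieces that go through
   subpages. */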
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#if defined(__linux__) && !defined(TARGET_S390X)

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
    int flags;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
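
/* Pick an offset for a new RAM block: scan the existing blocks and reuse
   the smallest gap that is large enough. */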
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}
static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
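
/* Allocate guest RAM backed either by a caller-supplied pointer, by
   -mem-path hugepages, or by an anonymous host allocation, then register
   the block, keep the block list sorted, and grow the dirty bitmap. */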
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}
static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};
*as
)
1758 AddressSpaceDispatch
*d
= g_new(AddressSpaceDispatch
, 1);
1760 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .is_leaf
= 0 };
1761 d
->listener
= (MemoryListener
) {
1763 .region_add
= mem_add
,
1764 .region_nop
= mem_add
,
1768 memory_listener_register(&d
->listener
, as
);
1771 void address_space_destroy_dispatch(AddressSpace
*as
)
1773 AddressSpaceDispatch
*d
= as
->dispatch
;
1775 memory_listener_unregister(&d
->listener
);
1776 destroy_l2_mapping(&d
->phys_map
, P_L2_LEVELS
- 1);
1778 as
->dispatch
= NULL
;
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}
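
/* Slow-path access: translate page by page through phys_page_find and
   either copy to/from host RAM or go through the MMIO read/write
   callbacks in at most 4-byte units. */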
void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    int l;
    uint8_t *ptr;
    uint32_t val;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                hwaddr addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                hwaddr addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    return address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
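
/* Bounce buffer and map-client list used by address_space_map() when the
   target is not directly addressable RAM; registered clients are notified
   when the single bounce buffer is released. */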
typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    hwaddr len = *plen;
    hwaddr todo = 0;
    int l;
    hwaddr page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif