/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#endif
#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
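
/* Worked example (illustrative only; the real values depend on the target
   configuration): with 4 KiB target pages and a 64-bit
   L1_MAP_ADDR_SPACE_BITS, 52 bits of page index remain.  V_L1_BITS_REM is
   then 2, which is below 4, so the first level absorbs 12 bits
   (V_L1_BITS = 2 + 10) and the remaining 40 bits are resolved by four
   further 10-bit levels in page_find_alloc() below. */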
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc. */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections. */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
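
/* Note that phys_map_nodes is one growable array of L2_SIZE-entry nodes and
   PhysPageEntry.ptr is a 15-bit index into it (or into phys_sections once
   is_leaf is set), with PHYS_MAP_NODE_NIL marking an absent subtree.  Using
   small integer handles instead of pointers keeps each entry down to
   16 bits. */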
static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
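
/* map_exec() makes a host memory range both writable and executable: the
   code generator writes translated host code into the buffer and the CPU
   thread then jumps into it, so the same pages need W and X permission. */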
249 static void page_init(void)
251 /* NOTE: we can always suppose that qemu_host_page_size >=
255 SYSTEM_INFO system_info
;
257 GetSystemInfo(&system_info
);
258 qemu_real_host_page_size
= system_info
.dwPageSize
;
261 qemu_real_host_page_size
= getpagesize();
263 if (qemu_host_page_size
== 0)
264 qemu_host_page_size
= qemu_real_host_page_size
;
265 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
266 qemu_host_page_size
= TARGET_PAGE_SIZE
;
267 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
269 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
271 #ifdef HAVE_KINFO_GETVMMAP
272 struct kinfo_vmentry
*freep
;
275 freep
= kinfo_getvmmap(getpid(), &cnt
);
278 for (i
= 0; i
< cnt
; i
++) {
279 unsigned long startaddr
, endaddr
;
281 startaddr
= freep
[i
].kve_start
;
282 endaddr
= freep
[i
].kve_end
;
283 if (h2g_valid(startaddr
)) {
284 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
286 if (h2g_valid(endaddr
)) {
287 endaddr
= h2g(endaddr
);
288 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
290 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
292 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
303 last_brk
= (unsigned long)sbrk(0);
305 f
= fopen("/compat/linux/proc/self/maps", "r");
310 unsigned long startaddr
, endaddr
;
313 n
= fscanf (f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
315 if (n
== 2 && h2g_valid(startaddr
)) {
316 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
318 if (h2g_valid(endaddr
)) {
319 endaddr
= h2g(endaddr
);
323 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Intermediate levels.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
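
/* Illustrative lookup: the top V_L1_BITS of the page index select the
   l1_map slot, each intermediate level consumes another L2_BITS, and the
   low L2_BITS pick the PageDesc inside the leaf array.  page_find() is the
   read-only variant; it returns NULL instead of allocating missing
   levels. */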
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
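
/* phys_page_set_level() fills the page range [index, index + nb) with the
   same section id.  Whenever the remaining range is aligned to this level's
   step (L2_SIZE to the power 'level' pages) and at least that large, the
   entry becomes a leaf covering the whole step; otherwise it recurses one
   level down.  Large aligned regions therefore need only a few entries. */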
MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
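
/* The lookup mirrors phys_page_set_level(): it peels off L2_BITS of the
   page index per level until a leaf entry is reached.  Hitting
   PHYS_MAP_NODE_NIL means the page was never registered, so the unassigned
   section is returned rather than NULL. */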
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
513 static void code_gen_alloc(unsigned long tb_size
)
515 #ifdef USE_STATIC_CODE_GEN_BUFFER
516 code_gen_buffer
= static_code_gen_buffer
;
517 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
518 map_exec(code_gen_buffer
, code_gen_buffer_size
);
520 code_gen_buffer_size
= tb_size
;
521 if (code_gen_buffer_size
== 0) {
522 #if defined(CONFIG_USER_ONLY)
523 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
525 /* XXX: needs adjustments */
526 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
529 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
530 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
531 /* The code gen buffer location may have constraints depending on
532 the host cpu and OS */
533 #if defined(__linux__)
538 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
539 #if defined(__x86_64__)
541 /* Cannot map more than that */
542 if (code_gen_buffer_size
> (800 * 1024 * 1024))
543 code_gen_buffer_size
= (800 * 1024 * 1024);
544 #elif defined(__sparc_v9__)
545 // Map the buffer below 2G, so we can use direct calls and branches
547 start
= (void *) 0x60000000UL
;
548 if (code_gen_buffer_size
> (512 * 1024 * 1024))
549 code_gen_buffer_size
= (512 * 1024 * 1024);
550 #elif defined(__arm__)
551 /* Keep the buffer no bigger than 16MB to branch between blocks */
552 if (code_gen_buffer_size
> 16 * 1024 * 1024)
553 code_gen_buffer_size
= 16 * 1024 * 1024;
554 #elif defined(__s390x__)
555 /* Map the buffer so that we can use direct calls and branches. */
556 /* We have a +- 4GB range on the branches; leave some slop. */
557 if (code_gen_buffer_size
> (3ul * 1024 * 1024 * 1024)) {
558 code_gen_buffer_size
= 3ul * 1024 * 1024 * 1024;
560 start
= (void *)0x90000000UL
;
562 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
563 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
565 if (code_gen_buffer
== MAP_FAILED
) {
566 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
570 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
571 || defined(__DragonFly__) || defined(__OpenBSD__) \
572 || defined(__NetBSD__)
576 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
577 #if defined(__x86_64__)
578 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
579 * 0x40000000 is free */
581 addr
= (void *)0x40000000;
582 /* Cannot map more than that */
583 if (code_gen_buffer_size
> (800 * 1024 * 1024))
584 code_gen_buffer_size
= (800 * 1024 * 1024);
585 #elif defined(__sparc_v9__)
586 // Map the buffer below 2G, so we can use direct calls and branches
588 addr
= (void *) 0x60000000UL
;
589 if (code_gen_buffer_size
> (512 * 1024 * 1024)) {
590 code_gen_buffer_size
= (512 * 1024 * 1024);
593 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
594 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
596 if (code_gen_buffer
== MAP_FAILED
) {
597 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
602 code_gen_buffer
= g_malloc(code_gen_buffer_size
);
603 map_exec(code_gen_buffer
, code_gen_buffer_size
);
605 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
606 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
607 code_gen_buffer_max_size
= code_gen_buffer_size
-
608 (TCG_MAX_OP_SIZE
* OPC_BUF_SIZE
);
609 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
610 tbs
= g_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}
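
/* code_gen_buffer is only set up by code_gen_alloc(), so tcg_enabled()
   returns false until tcg_exec_init() has run; callers such as
   cpu_physical_memory_reset_dirty() use it to skip TCG-only work like the
   TLB dirty-range reset. */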
635 void cpu_exec_init_all(void)
637 #if !defined(CONFIG_USER_ONLY)
643 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
645 static int cpu_common_post_load(void *opaque
, int version_id
)
647 CPUArchState
*env
= opaque
;
649 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
650 version_id is increased. */
651 env
->interrupt_request
&= ~0x01;
657 static const VMStateDescription vmstate_cpu_common
= {
658 .name
= "cpu_common",
660 .minimum_version_id
= 1,
661 .minimum_version_id_old
= 1,
662 .post_load
= cpu_common_post_load
,
663 .fields
= (VMStateField
[]) {
664 VMSTATE_UINT32(halted
, CPUArchState
),
665 VMSTATE_UINT32(interrupt_request
, CPUArchState
),
666 VMSTATE_END_OF_LIST()
671 CPUArchState
*qemu_get_cpu(int cpu
)
673 CPUArchState
*env
= first_cpu
;
676 if (env
->cpu_index
== cpu
)
684 void cpu_exec_init(CPUArchState
*env
)
689 #if defined(CONFIG_USER_ONLY)
692 env
->next_cpu
= NULL
;
695 while (*penv
!= NULL
) {
696 penv
= &(*penv
)->next_cpu
;
699 env
->cpu_index
= cpu_index
;
701 QTAILQ_INIT(&env
->breakpoints
);
702 QTAILQ_INIT(&env
->watchpoints
);
703 #ifndef CONFIG_USER_ONLY
704 env
->thread_id
= qemu_get_thread_id();
707 #if defined(CONFIG_USER_ONLY)
710 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
711 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, env
);
712 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
713 cpu_save
, cpu_load
, env
);
717 /* Allocate a new translation block. Flush the translation buffer if
718 too many translation blocks or too much generated code. */
719 static TranslationBlock
*tb_alloc(target_ulong pc
)
721 TranslationBlock
*tb
;
723 if (nb_tbs
>= code_gen_max_blocks
||
724 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
732 void tb_free(TranslationBlock
*tb
)
734 /* In practice this is mostly used for single use temporary TB
735 Ignore the hard cases and just back up if this TB happens to
736 be the last one generated. */
737 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
738 code_gen_ptr
= tb
->tc_ptr
;
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
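
/* The code bitmap is a lazily built map with one bit per byte of the guest
   page, marking which bytes are covered by translated code.  It is only
   built after a page has taken SMC_BITMAP_USE_THRESHOLD write faults (see
   tb_invalidate_phys_page_range()), after which
   tb_invalidate_phys_page_fast() can skip invalidation for writes that do
   not touch any translated code. */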
752 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
754 static void page_flush_tb_1 (int level
, void **lp
)
763 for (i
= 0; i
< L2_SIZE
; ++i
) {
764 pd
[i
].first_tb
= NULL
;
765 invalidate_page_bitmap(pd
+ i
);
769 for (i
= 0; i
< L2_SIZE
; ++i
) {
770 page_flush_tb_1 (level
- 1, pp
+ i
);
static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
783 /* flush all the translation blocks */
784 /* XXX: tb_flush is currently not thread safe */
785 void tb_flush(CPUArchState
*env1
)
788 #if defined(DEBUG_FLUSH)
789 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
790 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
792 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
794 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
795 cpu_abort(env1
, "Internal error: code buffer overflow\n");
799 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
800 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
803 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
806 code_gen_ptr
= code_gen_buffer
;
807 /* XXX: flush processor icache at this point if cache flush is
812 #ifdef DEBUG_TB_CHECK
814 static void tb_invalidate_check(target_ulong address
)
816 TranslationBlock
*tb
;
818 address
&= TARGET_PAGE_MASK
;
819 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
820 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
821 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
822 address
>= tb
->pc
+ tb
->size
)) {
823 printf("ERROR invalidate: address=" TARGET_FMT_lx
824 " PC=%08lx size=%04x\n",
825 address
, (long)tb
->pc
, tb
->size
);
831 /* verify that all the pages have correct rights for code */
832 static void tb_page_check(void)
834 TranslationBlock
*tb
;
835 int i
, flags1
, flags2
;
837 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
838 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
839 flags1
= page_get_flags(tb
->pc
);
840 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
841 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
842 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
843 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
851 /* invalidate one TB */
852 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
855 TranslationBlock
*tb1
;
859 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
862 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
866 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
868 TranslationBlock
*tb1
;
873 n1
= (uintptr_t)tb1
& 3;
874 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
876 *ptb
= tb1
->page_next
[n1
];
879 ptb
= &tb1
->page_next
[n1
];
883 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
885 TranslationBlock
*tb1
, **ptb
;
888 ptb
= &tb
->jmp_next
[n
];
891 /* find tb(n) in circular list */
894 n1
= (uintptr_t)tb1
& 3;
895 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
896 if (n1
== n
&& tb1
== tb
)
899 ptb
= &tb1
->jmp_first
;
901 ptb
= &tb1
->jmp_next
[n1
];
904 /* now we can suppress tb(n) from the list */
905 *ptb
= tb
->jmp_next
[n
];
907 tb
->jmp_next
[n
] = NULL
;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
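
/* Resetting jump 'n' retargets the patched branch to the code immediately
   following it inside the same TB, so execution falls through to the TB
   epilogue and returns to the main loop instead of chaining into a
   (possibly stale) destination TB. */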
918 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
923 tb_page_addr_t phys_pc
;
924 TranslationBlock
*tb1
, *tb2
;
926 /* remove the TB from the hash list */
927 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
928 h
= tb_phys_hash_func(phys_pc
);
929 tb_remove(&tb_phys_hash
[h
], tb
,
930 offsetof(TranslationBlock
, phys_hash_next
));
932 /* remove the TB from the page list */
933 if (tb
->page_addr
[0] != page_addr
) {
934 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
935 tb_page_remove(&p
->first_tb
, tb
);
936 invalidate_page_bitmap(p
);
938 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
939 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
940 tb_page_remove(&p
->first_tb
, tb
);
941 invalidate_page_bitmap(p
);
944 tb_invalidated_flag
= 1;
946 /* remove the TB from the hash list */
947 h
= tb_jmp_cache_hash_func(tb
->pc
);
948 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
949 if (env
->tb_jmp_cache
[h
] == tb
)
950 env
->tb_jmp_cache
[h
] = NULL
;
953 /* suppress this TB from the two jump lists */
954 tb_jmp_remove(tb
, 0);
955 tb_jmp_remove(tb
, 1);
957 /* suppress any remaining jumps to this TB */
960 n1
= (uintptr_t)tb1
& 3;
963 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
964 tb2
= tb1
->jmp_next
[n1
];
965 tb_reset_jump(tb1
, n1
);
966 tb1
->jmp_next
[n1
] = NULL
;
969 tb
->jmp_first
= (TranslationBlock
*)((uintptr_t)tb
| 2); /* fail safe */
971 tb_phys_invalidate_count
++;
974 static inline void set_bits(uint8_t *tab
, int start
, int len
)
980 mask
= 0xff << (start
& 7);
981 if ((start
& ~7) == (end
& ~7)) {
983 mask
&= ~(0xff << (end
& 7));
988 start
= (start
+ 8) & ~7;
990 while (start
< end1
) {
995 mask
= ~(0xff << (end
& 7));
1001 static void build_page_bitmap(PageDesc
*p
)
1003 int n
, tb_start
, tb_end
;
1004 TranslationBlock
*tb
;
1006 p
->code_bitmap
= g_malloc0(TARGET_PAGE_SIZE
/ 8);
1009 while (tb
!= NULL
) {
1010 n
= (uintptr_t)tb
& 3;
1011 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1012 /* NOTE: this is subtle as a TB may span two physical pages */
1014 /* NOTE: tb_end may be after the end of the page, but
1015 it is not a problem */
1016 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
1017 tb_end
= tb_start
+ tb
->size
;
1018 if (tb_end
> TARGET_PAGE_SIZE
)
1019 tb_end
= TARGET_PAGE_SIZE
;
1022 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1024 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
1025 tb
= tb
->page_next
[n
];
1029 TranslationBlock
*tb_gen_code(CPUArchState
*env
,
1030 target_ulong pc
, target_ulong cs_base
,
1031 int flags
, int cflags
)
1033 TranslationBlock
*tb
;
1035 tb_page_addr_t phys_pc
, phys_page2
;
1036 target_ulong virt_page2
;
1039 phys_pc
= get_page_addr_code(env
, pc
);
1042 /* flush must be done */
1044 /* cannot fail at this point */
1046 /* Don't forget to invalidate previous TB info. */
1047 tb_invalidated_flag
= 1;
1049 tc_ptr
= code_gen_ptr
;
1050 tb
->tc_ptr
= tc_ptr
;
1051 tb
->cs_base
= cs_base
;
1053 tb
->cflags
= cflags
;
1054 cpu_gen_code(env
, tb
, &code_gen_size
);
1055 code_gen_ptr
= (void *)(((uintptr_t)code_gen_ptr
+ code_gen_size
+
1056 CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
1058 /* check next page if needed */
1059 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1061 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1062 phys_page2
= get_page_addr_code(env
, virt_page2
);
1064 tb_link_page(tb
, phys_pc
, phys_page2
);
1069 * Invalidate all TBs which intersect with the target physical address range
1070 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1071 * 'is_cpu_write_access' should be true if called from a real cpu write
1072 * access: the virtual CPU will exit the current TB if code is modified inside
1075 void tb_invalidate_phys_range(tb_page_addr_t start
, tb_page_addr_t end
,
1076 int is_cpu_write_access
)
1078 while (start
< end
) {
1079 tb_invalidate_phys_page_range(start
, end
, is_cpu_write_access
);
1080 start
&= TARGET_PAGE_MASK
;
1081 start
+= TARGET_PAGE_SIZE
;
1086 * Invalidate all TBs which intersect with the target physical address range
1087 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1088 * 'is_cpu_write_access' should be true if called from a real cpu write
1089 * access: the virtual CPU will exit the current TB if code is modified inside
1092 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1093 int is_cpu_write_access
)
1095 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
1096 CPUArchState
*env
= cpu_single_env
;
1097 tb_page_addr_t tb_start
, tb_end
;
1100 #ifdef TARGET_HAS_PRECISE_SMC
1101 int current_tb_not_found
= is_cpu_write_access
;
1102 TranslationBlock
*current_tb
= NULL
;
1103 int current_tb_modified
= 0;
1104 target_ulong current_pc
= 0;
1105 target_ulong current_cs_base
= 0;
1106 int current_flags
= 0;
1107 #endif /* TARGET_HAS_PRECISE_SMC */
1109 p
= page_find(start
>> TARGET_PAGE_BITS
);
1112 if (!p
->code_bitmap
&&
1113 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1114 is_cpu_write_access
) {
1115 /* build code bitmap */
1116 build_page_bitmap(p
);
1119 /* we remove all the TBs in the range [start, end[ */
1120 /* XXX: see if in some cases it could be faster to invalidate all the code */
1122 while (tb
!= NULL
) {
1123 n
= (uintptr_t)tb
& 3;
1124 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1125 tb_next
= tb
->page_next
[n
];
1126 /* NOTE: this is subtle as a TB may span two physical pages */
1128 /* NOTE: tb_end may be after the end of the page, but
1129 it is not a problem */
1130 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1131 tb_end
= tb_start
+ tb
->size
;
1133 tb_start
= tb
->page_addr
[1];
1134 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1136 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1137 #ifdef TARGET_HAS_PRECISE_SMC
1138 if (current_tb_not_found
) {
1139 current_tb_not_found
= 0;
1141 if (env
->mem_io_pc
) {
1142 /* now we have a real cpu fault */
1143 current_tb
= tb_find_pc(env
->mem_io_pc
);
1146 if (current_tb
== tb
&&
1147 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1148 /* If we are modifying the current TB, we must stop
1149 its execution. We could be more precise by checking
1150 that the modification is after the current PC, but it
1151 would require a specialized function to partially
1152 restore the CPU state */
1154 current_tb_modified
= 1;
1155 cpu_restore_state(current_tb
, env
, env
->mem_io_pc
);
1156 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1159 #endif /* TARGET_HAS_PRECISE_SMC */
1160 /* we need to do that to handle the case where a signal
1161 occurs while doing tb_phys_invalidate() */
1164 saved_tb
= env
->current_tb
;
1165 env
->current_tb
= NULL
;
1167 tb_phys_invalidate(tb
, -1);
1169 env
->current_tb
= saved_tb
;
1170 if (env
->interrupt_request
&& env
->current_tb
)
1171 cpu_interrupt(env
, env
->interrupt_request
);
1176 #if !defined(CONFIG_USER_ONLY)
1177 /* if no code remaining, no need to continue to use slow writes */
1179 invalidate_page_bitmap(p
);
1180 if (is_cpu_write_access
) {
1181 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1185 #ifdef TARGET_HAS_PRECISE_SMC
1186 if (current_tb_modified
) {
1187 /* we generate a block containing just the instruction
1188 modifying the memory. It will ensure that it cannot modify
1190 env
->current_tb
= NULL
;
1191 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1192 cpu_resume_from_signal(env
, NULL
);
1197 /* len must be <= 8 and start must be a multiple of len */
1198 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1204 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1205 cpu_single_env
->mem_io_vaddr
, len
,
1206 cpu_single_env
->eip
,
1207 cpu_single_env
->eip
+
1208 (intptr_t)cpu_single_env
->segs
[R_CS
].base
);
1211 p
= page_find(start
>> TARGET_PAGE_BITS
);
1214 if (p
->code_bitmap
) {
1215 offset
= start
& ~TARGET_PAGE_MASK
;
1216 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1217 if (b
& ((1 << len
) - 1))
1221 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1225 #if !defined(CONFIG_SOFTMMU)
1226 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1227 uintptr_t pc
, void *puc
)
1229 TranslationBlock
*tb
;
1232 #ifdef TARGET_HAS_PRECISE_SMC
1233 TranslationBlock
*current_tb
= NULL
;
1234 CPUArchState
*env
= cpu_single_env
;
1235 int current_tb_modified
= 0;
1236 target_ulong current_pc
= 0;
1237 target_ulong current_cs_base
= 0;
1238 int current_flags
= 0;
1241 addr
&= TARGET_PAGE_MASK
;
1242 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1246 #ifdef TARGET_HAS_PRECISE_SMC
1247 if (tb
&& pc
!= 0) {
1248 current_tb
= tb_find_pc(pc
);
1251 while (tb
!= NULL
) {
1252 n
= (uintptr_t)tb
& 3;
1253 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1254 #ifdef TARGET_HAS_PRECISE_SMC
1255 if (current_tb
== tb
&&
1256 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1257 /* If we are modifying the current TB, we must stop
1258 its execution. We could be more precise by checking
1259 that the modification is after the current PC, but it
1260 would require a specialized function to partially
1261 restore the CPU state */
1263 current_tb_modified
= 1;
1264 cpu_restore_state(current_tb
, env
, pc
);
1265 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1268 #endif /* TARGET_HAS_PRECISE_SMC */
1269 tb_phys_invalidate(tb
, addr
);
1270 tb
= tb
->page_next
[n
];
1273 #ifdef TARGET_HAS_PRECISE_SMC
1274 if (current_tb_modified
) {
1275 /* we generate a block containing just the instruction
1276 modifying the memory. It will ensure that it cannot modify
1278 env
->current_tb
= NULL
;
1279 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1280 cpu_resume_from_signal(env
, puc
);
1286 /* add the tb in the target page and protect it if necessary */
1287 static inline void tb_alloc_page(TranslationBlock
*tb
,
1288 unsigned int n
, tb_page_addr_t page_addr
)
1291 #ifndef CONFIG_USER_ONLY
1292 bool page_already_protected
;
1295 tb
->page_addr
[n
] = page_addr
;
1296 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1297 tb
->page_next
[n
] = p
->first_tb
;
1298 #ifndef CONFIG_USER_ONLY
1299 page_already_protected
= p
->first_tb
!= NULL
;
1301 p
->first_tb
= (TranslationBlock
*)((uintptr_t)tb
| n
);
1302 invalidate_page_bitmap(p
);
1304 #if defined(TARGET_HAS_SMC) || 1
1306 #if defined(CONFIG_USER_ONLY)
1307 if (p
->flags
& PAGE_WRITE
) {
1312 /* force the host page as non writable (writes will have a
1313 page fault + mprotect overhead) */
1314 page_addr
&= qemu_host_page_mask
;
1316 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1317 addr
+= TARGET_PAGE_SIZE
) {
1319 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1323 p2
->flags
&= ~PAGE_WRITE
;
1325 mprotect(g2h(page_addr
), qemu_host_page_size
,
1326 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1327 #ifdef DEBUG_TB_INVALIDATE
1328 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1333 /* if some code is already present, then the pages are already
1334 protected. So we handle the case where only the first TB is
1335 allocated in a physical page */
1336 if (!page_already_protected
) {
1337 tlb_protect_code(page_addr
);
1341 #endif /* TARGET_HAS_SMC */
1344 /* add a new TB and link it to the physical page tables. phys_page2 is
1345 (-1) to indicate that only one page contains the TB. */
1346 void tb_link_page(TranslationBlock
*tb
,
1347 tb_page_addr_t phys_pc
, tb_page_addr_t phys_page2
)
1350 TranslationBlock
**ptb
;
1352 /* Grab the mmap lock to stop another thread invalidating this TB
1353 before we are done. */
1355 /* add in the physical hash table */
1356 h
= tb_phys_hash_func(phys_pc
);
1357 ptb
= &tb_phys_hash
[h
];
1358 tb
->phys_hash_next
= *ptb
;
1361 /* add in the page list */
1362 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1363 if (phys_page2
!= -1)
1364 tb_alloc_page(tb
, 1, phys_page2
);
1366 tb
->page_addr
[1] = -1;
1368 tb
->jmp_first
= (TranslationBlock
*)((uintptr_t)tb
| 2);
1369 tb
->jmp_next
[0] = NULL
;
1370 tb
->jmp_next
[1] = NULL
;
1372 /* init original jump addresses */
1373 if (tb
->tb_next_offset
[0] != 0xffff)
1374 tb_reset_jump(tb
, 0);
1375 if (tb
->tb_next_offset
[1] != 0xffff)
1376 tb_reset_jump(tb
, 1);
1378 #ifdef DEBUG_TB_CHECK
1384 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1385 tb[1].tc_ptr. Return NULL if not found */
1386 TranslationBlock
*tb_find_pc(uintptr_t tc_ptr
)
1388 int m_min
, m_max
, m
;
1390 TranslationBlock
*tb
;
1394 if (tc_ptr
< (uintptr_t)code_gen_buffer
||
1395 tc_ptr
>= (uintptr_t)code_gen_ptr
) {
1398 /* binary search (cf Knuth) */
1401 while (m_min
<= m_max
) {
1402 m
= (m_min
+ m_max
) >> 1;
1404 v
= (uintptr_t)tb
->tc_ptr
;
1407 else if (tc_ptr
< v
) {
1416 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1418 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1420 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1423 tb1
= tb
->jmp_next
[n
];
1425 /* find head of list */
1427 n1
= (uintptr_t)tb1
& 3;
1428 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
1431 tb1
= tb1
->jmp_next
[n1
];
1433 /* we are now sure now that tb jumps to tb1 */
1436 /* remove tb from the jmp_first list */
1437 ptb
= &tb_next
->jmp_first
;
1440 n1
= (uintptr_t)tb1
& 3;
1441 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
1442 if (n1
== n
&& tb1
== tb
)
1444 ptb
= &tb1
->jmp_next
[n1
];
1446 *ptb
= tb
->jmp_next
[n
];
1447 tb
->jmp_next
[n
] = NULL
;
1449 /* suppress the jump to next tb in generated code */
1450 tb_reset_jump(tb
, n
);
1452 /* suppress jumps in the tb on which we could have jumped */
1453 tb_reset_jump_recursive(tb_next
);
1457 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1459 tb_reset_jump_recursive2(tb
, 0);
1460 tb_reset_jump_recursive2(tb
, 1);
#if defined(TARGET_HAS_ICE)

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
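
/* In user-only mode guest virtual addresses are the keys of the TB page
   tables, so invalidating the single byte at 'pc' is enough to discard any
   translation that overlaps the breakpoint. */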
1470 void tb_invalidate_phys_addr(target_phys_addr_t addr
)
1472 ram_addr_t ram_addr
;
1473 MemoryRegionSection
*section
;
1475 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1476 if (!(memory_region_is_ram(section
->mr
)
1477 || (section
->mr
->rom_device
&& section
->mr
->readable
))) {
1480 ram_addr
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1481 + memory_region_section_addr(section
, addr
);
1482 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1485 static void breakpoint_invalidate(CPUArchState
*env
, target_ulong pc
)
1487 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env
, pc
) |
1488 (pc
& ~TARGET_PAGE_MASK
));
1491 #endif /* TARGET_HAS_ICE */
1493 #if defined(CONFIG_USER_ONLY)
1494 void cpu_watchpoint_remove_all(CPUArchState
*env
, int mask
)
1499 int cpu_watchpoint_insert(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
1500 int flags
, CPUWatchpoint
**watchpoint
)
1505 /* Add a watchpoint. */
1506 int cpu_watchpoint_insert(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
1507 int flags
, CPUWatchpoint
**watchpoint
)
1509 target_ulong len_mask
= ~(len
- 1);
1512 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1513 if ((len
& (len
- 1)) || (addr
& ~len_mask
) ||
1514 len
== 0 || len
> TARGET_PAGE_SIZE
) {
1515 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1516 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1519 wp
= g_malloc(sizeof(*wp
));
1522 wp
->len_mask
= len_mask
;
1525 /* keep all GDB-injected watchpoints in front */
1527 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1529 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1531 tlb_flush_page(env
, addr
);
1538 /* Remove a specific watchpoint. */
1539 int cpu_watchpoint_remove(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
1542 target_ulong len_mask
= ~(len
- 1);
1545 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1546 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1547 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1548 cpu_watchpoint_remove_by_ref(env
, wp
);
1555 /* Remove a specific watchpoint by reference. */
1556 void cpu_watchpoint_remove_by_ref(CPUArchState
*env
, CPUWatchpoint
*watchpoint
)
1558 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1560 tlb_flush_page(env
, watchpoint
->vaddr
);
1565 /* Remove all matching watchpoints. */
1566 void cpu_watchpoint_remove_all(CPUArchState
*env
, int mask
)
1568 CPUWatchpoint
*wp
, *next
;
1570 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1571 if (wp
->flags
& mask
)
1572 cpu_watchpoint_remove_by_ref(env
, wp
);
1577 /* Add a breakpoint. */
1578 int cpu_breakpoint_insert(CPUArchState
*env
, target_ulong pc
, int flags
,
1579 CPUBreakpoint
**breakpoint
)
1581 #if defined(TARGET_HAS_ICE)
1584 bp
= g_malloc(sizeof(*bp
));
1589 /* keep all GDB-injected breakpoints in front */
1591 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1593 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1595 breakpoint_invalidate(env
, pc
);
1605 /* Remove a specific breakpoint. */
1606 int cpu_breakpoint_remove(CPUArchState
*env
, target_ulong pc
, int flags
)
1608 #if defined(TARGET_HAS_ICE)
1611 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1612 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1613 cpu_breakpoint_remove_by_ref(env
, bp
);
1623 /* Remove a specific breakpoint by reference. */
1624 void cpu_breakpoint_remove_by_ref(CPUArchState
*env
, CPUBreakpoint
*breakpoint
)
1626 #if defined(TARGET_HAS_ICE)
1627 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1629 breakpoint_invalidate(env
, breakpoint
->pc
);
1635 /* Remove all matching breakpoints. */
1636 void cpu_breakpoint_remove_all(CPUArchState
*env
, int mask
)
1638 #if defined(TARGET_HAS_ICE)
1639 CPUBreakpoint
*bp
, *next
;
1641 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1642 if (bp
->flags
& mask
)
1643 cpu_breakpoint_remove_by_ref(env
, bp
);
1648 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1649 CPU loop after each instruction */
1650 void cpu_single_step(CPUArchState
*env
, int enabled
)
1652 #if defined(TARGET_HAS_ICE)
1653 if (env
->singlestep_enabled
!= enabled
) {
1654 env
->singlestep_enabled
= enabled
;
1656 kvm_update_guest_debug(env
, 0);
1658 /* must flush all the translated code to avoid inconsistencies */
1659 /* XXX: only flush what is necessary */
1666 static void cpu_unlink_tb(CPUArchState
*env
)
1668 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1669 problem and hope the cpu will stop of its own accord. For userspace
1670 emulation this often isn't actually as bad as it sounds. Often
1671 signals are used primarily to interrupt blocking syscalls. */
1672 TranslationBlock
*tb
;
1673 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1675 spin_lock(&interrupt_lock
);
1676 tb
= env
->current_tb
;
1677 /* if the cpu is currently executing code, we must unlink it and
1678 all the potentially executing TB */
1680 env
->current_tb
= NULL
;
1681 tb_reset_jump_recursive(tb
);
1683 spin_unlock(&interrupt_lock
);
1686 #ifndef CONFIG_USER_ONLY
1687 /* mask must never be zero, except for A20 change call */
1688 static void tcg_handle_interrupt(CPUArchState
*env
, int mask
)
1692 old_mask
= env
->interrupt_request
;
1693 env
->interrupt_request
|= mask
;
1696 * If called from iothread context, wake the target cpu in
1699 if (!qemu_cpu_is_self(env
)) {
1705 env
->icount_decr
.u16
.high
= 0xffff;
1707 && (mask
& ~old_mask
) != 0) {
1708 cpu_abort(env
, "Raised interrupt while not in I/O function");
1715 CPUInterruptHandler cpu_interrupt_handler
= tcg_handle_interrupt
;
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
}

#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
}
1737 void cpu_abort(CPUArchState
*env
, const char *fmt
, ...)
1744 fprintf(stderr
, "qemu: fatal: ");
1745 vfprintf(stderr
, fmt
, ap
);
1746 fprintf(stderr
, "\n");
1748 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1750 cpu_dump_state(env
, stderr
, fprintf
, 0);
1752 if (qemu_log_enabled()) {
1753 qemu_log("qemu: fatal: ");
1754 qemu_log_vprintf(fmt
, ap2
);
1757 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1759 log_cpu_state(env
, 0);
1766 #if defined(CONFIG_USER_ONLY)
1768 struct sigaction act
;
1769 sigfillset(&act
.sa_mask
);
1770 act
.sa_handler
= SIG_DFL
;
1771 sigaction(SIGABRT
, &act
, NULL
);
1777 CPUArchState
*cpu_copy(CPUArchState
*env
)
1779 CPUArchState
*new_env
= cpu_init(env
->cpu_model_str
);
1780 CPUArchState
*next_cpu
= new_env
->next_cpu
;
1781 int cpu_index
= new_env
->cpu_index
;
1782 #if defined(TARGET_HAS_ICE)
1787 memcpy(new_env
, env
, sizeof(CPUArchState
));
1789 /* Preserve chaining and index. */
1790 new_env
->next_cpu
= next_cpu
;
1791 new_env
->cpu_index
= cpu_index
;
1793 /* Clone all break/watchpoints.
1794 Note: Once we support ptrace with hw-debug register access, make sure
1795 BP_CPU break/watchpoints are handled correctly on clone. */
1796 QTAILQ_INIT(&env
->breakpoints
);
1797 QTAILQ_INIT(&env
->watchpoints
);
1798 #if defined(TARGET_HAS_ICE)
1799 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1800 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1802 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1803 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1811 #if !defined(CONFIG_USER_ONLY)
1812 void tb_flush_jmp_cache(CPUArchState
*env
, target_ulong addr
)
1816 /* Discard jump cache entries for any tb which might potentially
1817 overlap the flushed page. */
1818 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1819 memset (&env
->tb_jmp_cache
[i
], 0,
1820 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1822 i
= tb_jmp_cache_hash_page(addr
);
1823 memset (&env
->tb_jmp_cache
[i
], 0,
1824 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1827 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t end
,
1832 /* we modify the TLB cache so that the dirty bit will be set again
1833 when accessing the range */
1834 start1
= (uintptr_t)qemu_safe_ram_ptr(start
);
1835 /* Check that we don't span multiple blocks - this breaks the
1836 address comparisons below. */
1837 if ((uintptr_t)qemu_safe_ram_ptr(end
- 1) - start1
1838 != (end
- 1) - start
) {
1841 cpu_tlb_reset_dirty_all(start1
, length
);
1845 /* Note: start and end must be within the same ram block. */
1846 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1851 start
&= TARGET_PAGE_MASK
;
1852 end
= TARGET_PAGE_ALIGN(end
);
1854 length
= end
- start
;
1857 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
1859 if (tcg_enabled()) {
1860 tlb_reset_dirty_range_all(start
, end
, length
);
1864 int cpu_physical_memory_set_dirty_tracking(int enable
)
1867 in_migration
= enable
;
1871 target_phys_addr_t
memory_region_section_get_iotlb(CPUArchState
*env
,
1872 MemoryRegionSection
*section
,
1874 target_phys_addr_t paddr
,
1876 target_ulong
*address
)
1878 target_phys_addr_t iotlb
;
1881 if (memory_region_is_ram(section
->mr
)) {
1883 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1884 + memory_region_section_addr(section
, paddr
);
1885 if (!section
->readonly
) {
1886 iotlb
|= phys_section_notdirty
;
1888 iotlb
|= phys_section_rom
;
1891 /* IO handlers are currently passed a physical address.
1892 It would be nice to pass an offset from the base address
1893 of that region. This would avoid having to special case RAM,
1894 and avoid full address decoding in every device.
1895 We can't use the high bits of pd for this because
1896 IO_MEM_ROMD uses these as a ram address. */
1897 iotlb
= section
- phys_sections
;
1898 iotlb
+= memory_region_section_addr(section
, paddr
);
1901 /* Make accesses to pages with watchpoints go via the
1902 watchpoint trap routines. */
1903 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1904 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
1905 /* Avoid trapping reads of pages with a write breakpoint. */
1906 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1907 iotlb
= phys_section_watch
+ paddr
;
1908 *address
|= TLB_MMIO
;
1919 * Walks guest process memory "regions" one by one
1920 * and calls callback function 'fn' for each region.
1923 struct walk_memory_regions_data
1925 walk_memory_regions_fn fn
;
1931 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
1932 abi_ulong end
, int new_prot
)
1934 if (data
->start
!= -1ul) {
1935 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
1941 data
->start
= (new_prot
? end
: -1ul);
1942 data
->prot
= new_prot
;
1947 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
1948 abi_ulong base
, int level
, void **lp
)
1954 return walk_memory_regions_end(data
, base
, 0);
1959 for (i
= 0; i
< L2_SIZE
; ++i
) {
1960 int prot
= pd
[i
].flags
;
1962 pa
= base
| (i
<< TARGET_PAGE_BITS
);
1963 if (prot
!= data
->prot
) {
1964 rc
= walk_memory_regions_end(data
, pa
, prot
);
1972 for (i
= 0; i
< L2_SIZE
; ++i
) {
1973 pa
= base
| ((abi_ulong
)i
<<
1974 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
1975 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
1985 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
1987 struct walk_memory_regions_data data
;
1995 for (i
= 0; i
< V_L1_SIZE
; i
++) {
1996 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
1997 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2003 return walk_memory_regions_end(&data
, 0, 0);
2006 static int dump_region(void *priv
, abi_ulong start
,
2007 abi_ulong end
, unsigned long prot
)
2009 FILE *f
= (FILE *)priv
;
2011 (void) fprintf(f
, TARGET_ABI_FMT_lx
"-"TARGET_ABI_FMT_lx
2012 " "TARGET_ABI_FMT_lx
" %c%c%c\n",
2013 start
, end
, end
- start
,
2014 ((prot
& PAGE_READ
) ? 'r' : '-'),
2015 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2016 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2021 /* dump memory mappings */
2022 void page_dump(FILE *f
)
2024 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2025 "start", "end", "size", "prot");
2026 walk_memory_regions(f
, dump_region
);
2029 int page_get_flags(target_ulong address
)
2033 p
= page_find(address
>> TARGET_PAGE_BITS
);
2039 /* Modify the flags of a page and invalidate the code if necessary.
2040 The flag PAGE_WRITE_ORG is positioned automatically depending
2041 on PAGE_WRITE. The mmap_lock should already be held. */
2042 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2044 target_ulong addr
, len
;
2046 /* This function should never be called with addresses outside the
2047 guest address space. If this assert fires, it probably indicates
2048 a missing call to h2g_valid. */
2049 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2050 assert(end
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2052 assert(start
< end
);
2054 start
= start
& TARGET_PAGE_MASK
;
2055 end
= TARGET_PAGE_ALIGN(end
);
2057 if (flags
& PAGE_WRITE
) {
2058 flags
|= PAGE_WRITE_ORG
;
2061 for (addr
= start
, len
= end
- start
;
2063 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2064 PageDesc
*p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2066 /* If the write protection bit is set, then we invalidate
2068 if (!(p
->flags
& PAGE_WRITE
) &&
2069 (flags
& PAGE_WRITE
) &&
2071 tb_invalidate_phys_page(addr
, 0, NULL
);
2077 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2083 /* This function should never be called with addresses outside the
2084 guest address space. If this assert fires, it probably indicates
2085 a missing call to h2g_valid. */
2086 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2087 assert(start
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2093 if (start
+ len
- 1 < start
) {
2094 /* We've wrapped around. */
2098 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2099 start
= start
& TARGET_PAGE_MASK
;
2101 for (addr
= start
, len
= end
- start
;
2103 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2104 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2107 if( !(p
->flags
& PAGE_VALID
) )
2110 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2112 if (flags
& PAGE_WRITE
) {
2113 if (!(p
->flags
& PAGE_WRITE_ORG
))
2115 /* unprotect the page if it was put read-only because it
2116 contains translated code */
2117 if (!(p
->flags
& PAGE_WRITE
)) {
2118 if (!page_unprotect(addr
, 0, NULL
))
2127 /* called from signal handler: invalidate the code and unprotect the
2128 page. Return TRUE if the fault was successfully handled. */
2129 int page_unprotect(target_ulong address
, uintptr_t pc
, void *puc
)
2133 target_ulong host_start
, host_end
, addr
;
2135 /* Technically this isn't safe inside a signal handler. However we
2136 know this only ever happens in a synchronous SEGV handler, so in
2137 practice it seems to be ok. */
2140 p
= page_find(address
>> TARGET_PAGE_BITS
);
2146 /* if the page was really writable, then we change its
2147 protection back to writable */
2148 if ((p
->flags
& PAGE_WRITE_ORG
) && !(p
->flags
& PAGE_WRITE
)) {
2149 host_start
= address
& qemu_host_page_mask
;
2150 host_end
= host_start
+ qemu_host_page_size
;
2153 for (addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2154 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2155 p
->flags
|= PAGE_WRITE
;
2158 /* and since the content will be modified, we must invalidate
2159 the corresponding translated code. */
2160 tb_invalidate_phys_page(addr
, pc
, puc
);
2161 #ifdef DEBUG_TB_CHECK
2162 tb_invalidate_check(addr
);
2165 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2174 #endif /* defined(CONFIG_USER_ONLY) */
2176 #if !defined(CONFIG_USER_ONLY)
2178 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2179 typedef struct subpage_t
{
2181 target_phys_addr_t base
;
2182 uint16_t sub_section
[TARGET_PAGE_SIZE
];
2185 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2187 static subpage_t
*subpage_init(target_phys_addr_t base
);
2188 static void destroy_page_desc(uint16_t section_index
)
2190 MemoryRegionSection
*section
= &phys_sections
[section_index
];
2191 MemoryRegion
*mr
= section
->mr
;
2194 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
2195 memory_region_destroy(&subpage
->iomem
);
2200 static void destroy_l2_mapping(PhysPageEntry
*lp
, unsigned level
)
2205 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
2209 p
= phys_map_nodes
[lp
->ptr
];
2210 for (i
= 0; i
< L2_SIZE
; ++i
) {
2211 if (!p
[i
].is_leaf
) {
2212 destroy_l2_mapping(&p
[i
], level
- 1);
2214 destroy_page_desc(p
[i
].ptr
);
2218 lp
->ptr
= PHYS_MAP_NODE_NIL
;
2221 static void destroy_all_mappings(void)
2223 destroy_l2_mapping(&phys_map
, P_L2_LEVELS
- 1);
2224 phys_map_nodes_reset();
2227 static uint16_t phys_section_add(MemoryRegionSection
*section
)
2229 if (phys_sections_nb
== phys_sections_nb_alloc
) {
2230 phys_sections_nb_alloc
= MAX(phys_sections_nb_alloc
* 2, 16);
2231 phys_sections
= g_renew(MemoryRegionSection
, phys_sections
,
2232 phys_sections_nb_alloc
);
2234 phys_sections
[phys_sections_nb
] = *section
;
2235 return phys_sections_nb
++;
2238 static void phys_sections_clear(void)
2240 phys_sections_nb
= 0;
2243 /* register physical memory.
2244 For RAM, 'size' must be a multiple of the target page size.
2245 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2246 io memory page. The address used when calling the IO function is
2247 the offset from the start of the region, plus region_offset. Both
2248 start_addr and region_offset are rounded down to a page boundary
2249 before calculating this offset. This should not be a problem unless
2250 the low bits of start_addr and region_offset differ. */
2251 static void register_subpage(MemoryRegionSection
*section
)
2254 target_phys_addr_t base
= section
->offset_within_address_space
2256 MemoryRegionSection
*existing
= phys_page_find(base
>> TARGET_PAGE_BITS
);
2257 MemoryRegionSection subsection
= {
2258 .offset_within_address_space
= base
,
2259 .size
= TARGET_PAGE_SIZE
,
2261 target_phys_addr_t start
, end
;
2263 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
2265 if (!(existing
->mr
->subpage
)) {
2266 subpage
= subpage_init(base
);
2267 subsection
.mr
= &subpage
->iomem
;
2268 phys_page_set(base
>> TARGET_PAGE_BITS
, 1,
2269 phys_section_add(&subsection
));
2271 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
2273 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
2274 end
= start
+ section
->size
- 1;
2275 subpage_register(subpage
, start
, end
, phys_section_add(section
));
2279 static void register_multipage(MemoryRegionSection
*section
)
2281 target_phys_addr_t start_addr
= section
->offset_within_address_space
;
2282 ram_addr_t size
= section
->size
;
2283 target_phys_addr_t addr
;
2284 uint16_t section_index
= phys_section_add(section
);
2289 phys_page_set(addr
>> TARGET_PAGE_BITS
, size
>> TARGET_PAGE_BITS
,
2293 void cpu_register_physical_memory_log(MemoryRegionSection
*section
,
2296 MemoryRegionSection now
= *section
, remain
= *section
;
2298 if ((now
.offset_within_address_space
& ~TARGET_PAGE_MASK
)
2299 || (now
.size
< TARGET_PAGE_SIZE
)) {
2300 now
.size
= MIN(TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
2301 - now
.offset_within_address_space
,
2303 register_subpage(&now
);
2304 remain
.size
-= now
.size
;
2305 remain
.offset_within_address_space
+= now
.size
;
2306 remain
.offset_within_region
+= now
.size
;
2308 while (remain
.size
>= TARGET_PAGE_SIZE
) {
2310 if (remain
.offset_within_region
& ~TARGET_PAGE_MASK
) {
2311 now
.size
= TARGET_PAGE_SIZE
;
2312 register_subpage(&now
);
2314 now
.size
&= TARGET_PAGE_MASK
;
2315 register_multipage(&now
);
2317 remain
.size
-= now
.size
;
2318 remain
.offset_within_address_space
+= now
.size
;
2319 remain
.offset_within_region
+= now
.size
;
2323 register_subpage(&now
);
2328 void qemu_register_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2331 kvm_coalesce_mmio_region(addr
, size
);
2334 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2337 kvm_uncoalesce_mmio_region(addr
, size
);
2340 void qemu_flush_coalesced_mmio_buffer(void)
2343 kvm_flush_coalesced_mmio_buffer();
2346 #if defined(__linux__) && !defined(TARGET_S390X)
2348 #include <sys/vfs.h>
2350 #define HUGETLBFS_MAGIC 0x958458f6
2352 static long gethugepagesize(const char *path
)
2358 ret
= statfs(path
, &fs
);
2359 } while (ret
!= 0 && errno
== EINTR
);
2366 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
2367 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
    int flags;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
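/* Illustrative example (not in the original source) of the hugepage
 * rounding done in file_ram_alloc() above: with a 2 MiB huge page size,
 * a request for 10 MiB + 1 byte is rounded up before the mmap:
 *
 *     memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
 *     // 0xA00001 -> 0xC00000, i.e. the next 2 MiB multiple (12 MiB)
 */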
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
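/* Illustrative example (not in the original source) for find_ram_offset()
 * above: with blocks at [0x0, 0x800000) and [0x1000000, 0x1100000), a
 * request for 0x200000 bytes considers the gap after each block and keeps
 * the smallest one that fits:
 *
 *     gap after block 0: 0x800000..0x1000000  (0x800000 bytes)  <- chosen
 *     gap after block 1: 0x1100000..RAM_ADDR_MAX (larger, not chosen)
 *
 * so the function returns offset 0x800000.
 */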
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
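/* Illustrative usage sketch (not part of the original file): device models
 * normally reach qemu_ram_alloc() indirectly through the MemoryRegion API.
 * The region name and size below are hypothetical:
 *
 *     static MemoryRegion dev_mr;
 *     memory_region_init_ram(&dev_mr, "mydev.vram", 8 * 1024 * 1024);
 *     // internally this allocates the backing RAMBlock, roughly:
 *     // dev_mr.ram_addr = qemu_ram_alloc(size, &dev_mr);
 */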
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
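/* Illustrative usage sketch (not part of the original file), following the
 * comment above: a display device that owns its video RAM block may keep a
 * host pointer into it, but must not use this path for guest-driven DMA.
 * `vram_offset` and `vram_size` are hypothetical values the device obtained
 * when it allocated the block:
 *
 *     void *vram = qemu_get_ram_ptr(vram_offset);
 *     memset(vram, 0, vram_size);        // device-local access only
 *     qemu_put_ram_ptr(vram);
 */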
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.
 */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}
static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}
static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(target_phys_addr_t base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
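/* Illustrative example (not in the original source): if two devices map
 * MMIO at 0x1000-0x17ff and 0x1800-0x1fff inside the same 4 KiB page, that
 * page's phys_section points at a single subpage_t and a guest read at
 * 0x1804 is routed roughly as
 *
 *     idx     = SUBPAGE_IDX(0x804);                  // offset inside the page
 *     section = &phys_sections[mmio->sub_section[idx]];
 *     io_mem_read(section->mr, ...);                 // second device's ops
 *
 * Unclaimed byte ranges keep phys_section_unassigned from subpage_init().
 */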
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void core_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)
{
}
static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)
{
}
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
};

static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
};
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
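/* Illustrative usage sketch (not part of the original file): board code
 * builds the guest physical address space on top of the root regions
 * created in memory_map_init().  Names and sizes below are hypothetical:
 *
 *     MemoryRegion *sysmem = get_system_memory();
 *     static MemoryRegion ram;
 *     memory_region_init_ram(&ram, "board.ram", ram_size);
 *     memory_region_add_subregion(sysmem, 0, &ram);
 *
 * The listeners registered above then translate the resulting flat view
 * into the phys_map/phys_sections structures used by the softmmu fast path.
 */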
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
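/* Illustrative usage sketch (not part of the original file): the
 * cpu_physical_memory_read()/write() helpers used throughout device code
 * are thin wrappers around cpu_physical_memory_rw(), e.g.
 *
 *     uint32_t desc[4];
 *     cpu_physical_memory_read(desc_gpa, (uint8_t *)desc, sizeof(desc));
 *     // equivalent to cpu_physical_memory_rw(desc_gpa, ..., sizeof(desc), 0)
 *
 * where `desc_gpa` is a hypothetical guest-physical descriptor address.
 */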
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
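/* Illustrative usage sketch (not part of the original file): a device model
 * doing a DMA-style copy through the map/unmap API.  `gpa` is a hypothetical
 * guest-physical address supplied by the caller:
 *
 *     target_phys_addr_t dma_len = 4096;
 *     void *p = cpu_physical_memory_map(gpa, &dma_len, 1);
 *     if (p) {
 *         memset(p, 0, dma_len);                       // device writes guest RAM
 *         cpu_physical_memory_unmap(p, dma_len, 1, dma_len);
 *     }
 *
 * If the target is not plain RAM the call may hand back the single bounce
 * buffer instead, which is why only one such mapping can be outstanding.
 */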
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
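/* Illustrative usage sketch (not part of the original file): device code
 * uses the fixed-endian accessors when the bus layout, not the target CPU,
 * defines byte order.  For a little-endian descriptor ring at a hypothetical
 * guest address `ring_gpa`:
 *
 *     uint64_t head = ldq_le_phys(ring_gpa);
 *     stl_le_phys(ring_gpa + 8, 1);        // write a LE doorbell value
 *
 * The plain ldl_phys()/stl_phys() forms use DEVICE_NATIVE_ENDIAN instead.
 */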
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));