/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#endif
#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
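
/* Illustrative note (not part of the original source): with the macros
 * above, a page index is consumed top-down: V_L1_BITS select the statically
 * allocated first level, then each further level consumes L2_BITS.  For
 * example, assuming L1_MAP_ADDR_SPACE_BITS == 32, TARGET_PAGE_BITS == 12
 * and L2_BITS == 10:
 *
 *     V_L1_BITS_REM = (32 - 12) % 10 = 0
 *     V_L1_BITS     = 0 + 10 = 10   (REM < 4 folds one L2_BITS level in)
 *     V_L1_SHIFT    = 32 - 12 - 10 = 10
 *
 * so the 20-bit page index splits into a 10-bit L1 index plus one 10-bit
 * bottom-level index, which is exactly what page_find_alloc() below walks.
 */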
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;
struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
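
/* Illustrative note (not part of the original source): PHYS_MAP_NODE_NIL
   evaluates to 0x7fff, the largest value representable in the 15-bit 'ptr'
   field above, and is reserved as the "no node" marker;
   phys_map_node_alloc() below asserts that it never hands it out. */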
/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
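
/* Illustrative sketch (hypothetical helper, not in the original source): a
   typical read-only lookup built on page_find().  Pages never touched return
   NULL, so lookups stay allocation-free; writers that must create the
   descriptor instead pass alloc=1 through page_find_alloc(), as
   tb_alloc_page() does further down. */
static inline int page_has_tbs_example(tb_page_addr_t addr)
{
    PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);
    return p != NULL && p->first_tb != NULL;
}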
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}
static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
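
/* Illustrative note (not part of the original source): the radix tree
 * stores 16-bit leaves rather than pointers; a leaf value is an index into
 * phys_sections[].  A sketch of the round trip, assuming a section was
 * registered through phys_page_set():
 *
 *     phys_page_set(addr >> TARGET_PAGE_BITS, 1, phys_section_add(&sec));
 *     MemoryRegionSection *s = phys_page_find(addr >> TARGET_PAGE_BITS);
 *
 * 's' then points at the phys_sections[] copy of 'sec', while unmapped
 * addresses fall through to phys_sections[phys_section_unassigned].
 */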
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
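
/* Illustrative note (not part of the original source): the max-size
   arithmetic above leaves a slack of TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes at
   the end of the buffer, so the translator can always finish emitting the
   block whose allocation passed the code_gen_buffer_max_size check in
   tb_alloc(); the buffer flush in tb_flush() therefore only ever happens
   between blocks, never in the middle of one. */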
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}
bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
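
/* Illustrative sketch (hypothetical helper, not in the original source): the
   jump lists encode which of a TB's two branch slots a link came from in the
   two low bits of the pointer, relying on TranslationBlock being at least
   4-byte aligned.  A tag of 2 marks the jmp_first list head, which is why
   the loop above and the one in tb_phys_invalidate() stop when n1 == 2. */
static inline TranslationBlock *tb_untag_example(TranslationBlock *tagged,
                                                 unsigned int *n)
{
    *n = (uintptr_t)tagged & 3;  /* 0 or 1: branch slot; 2: list head */
    return (TranslationBlock *)((uintptr_t)tagged & ~3);
}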
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
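
/* Illustrative worked example (not in the original source): set_bits()
   below marks 'len' bits starting at bit 'start', LSB-first within each
   byte.  E.g. set_bits(tab, 3, 10) touches bits 3..12: the first byte gets
   0xff << 3 == 0xf8 OR-ed in, there is no full middle byte, and the second
   byte gets ~(0xff << (13 & 7)) == 0x1f OR-ed in. */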
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
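
/* Illustrative note (not part of the original source): only the first
   iteration above can start mid-page; masking 'start' and stepping one
   TARGET_PAGE_SIZE per loop then walks whole pages.  E.g. with 4K pages, a
   range [0x1234, 0x3000) yields per-page calls starting at 0x1234 and then
   0x2000, each clipped against 'end' inside the per-page routine below. */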
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */
void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}
target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
                                                   MemoryRegionSection *section,
                                                   target_ulong vaddr,
                                                   target_phys_addr_t paddr,
                                                   int prot,
                                                   target_ulong *address)
{
    target_phys_addr_t iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#else
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(target_phys_addr_t base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}
static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
static void register_subpage(MemoryRegionSection *section)
{
    subpage_t *subpage;
    target_phys_addr_t base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    target_phys_addr_t start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size;
    subpage_register(subpage, start, end, phys_section_add(section));
}
static void register_multipage(MemoryRegionSection *section)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}
void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    now.size &= TARGET_PAGE_MASK;
    if (now.size) {
        register_multipage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(&now);
    }
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
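/* Illustration: with RAM blocks at [0x0, 0x100000) and
 * [0x200000, 0x300000), find_ram_offset(0x80000) returns 0x100000,
 * the start of the smallest gap that fits the request, and
 * last_ram_offset() returns 0x300000.
 */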
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
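/* Usage sketch: callers normally reach qemu_ram_alloc() through the
 * memory API rather than directly.  A hypothetical device ("mydev",
 * assuming this tree's memory_region_init_ram() signature) would do:
 */
#if 0
static MemoryRegion mydev_vram;

static void mydev_init_ram(void)
{
    /* memory_region_init_ram() calls qemu_ram_alloc() internally */
    memory_region_init_ram(&mydev_vram, "mydev.vram", 0x100000);
    memory_region_add_subregion(get_system_memory(), 0xa0000000, &mydev_vram);
}
#endif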
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
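/* Usage sketch (hypothetical offset "vram_offset" from qemu_ram_alloc()):
 *
 *     uint8_t *p = qemu_get_ram_ptr(vram_offset);
 *     memset(p, 0, 64);              stays within the owning block
 *     qemu_put_ram_ptr(p);
 *
 * For accesses to arbitrary guest memory use cpu_physical_memory_rw()
 * or cpu_physical_memory_map() instead.
 */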
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
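/* Write path for a page that still contains translated code: the TLB
 * routes the store to notdirty_mem_write() because CODE_DIRTY_FLAG is
 * clear.  The handler invalidates the TBs on that page, performs the
 * store on the underlying RAM, sets the dirty flags, and once they read
 * back 0xff flips the TLB entry back to the fast RAM path through
 * tlb_set_dirty().
 */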
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(target_phys_addr_t base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
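/* Example: two devices sharing one page, A at offsets [0x000, 0x0ff] and
 * B at [0x100, 0x1ff].  The first register_subpage() call creates the
 * subpage_t covering the page; each subpage_register() call then fills
 * sub_section[] for its byte range.  Untouched offsets keep
 * phys_section_unassigned, and subpage_read()/subpage_write() dispatch
 * on SUBPAGE_IDX(addr) to the owning section.
 */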
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}
MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void core_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}
static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 0,
};
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}
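/* Every topology change is replayed through these listeners: core_begin()
 * drops the old phys_map and re-creates the dummy sections, the memory
 * core then feeds each MemoryRegionSection to core_region_add() or
 * core_region_nop(), which rebuild the radix tree via
 * cpu_register_physical_memory_log(), and core_commit() finally flushes
 * every CPU's TLB so no stale mapping survives.
 */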
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
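/* Usage sketch (hypothetical guest-physical address "gpa"): the
 * cpu_physical_memory_read()/write() helpers are thin wrappers around
 * this function, e.g.
 *
 *     uint8_t buf[16];
 *     cpu_physical_memory_read(gpa, buf, sizeof(buf));    is_write = 0
 *     cpu_physical_memory_write(gpa, buf, sizeof(buf));   is_write = 1
 */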
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
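/* Typical zero-copy DMA pattern (hypothetical helpers "retry_dma" and
 * "fill_buffer"), including the retry path used when the single bounce
 * buffer is busy:
 */
#if 0
static void dma_map_retry(void *opaque)
{
    retry_dma(opaque);    /* called once a mapping has been released */
}

static void do_dma_write(target_phys_addr_t addr, target_phys_addr_t len,
                         void *opaque)
{
    target_phys_addr_t plen = len;
    void *buf = cpu_physical_memory_map(addr, &plen, 1);

    if (!buf) {
        /* resources exhausted: retry when cpu_notify_map_clients() fires */
        cpu_register_map_client(opaque, dma_map_retry);
        return;
    }
    fill_buffer(buf, plen);                       /* plen may be < len */
    cpu_physical_memory_unmap(buf, plen, 1, plen);
}
#endif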
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
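/* Example: device registers with fixed endianness should use the explicit
 * accessors so host/guest byte order does not leak in, e.g. a little-endian
 * descriptor field is written with stl_le_phys(desc_addr, val) and read
 * back with ldl_le_phys(desc_addr); the plain stl_phys()/ldl_phys() keep
 * DEVICE_NATIVE_ENDIAN semantics.
 */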
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif